muchsync-7/notmuch_db.h
// -*- C++ -*-

#include <exception>
#include <string>
#include <unordered_set>

#include <notmuch.h>
#include <xapian.h>

#include "cleanup.h"

using std::string;

// Exception wrapping a failed libnotmuch call: records the operation name
// and status code and formats both into what().
struct notmuch_err : std::exception {
  const char *const op_;
  const notmuch_status_t status_;
  const string what_;
  notmuch_err(const char *op, notmuch_status_t status)
    : op_(op), status_(status),
      what_(op_ + string(": ") + notmuch_status_to_string(status_)) {}
  const char *what() const noexcept override { return what_.c_str(); }
};

class notmuch_db {
  notmuch_database_t *notmuch_ = nullptr;

  // Throw a notmuch_err if a libnotmuch call returns a non-zero status.
  static void nmtry(const char *op, notmuch_status_t stat) {
    if (stat)
      throw notmuch_err (op, stat);
  }
  string run_notmuch(const char *const *av, const char *errprefix = nullptr,
                     int *exit_value = nullptr);

public:
  using tags_t = std::unordered_set<string>;
  using message_t = unique_obj<notmuch_message_t, notmuch_message_destroy>;

  const string notmuch_config;
  const string maildir;
  const tags_t new_tags;
  const tags_t and_tags;
  const bool sync_flags;

  static string default_notmuch_config();

  /* The next function is massively evil, but looking through git
   * history, doc_id has been the second element of the
   * notmuch_message_t structure for a long time. */
  static Xapian::docid get_docid(notmuch_message_t *msg) {
    struct fake_message {
      notmuch_database_t *notmuch;
      Xapian::docid doc_id;
    };
    return reinterpret_cast<fake_message *>(msg)->doc_id;
  }

private:
  tags_t make_and_tags();

public:
  notmuch_db(string config, bool create = false);
  notmuch_db(const notmuch_db &) = delete;
  ~notmuch_db();

  void begin_atomic() {
    nmtry("begin_atomic", notmuch_database_begin_atomic(notmuch()));
  }
  void end_atomic() {
    nmtry("end_atomic", notmuch_database_end_atomic(notmuch()));
  }

  message_t get_message(const char *msgid);
  message_t add_message(const string &path, const tags_t *new_tags = nullptr,
                        bool *was_new = nullptr);
  void remove_message(const string &path);
  void set_tags(notmuch_message_t *msg, const tags_t &tags);
  Xapian::docid get_dir_docid(const char *path);
  notmuch_database_t *notmuch();
  string get_config(const char *, int *err = nullptr);
  void set_config(const char *, ...);
  void close();
  void run_new(const char *prefix = "[notmuch] ");
};

muchsync-7/configure
#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.71 for muchsync 7. # # # Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, # Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh as_nop=: if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else $as_nop case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi # Reset variables that may have inherited troublesome values from # the environment. # IFS needs to be set, to space, tab, and newline, in precisely that order. # (If _AS_PATH_WALK were called with IFS unset, it would have the # side effect of setting IFS to empty, thus disabling word splitting.) # Quoting is to prevent editors from complaining about space-tab. 
as_nl=' ' export as_nl IFS=" "" $as_nl" PS1='$ ' PS2='> ' PS4='+ ' # Ensure predictable behavior from utilities with locale-dependent output. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # We cannot yet rely on "unset" to work, but we need these variables # to be unset--not just set to an empty or harmless value--now, to # avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct # also avoids known problems related to "unset" and subshell syntax # in other old shells (e.g. bash 2.01 and pdksh 5.2.14). for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH do eval test \${$as_var+y} \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done # Ensure that fds 0, 1, and 2 are open. if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi if (exec 3>&2) ; then :; else exec 2>/dev/null; fi # The user is always right. if ${PATH_SEPARATOR+false} :; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac test -r "$as_dir$0" && as_myself=$as_dir$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="as_nop=: if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. 
alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else \$as_nop case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ) then : else \$as_nop exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 blah=\$(echo \$(echo blah)) test x\"\$blah\" = xblah || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1" if (eval "$as_required") 2>/dev/null then : as_have_required=yes else $as_nop as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null then : else $as_nop as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null then : CONFIG_SHELL=$as_shell as_have_required=yes if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null then : break 2 fi fi done;; esac as_found=false done IFS=$as_save_IFS if $as_found then : else $as_nop if { test -f "$SHELL" || test -f "$SHELL.exe"; } && as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null then : CONFIG_SHELL=$SHELL as_have_required=yes fi fi if test "x$CONFIG_SHELL" != x then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno then : printf "%s\n" "$0: This script requires a shell more modern than all" printf "%s\n" "$0: the shells that I found on your system." if test ${ZSH_VERSION+y} ; then printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should" printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later." else printf "%s\n" "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. 
Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_nop # --------- # Do nothing but, unlike ":", preserve the value of $?. as_fn_nop () { return $? } as_nop=as_fn_nop # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null then : eval 'as_fn_append () { eval $1+=\$2 }' else $as_nop as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null then : eval 'as_fn_arith () { as_val=$(( $* )) }' else $as_nop as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_nop # --------- # Do nothing but, unlike ":", preserve the value of $?. as_fn_nop () { return $? } as_nop=as_fn_nop # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi printf "%s\n" "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } # Determine whether it's possible to make 'echo' print without a newline. # These variables are no longer used directly by Autoconf, but are AC_SUBSTed # for compatibility with existing Makefiles. ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac # For backward compatibility with old third-party macros, we provide # the shell variables $as_echo and $as_echo_n. New code should use # AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. as_echo='printf %s\n' as_echo_n='printf %s' rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='muchsync' PACKAGE_TARNAME='muchsync' PACKAGE_VERSION='7' PACKAGE_STRING='muchsync 7' PACKAGE_BUGREPORT='' PACKAGE_URL='' ac_unique_file="configure.ac" ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS LIBOBJS xapian_LIBS xapian_CPPFLAGS XAPIAN_CONFIG libcrypto_LIBS libcrypto_CFLAGS sqlite3_LIBS sqlite3_CFLAGS PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PKG_CONFIG HAVE_CXX11 am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__include DEPDIR OBJEXT EXEEXT ac_ct_CXX CPPFLAGS LDFLAGS CXXFLAGS CXX AM_BACKSLASH AM_DEFAULT_VERBOSITY AM_DEFAULT_V AM_V CSCOPE ETAGS CTAGS am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir runstatedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL am__quote' ac_subst_files='' ac_user_opts=' enable_option_checking enable_silent_rules enable_dependency_tracking ' ac_precious_vars='build_alias host_alias target_alias CXX CXXFLAGS LDFLAGS LIBS CPPFLAGS CCC PKG_CONFIG PKG_CONFIG_PATH PKG_CONFIG_LIBDIR sqlite3_CFLAGS sqlite3_LIBS libcrypto_CFLAGS libcrypto_LIBS' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) 
bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -runstatedir | --runstatedir | --runstatedi | --runstated \ | --runstate | --runstat | --runsta | --runst | --runs \ | --run | --ru | --r) ac_prev=runstatedir ;; -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ | --run=* | --ru=* | --r=*) runstatedir=$ac_optarg ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | 
--shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? 
"missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir runstatedir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. 
cat <<_ACEOF \`configure' configures muchsync 7 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/muchsync] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on installed program names _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of muchsync 7:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-silent-rules less verbose build output (undo: "make V=1") --disable-silent-rules verbose build output (undo: "make V=0") --enable-dependency-tracking do not reject slow dependency extractors --disable-dependency-tracking speeds up one-time build Some influential environment variables: CXX C++ compiler command CXXFLAGS C++ compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if you have headers in a nonstandard directory PKG_CONFIG path to pkg-config utility PKG_CONFIG_PATH directories to add to pkg-config's search path PKG_CONFIG_LIBDIR path overriding pkg-config's built-in search path sqlite3_CFLAGS C compiler flags for sqlite3, overriding pkg-config sqlite3_LIBS linker flags for sqlite3, overriding pkg-config libcrypto_CFLAGS C compiler flags for libcrypto, overriding pkg-config libcrypto_LIBS linker flags for libcrypto, overriding pkg-config Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for configure.gnu first; this name is used for a wrapper for # Metaconfig's "Configure" on case-insensitive file systems. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF muchsync configure 7 generated by GNU Autoconf 2.71 Copyright (C) 2021 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest.beam if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? 
if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext then : ac_retval=0 else $as_nop printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext } then : ac_retval=0 else $as_nop printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_cxx_check_func LINENO FUNC VAR # ------------------------------------ # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_cxx_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 printf %s "checking for $2... " >&6; } if eval test \${$3+y} then : printf %s "(cached) " >&6 else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. */ #include #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. 
*/ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main (void) { return $2 (); ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO" then : eval "$3=yes" else $as_nop eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 printf "%s\n" "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_func ac_configure_args_raw= for ac_arg do case $ac_arg in *\'*) ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append ac_configure_args_raw " '$ac_arg'" done case $ac_configure_args_raw in *$as_nl*) ac_safe_unquote= ;; *) ac_unsafe_z='|&;<>()$`\\"*?[ '' ' # This string ends in space, tab. ac_unsafe_a="$ac_unsafe_z#~" ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g" ac_configure_args_raw=` printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;; esac cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by muchsync $as_me 7, which was generated by GNU Autoconf 2.71. Invocation command line was $ $0$ac_configure_args_raw _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac printf "%s\n" "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. 
else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Sanitize IFS. IFS=" "" $as_nl" # Save into config.log some information that might help in debugging. { echo printf "%s\n" "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo printf "%s\n" "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac printf "%s\n" "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then printf "%s\n" "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac printf "%s\n" "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then printf "%s\n" "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && printf "%s\n" "$as_me: caught signal $ac_signal" printf "%s\n" "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h printf "%s\n" "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. 
printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. if test -n "$CONFIG_SITE"; then ac_site_files="$CONFIG_SITE" elif test "x$prefix" != xNONE; then ac_site_files="$prefix/share/config.site $prefix/etc/config.site" else ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" fi for ac_site_file in $ac_site_files do case $ac_site_file in #( */*) : ;; #( *) : ac_site_file=./$ac_site_file ;; esac if test -f "$ac_site_file" && test -r "$ac_site_file"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 printf "%s\n" "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 printf "%s\n" "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Test code for whether the C++ compiler supports C++98 (global declarations) ac_cxx_conftest_cxx98_globals=' // Does the compiler advertise C++98 conformance? #if !defined __cplusplus || __cplusplus < 199711L # error "Compiler does not advertise C++98 conformance" #endif // These inclusions are to reject old compilers that // lack the unsuffixed header files. #include #include // and are *not* freestanding headers in C++98. extern void assert (int); namespace std { extern int strcmp (const char *, const char *); } // Namespaces, exceptions, and templates were all added after "C++ 2.0". using std::exception; using std::strcmp; namespace { void test_exception_syntax() { try { throw "test"; } catch (const char *s) { // Extra parentheses suppress a warning when building autoconf itself, // due to lint rules shared with more typical C programs. assert (!(strcmp) (s, "test")); } } template struct test_template { T const val; explicit test_template(T t) : val(t) {} template T add(U u) { return static_cast(u) + val; } }; } // anonymous namespace ' # Test code for whether the C++ compiler supports C++98 (body of main) ac_cxx_conftest_cxx98_main=' assert (argc); assert (! argv[0]); { test_exception_syntax (); test_template tt (2.0); assert (tt.add (4) == 6.0); assert (true && !false); } ' # Test code for whether the C++ compiler supports C++11 (global declarations) ac_cxx_conftest_cxx11_globals=' // Does the compiler advertise C++ 2011 conformance? 
#if !defined __cplusplus || __cplusplus < 201103L # error "Compiler does not advertise C++11 conformance" #endif namespace cxx11test { constexpr int get_val() { return 20; } struct testinit { int i; double d; }; class delegate { public: delegate(int n) : n(n) {} delegate(): delegate(2354) {} virtual int getval() { return this->n; }; protected: int n; }; class overridden : public delegate { public: overridden(int n): delegate(n) {} virtual int getval() override final { return this->n * 2; } }; class nocopy { public: nocopy(int i): i(i) {} nocopy() = default; nocopy(const nocopy&) = delete; nocopy & operator=(const nocopy&) = delete; private: int i; }; // for testing lambda expressions template Ret eval(Fn f, Ret v) { return f(v); } // for testing variadic templates and trailing return types template auto sum(V first) -> V { return first; } template auto sum(V first, Args... rest) -> V { return first + sum(rest...); } } ' # Test code for whether the C++ compiler supports C++11 (body of main) ac_cxx_conftest_cxx11_main=' { // Test auto and decltype auto a1 = 6538; auto a2 = 48573953.4; auto a3 = "String literal"; int total = 0; for (auto i = a3; *i; ++i) { total += *i; } decltype(a2) a4 = 34895.034; } { // Test constexpr short sa[cxx11test::get_val()] = { 0 }; } { // Test initializer lists cxx11test::testinit il = { 4323, 435234.23544 }; } { // Test range-based for int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3, 14, 19, 17, 8, 6, 20, 16, 2, 11, 1}; for (auto &x : array) { x += 23; } } { // Test lambda expressions using cxx11test::eval; assert (eval ([](int x) { return x*2; }, 21) == 42); double d = 2.0; assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0); assert (d == 5.0); assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0); assert (d == 5.0); } { // Test use of variadic templates using cxx11test::sum; auto a = sum(1); auto b = sum(1, 2); auto c = sum(1.0, 2.0, 3.0); } { // Test constructor delegation cxx11test::delegate d1; cxx11test::delegate d2(); cxx11test::delegate d3(45); } { // Test override and final cxx11test::overridden o1(55464); } { // Test nullptr char *c = nullptr; } { // Test template brackets test_template<::test_template> v(test_template(12)); } { // Unicode literals char const *utf8 = u8"UTF-8 string \u2500"; char16_t const *utf16 = u"UTF-8 string \u2500"; char32_t const *utf32 = U"UTF-32 string \u2500"; } ' # Test code for whether the C compiler supports C++11 (complete). ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals} ${ac_cxx_conftest_cxx11_globals} int main (int argc, char **argv) { int ok = 0; ${ac_cxx_conftest_cxx98_main} ${ac_cxx_conftest_cxx11_main} return ok; } " # Test code for whether the C compiler supports C++98 (complete). ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals} int main (int argc, char **argv) { int ok = 0; ${ac_cxx_conftest_cxx98_main} return ok; } " # Auxiliary files required by this configure script. ac_aux_files="missing install-sh" # Locations in which to look for auxiliary files. ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.." # Search for a directory containing all of the required auxiliary files, # $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates. # If we don't find one directory that contains all the files we need, # we report the set of missing files from the *first* directory in # $ac_aux_dir_candidates and give up. 
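The search loop that follows walks $ac_aux_dir_candidates with the same IFS=$PATH_SEPARATOR idiom configure uses to walk $PATH. A minimal standalone sketch of that idiom (variable names here are illustrative, not taken from the generated script):

# illustration only: split a PATH-style list and probe each entry for install-sh
candidates=".${PATH_SEPARATOR:-:}./build-aux"
save_IFS=$IFS; IFS=${PATH_SEPARATOR:-:}
for dir in $candidates; do
  IFS=$save_IFS
  case $dir in */) ;; *) dir=$dir/ ;; esac
  if test -f "${dir}install-sh"; then
    echo "aux dir: $dir"
    break
  fi
done
IFS=$save_IFS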
ac_missing_aux_files="" ac_first_candidate=: printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5 as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in $ac_aux_dir_candidates do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac as_found=: printf "%s\n" "$as_me:${as_lineno-$LINENO}: trying $as_dir" >&5 ac_aux_dir_found=yes ac_install_sh= for ac_aux in $ac_aux_files do # As a special case, if "install-sh" is required, that requirement # can be satisfied by any of "install-sh", "install.sh", or "shtool", # and $ac_install_sh is set appropriately for whichever one is found. if test x"$ac_aux" = x"install-sh" then if test -f "${as_dir}install-sh"; then printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install-sh found" >&5 ac_install_sh="${as_dir}install-sh -c" elif test -f "${as_dir}install.sh"; then printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install.sh found" >&5 ac_install_sh="${as_dir}install.sh -c" elif test -f "${as_dir}shtool"; then printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}shtool found" >&5 ac_install_sh="${as_dir}shtool install -c" else ac_aux_dir_found=no if $ac_first_candidate; then ac_missing_aux_files="${ac_missing_aux_files} install-sh" else break fi fi else if test -f "${as_dir}${ac_aux}"; then printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}${ac_aux} found" >&5 else ac_aux_dir_found=no if $ac_first_candidate; then ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}" else break fi fi fi done if test "$ac_aux_dir_found" = yes; then ac_aux_dir="$as_dir" break fi ac_first_candidate=false as_found=false done IFS=$as_save_IFS if $as_found then : else $as_nop as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. if test -f "${ac_aux_dir}config.guess"; then ac_config_guess="$SHELL ${ac_aux_dir}config.guess" fi if test -f "${ac_aux_dir}config.sub"; then ac_config_sub="$SHELL ${ac_aux_dir}config.sub" fi if test -f "$ac_aux_dir/configure"; then ac_configure="$SHELL ${ac_aux_dir}configure" fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. 
ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 printf "%s\n" "$as_me: former value: \`$ac_old_val'" >&2;} { printf "%s\n" "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 printf "%s\n" "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. ## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu am__api_version='1.16' # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 printf %s "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if test ${ac_cv_path_install+y} then : printf %s "(cached) " >&6 else $as_nop as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac # Account for fact that we put trailing slashes in our PATH walk. case $as_dir in #(( ./ | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. 
# Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test ${ac_cv_path_install+y}; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 printf "%s\n" "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 printf %s "checking whether build environment is sane... " >&6; } # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". as_fn_error $? "ls -t appears to fail. Make sure there is not a broken alias in your environment" "$LINENO" 5 fi if test "$2" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$2" = conftest.file ) then # Ok. : else as_fn_error $? "newly created file is older than distributed files! 
Check your system clock" "$LINENO" 5 fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi rm -f conftest.file test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"` # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` if test x"${MISSING+set}" != xset; then MISSING="\${SHELL} '$am_aux_dir/missing'" fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_prog_STRIP+y} then : printf %s "(cached) " >&6 else $as_nop if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 printf "%s\n" "$STRIP" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_prog_ac_ct_STRIP+y} then : printf %s "(cached) " >&6 else $as_nop if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 printf "%s\n" "$ac_ct_STRIP" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5 printf %s "checking for a race-free mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if test ${ac_cv_path_mkdir+y} then : printf %s "(cached) " >&6 else $as_nop as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir ('*'coreutils) '* | \ 'BusyBox '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test ${ac_cv_path_mkdir+y}; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. MKDIR_P="$ac_install_sh -d" fi fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 printf "%s\n" "$MKDIR_P" >&6; } for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_prog_AWK+y} then : printf %s "(cached) " >&6 else $as_nop if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 printf "%s\n" "$AWK" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi test -n "$AWK" && break done { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval test \${ac_cv_prog_make_${ac_make}_set+y} then : printf %s "(cached) " >&6 else $as_nop cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } SET_MAKE= else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null # Check whether --enable-silent-rules was given. if test ${enable_silent_rules+y} then : enableval=$enable_silent_rules; fi case $enable_silent_rules in # ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=1;; esac am_make=${MAKE-make} { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 printf %s "checking whether $am_make supports nested variables... " >&6; } if test ${am_cv_make_support_nested_variables+y} then : printf %s "(cached) " >&6 else $as_nop if printf "%s\n" 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 printf "%s\n" "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AM_BACKSLASH='\' if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. 
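The identity block that follows is expanded from the package metadata handed to Autoconf and Automake. A minimal sketch of the corresponding configure.ac input (assumed for illustration; the actual configure.ac is not part of this excerpt) would be:

# illustration only -- plausible configure.ac lines behind the generated output below
AC_INIT([muchsync], [7])
AM_INIT_AUTOMAKE
AC_PROG_CXX
AC_CONFIG_FILES([Makefile])
AC_OUTPUT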
PACKAGE='muchsync' VERSION='7' printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # mkdir_p='$(MKDIR_P)' # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AMTAR='$${TAR-tar}' # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar pax cpio none' am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' # Variables for tags utilities; see am/tags.am if test -z "$CTAGS"; then CTAGS=ctags fi if test -z "$ETAGS"; then ETAGS=etags fi if test -z "$CSCOPE"; then CSCOPE=cscope fi # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 fi fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... 
" >&6; } if test ${ac_cv_prog_CXX+y} then : printf %s "(cached) " >&6 else $as_nop if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 printf "%s\n" "$CXX" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_prog_ac_ct_CXX+y} then : printf %s "(cached) " >&6 else $as_nop if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 printf "%s\n" "$ac_ct_CXX" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. 
# It will help us diagnose broken compilers, and finding out an intuition # of exeext. { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C++ compiler works" >&5 printf %s "checking whether the C++ compiler works... " >&6; } ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else $as_nop ac_file='' fi if test -z "$ac_file" then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C++ compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else $as_nop { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler default output file name" >&5 printf %s "checking for C++ compiler default output file name... " >&6; } { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 printf "%s\n" "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 printf %s "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else $as_nop { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 printf "%s\n" "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main (void) { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 printf %s "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 printf "%s\n" "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 printf %s "checking for suffix of object files... " >&6; } if test ${ac_cv_objext+y} then : printf %s "(cached) " >&6 else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_nop printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 printf "%s\n" "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5 printf %s "checking whether the compiler supports GNU C++... " >&6; } if test ${ac_cv_cxx_compiler_gnu+y} then : printf %s "(cached) " >&6 else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : ac_compiler_gnu=yes else $as_nop ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; } ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+y} ac_save_CXXFLAGS=$CXXFLAGS { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 printf %s "checking whether $CXX accepts -g... " >&6; } if test ${ac_cv_prog_cxx_g+y} then : printf %s "(cached) " >&6 else $as_nop ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : ac_cv_prog_cxx_g=yes else $as_nop CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : else $as_nop ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 printf "%s\n" "$ac_cv_prog_cxx_g" >&6; } if test $ac_test_CXXFLAGS; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_prog_cxx_stdcxx=no if test x$ac_prog_cxx_stdcxx = xno then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5 printf %s "checking for $CXX option to enable C++11 features... 
" >&6; } if test ${ac_cv_prog_cxx_11+y} then : printf %s "(cached) " >&6 else $as_nop ac_cv_prog_cxx_11=no ac_save_CXX=$CXX cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_cxx_conftest_cxx11_program _ACEOF for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA do CXX="$ac_save_CXX $ac_arg" if ac_fn_cxx_try_compile "$LINENO" then : ac_cv_prog_cxx_cxx11=$ac_arg fi rm -f core conftest.err conftest.$ac_objext conftest.beam test "x$ac_cv_prog_cxx_cxx11" != "xno" && break done rm -f conftest.$ac_ext CXX=$ac_save_CXX fi if test "x$ac_cv_prog_cxx_cxx11" = xno then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 printf "%s\n" "unsupported" >&6; } else $as_nop if test "x$ac_cv_prog_cxx_cxx11" = x then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 printf "%s\n" "none needed" >&6; } else $as_nop { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5 printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; } CXX="$CXX $ac_cv_prog_cxx_cxx11" fi ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11 ac_prog_cxx_stdcxx=cxx11 fi fi if test x$ac_prog_cxx_stdcxx = xno then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5 printf %s "checking for $CXX option to enable C++98 features... " >&6; } if test ${ac_cv_prog_cxx_98+y} then : printf %s "(cached) " >&6 else $as_nop ac_cv_prog_cxx_98=no ac_save_CXX=$CXX cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_cxx_conftest_cxx98_program _ACEOF for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA do CXX="$ac_save_CXX $ac_arg" if ac_fn_cxx_try_compile "$LINENO" then : ac_cv_prog_cxx_cxx98=$ac_arg fi rm -f core conftest.err conftest.$ac_objext conftest.beam test "x$ac_cv_prog_cxx_cxx98" != "xno" && break done rm -f conftest.$ac_ext CXX=$ac_save_CXX fi if test "x$ac_cv_prog_cxx_cxx98" = xno then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 printf "%s\n" "unsupported" >&6; } else $as_nop if test "x$ac_cv_prog_cxx_cxx98" = x then : { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 printf "%s\n" "none needed" >&6; } else $as_nop { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5 printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; } CXX="$CXX $ac_cv_prog_cxx_cxx98" fi ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98 ac_prog_cxx_stdcxx=cxx98 fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; } cat > confinc.mk << 'END' am__doit: @echo this is the am__doit target >confinc.out .PHONY: am__doit END am__include="#" am__quote= # BSD make does it like this. echo '.include "confinc.mk" # ignored' > confmf.BSD # Other make implementations (GNU, Solaris 10, AIX) do it like this. echo 'include confinc.mk # ignored' > confmf.GNU _am_result=no for s in GNU BSD; do { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5 (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } case $?:`cat confinc.out 2>/dev/null` in #( '0:this is the am__doit target') : case $s in #( BSD) : am__include='.include' am__quote='"' ;; #( *) : am__include='include' am__quote='' ;; esac ;; #( *) : ;; esac if test "$am__include" != "#"; then _am_result="yes ($s style)" break fi done rm -f confinc.* confmf.* { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 printf "%s\n" "${_am_result}" >&6; } # Check whether --enable-dependency-tracking was given. if test ${enable_dependency_tracking+y} then : enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi depcc="$CXX" am_compiler_list= { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 printf %s "checking dependency style of $depcc... " >&6; } if test ${am_cv_CXX_dependencies_compiler_type+y} then : printf %s "(cached) " >&6 else $as_nop if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. 
if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi ax_cxx_compile_cxx11_required=true ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ac_success=no { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++11 features by default" >&5 printf %s "checking whether $CXX supports C++11 features by default... " >&6; } if test ${ax_cv_cxx_compile_cxx11+y} then : printf %s "(cached) " >&6 else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ template struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; typedef check> right_angle_brackets; int a; decltype(a) b; typedef check check_type; check_type c; check_type&& cr = static_cast(c); auto d = a; _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : ax_cv_cxx_compile_cxx11=yes else $as_nop ax_cv_cxx_compile_cxx11=no fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ax_cv_cxx_compile_cxx11" >&5 printf "%s\n" "$ax_cv_cxx_compile_cxx11" >&6; } if test x$ax_cv_cxx_compile_cxx11 = xyes; then ac_success=yes fi if test x$ac_success = xno; then for switch in -std=c++11 -std=c++0x; do cachevar=`printf "%s\n" "ax_cv_cxx_compile_cxx11_$switch" | $as_tr_sh` { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++11 features with $switch" >&5 printf %s "checking whether $CXX supports C++11 features with $switch... 
" >&6; } if eval test \${$cachevar+y} then : printf %s "(cached) " >&6 else $as_nop ac_save_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$CXXFLAGS $switch" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ template struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; typedef check> right_angle_brackets; int a; decltype(a) b; typedef check check_type; check_type c; check_type&& cr = static_cast(c); auto d = a; _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : eval $cachevar=yes else $as_nop eval $cachevar=no fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext CXXFLAGS="$ac_save_CXXFLAGS" fi eval ac_res=\$$cachevar { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 printf "%s\n" "$ac_res" >&6; } if eval test x\$$cachevar = xyes; then CXXFLAGS="$CXXFLAGS $switch" ac_success=yes break fi done fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test x$ax_cxx_compile_cxx11_required = xtrue; then if test x$ac_success = xno; then as_fn_error $? "*** A compiler with support for C++11 language features is required." "$LINENO" 5 fi else if test x$ac_success = xno; then HAVE_CXX11=0 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: No compiler with C++11 support was found" >&5 printf "%s\n" "$as_me: No compiler with C++11 support was found" >&6;} else HAVE_CXX11=1 printf "%s\n" "#define HAVE_CXX11 1" >>confdefs.h fi fi : ${WFLAGS=-Wall} CXXFLAGS="$CXXFLAGS $WFLAGS" ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu # -pthread Seems to be required by g++ -stc=c++11 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether C++ compiler accepts " >&5 printf %s "checking whether C++ compiler accepts ... " >&6; } if test ${ax_cv_check_cxxflags__+y} then : printf %s "(cached) " >&6 else $as_nop ax_check_save_flags=$CXXFLAGS CXXFLAGS="$CXXFLAGS " cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : ax_cv_check_cxxflags__=yes else $as_nop ax_cv_check_cxxflags__=no fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext CXXFLAGS=$ax_check_save_flags fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cxxflags__" >&5 printf "%s\n" "$ax_cv_check_cxxflags__" >&6; } if test x"$ax_cv_check_cxxflags__" = xyes then : : else $as_nop : fi if test ${CXXFLAGS+y} then : case " $CXXFLAGS " in *" "*) { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: : CXXFLAGS already contains "; } >&5 (: CXXFLAGS already contains ) 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } ;; *) { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: : CXXFLAGS=\"\$CXXFLAGS \""; } >&5 (: CXXFLAGS="$CXXFLAGS ") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } CXXFLAGS="$CXXFLAGS " ;; esac else $as_nop CXXFLAGS="" fi for flag in -pthread; do as_CACHEVAR=`printf "%s\n" "ax_cv_check_cxxflags__$flag" | $as_tr_sh` { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether C++ compiler accepts $flag" >&5 printf %s "checking whether C++ compiler accepts $flag... 
" >&6; } if eval test \${$as_CACHEVAR+y} then : printf %s "(cached) " >&6 else $as_nop ax_check_save_flags=$CXXFLAGS CXXFLAGS="$CXXFLAGS $flag" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main (void) { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : eval "$as_CACHEVAR=yes" else $as_nop eval "$as_CACHEVAR=no" fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext CXXFLAGS=$ax_check_save_flags fi eval ac_res=\$$as_CACHEVAR { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 printf "%s\n" "$ac_res" >&6; } if test x"`eval 'as_val=${'$as_CACHEVAR'};printf "%s\n" "$as_val"'`" = xyes then : if test ${CXXFLAGS+y} then : case " $CXXFLAGS " in *" $flag "*) { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: : CXXFLAGS already contains \$flag"; } >&5 (: CXXFLAGS already contains $flag) 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } ;; *) { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: : CXXFLAGS=\"\$CXXFLAGS \$flag\""; } >&5 (: CXXFLAGS="$CXXFLAGS $flag") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } CXXFLAGS="$CXXFLAGS $flag" ;; esac else $as_nop CXXFLAGS="$flag" fi else $as_nop : fi done ac_fn_cxx_check_func "$LINENO" "openat" "ac_cv_func_openat" if test "x$ac_cv_func_openat" = xyes then : printf "%s\n" "#define HAVE_OPENAT 1" >>confdefs.h fi ac_fn_cxx_check_func "$LINENO" "fdopendir" "ac_cv_func_fdopendir" if test "x$ac_cv_func_fdopendir" = xyes then : printf "%s\n" "#define HAVE_FDOPENDIR 1" >>confdefs.h fi if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_path_PKG_CONFIG+y} then : printf %s "(cached) " >&6 else $as_nop case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir$ac_word$ac_exec_ext" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 printf "%s\n" "$PKG_CONFIG" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_path_ac_pt_PKG_CONFIG+y} then : printf %s "(cached) " >&6 else $as_nop case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. 
;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir$ac_word$ac_exec_ext" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 printf "%s\n" "$ac_pt_PKG_CONFIG" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 printf %s "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } PKG_CONFIG="" fi fi pkg_failed=no { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sqlite3" >&5 printf %s "checking for sqlite3... " >&6; } if test -n "$sqlite3_CFLAGS"; then pkg_cv_sqlite3_CFLAGS="$sqlite3_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"sqlite3\""; } >&5 ($PKG_CONFIG --exists --print-errors "sqlite3") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_sqlite3_CFLAGS=`$PKG_CONFIG --cflags "sqlite3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$sqlite3_LIBS"; then pkg_cv_sqlite3_LIBS="$sqlite3_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"sqlite3\""; } >&5 ($PKG_CONFIG --exists --print-errors "sqlite3") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_sqlite3_LIBS=`$PKG_CONFIG --libs "sqlite3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then sqlite3_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "sqlite3" 2>&1` else sqlite3_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "sqlite3" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$sqlite3_PKG_ERRORS" >&5 as_fn_error $? 
"Package requirements (sqlite3) were not met: $sqlite3_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables sqlite3_CFLAGS and sqlite3_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables sqlite3_CFLAGS and sqlite3_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else sqlite3_CFLAGS=$pkg_cv_sqlite3_CFLAGS sqlite3_LIBS=$pkg_cv_sqlite3_LIBS { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } fi pkg_failed=no { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libcrypto" >&5 printf %s "checking for libcrypto... " >&6; } if test -n "$libcrypto_CFLAGS"; then pkg_cv_libcrypto_CFLAGS="$libcrypto_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcrypto\""; } >&5 ($PKG_CONFIG --exists --print-errors "libcrypto") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_libcrypto_CFLAGS=`$PKG_CONFIG --cflags "libcrypto" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$libcrypto_LIBS"; then pkg_cv_libcrypto_LIBS="$libcrypto_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcrypto\""; } >&5 ($PKG_CONFIG --exists --print-errors "libcrypto") 2>&5 ac_status=$? printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_libcrypto_LIBS=`$PKG_CONFIG --libs "libcrypto" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then libcrypto_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libcrypto" 2>&1` else libcrypto_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libcrypto" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$libcrypto_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (libcrypto) were not met: $libcrypto_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables libcrypto_CFLAGS and libcrypto_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." 
"$LINENO" 5 elif test $pkg_failed = untried; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables libcrypto_CFLAGS and libcrypto_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else libcrypto_CFLAGS=$pkg_cv_libcrypto_CFLAGS libcrypto_LIBS=$pkg_cv_libcrypto_LIBS { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } fi # Extract the first word of "xapian-config", so it can be a program name with args. set dummy xapian-config; ac_word=$2 { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 printf %s "checking for $ac_word... " >&6; } if test ${ac_cv_path_XAPIAN_CONFIG+y} then : printf %s "(cached) " >&6 else $as_nop case $XAPIAN_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_XAPIAN_CONFIG="$XAPIAN_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_path_XAPIAN_CONFIG="$as_dir$ac_word$ac_exec_ext" printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi XAPIAN_CONFIG=$ac_cv_path_XAPIAN_CONFIG if test -n "$XAPIAN_CONFIG"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $XAPIAN_CONFIG" >&5 printf "%s\n" "$XAPIAN_CONFIG" >&6; } else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 printf "%s\n" "no" >&6; } fi test -n "$XAPIAN_CONFIG" || as_fn_error $? "Cannot find xapian-config" "$LINENO" 5 if ! xapian_CPPFLAGS=$($XAPIAN_CONFIG --cxxflags) \ || ! xapian_LIBS=$($XAPIAN_CONFIG --libs); then as_fn_error $? "Error running $XAPIAN_CONFIG" "$LINENO" 5 fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking For st_mtim in struct stat" >&5 printf %s "checking For st_mtim in struct stat... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main (void) { return sizeof(stat::st_mtim) > 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : have_st_mtim=yes else $as_nop have_st_mtim=no fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $have_st_mtim" >&5 printf "%s\n" "$have_st_mtim" >&6; } if test yes = "$have_st_mtim"; then ST_MTIM=st_mtim else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking For st_mtimespec in struct stat" >&5 printf %s "checking For st_mtimespec in struct stat... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include int main (void) { int sz = sizeof(stat::st_mtimespec); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO" then : have_st_mtimespec=yes else $as_nop have_st_mtimespec=no fi rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $have_st_mtimespec" >&5 printf "%s\n" "$have_st_mtimespec" >&6; } if test yes = "$have_st_mtimespec"; then ST_MTIM=st_mtimespec else as_fn_error $? "Cannot find nanoseconds mtime in stat struct" "$LINENO" 5 fi fi printf "%s\n" "#define ST_MTIM $ST_MTIM" >>confdefs.h ac_config_files="$ac_config_files Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 printf "%s\n" "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. 
test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # Transform confdefs.h into DEFS. # Protect against shell expansion while executing Makefile rules. # Protect against Makefile macro expansion. # # If the first sed substitution is executed (which looks for macros that # take arguments), then branch to the quote section. Otherwise, # look for a macro that doesn't take arguments. ac_script=' :mline /\\$/{ N s,\\\n,, b mline } t clear :clear s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g t quote s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g t quote b any :quote s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g s/\[/\\&/g s/\]/\\&/g s/\$/$$/g H :any ${ g s/^\n// s/\n/ /g p } ' DEFS=`sed -n "$ac_script" confdefs.h` ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 printf %s "checking that generated files are newer than configure... " >&6; } if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5 printf "%s\n" "done" >&6; } if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then as_fn_error $? "conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh as_nop=: if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else $as_nop case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi # Reset variables that may have inherited troublesome values from # the environment. # IFS needs to be set, to space, tab, and newline, in precisely that order. 
# (If _AS_PATH_WALK were called with IFS unset, it would have the # side effect of setting IFS to empty, thus disabling word splitting.) # Quoting is to prevent editors from complaining about space-tab. as_nl=' ' export as_nl IFS=" "" $as_nl" PS1='$ ' PS2='> ' PS4='+ ' # Ensure predictable behavior from utilities with locale-dependent output. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # We cannot yet rely on "unset" to work, but we need these variables # to be unset--not just set to an empty or harmless value--now, to # avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct # also avoids known problems related to "unset" and subshell syntax # in other old shells (e.g. bash 2.01 and pdksh 5.2.14). for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH do eval test \${$as_var+y} \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done # Ensure that fds 0, 1, and 2 are open. if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi if (exec 3>&2) ; then :; else exec 2>/dev/null; fi # The user is always right. if ${PATH_SEPARATOR+false} :; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS case $as_dir in #((( '') as_dir=./ ;; */) ;; *) as_dir=$as_dir/ ;; esac test -r "$as_dir$0" && as_myself=$as_dir$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi printf "%s\n" "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null then : eval 'as_fn_append () { eval $1+=\$2 }' else $as_nop as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... 
# ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null then : eval 'as_fn_arith () { as_val=$(( $* )) }' else $as_nop as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits # Determine whether it's possible to make 'echo' print without a newline. # These variables are no longer used directly by Autoconf, but are AC_SUBSTed # for compatibility with existing Makefiles. ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac # For backward compatibility with old third-party macros, we provide # the shell variables $as_echo and $as_echo_n. New code should use # AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. as_echo='printf %s\n' as_echo_n='printf %s' rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? 
"cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by muchsync $as_me 7, which was generated by GNU Autoconf 2.71. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE Configuration files: $config_files Configuration commands: $config_commands Report bugs to the package provider." _ACEOF ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"` ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"` cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ muchsync config.status 7 configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" Copyright (C) 2021 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. 
-recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) printf "%s\n" "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) printf "%s\n" "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --he | --h | --help | --hel | -h ) printf "%s\n" "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX printf "%s\n" "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. 
if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" eval set X " :F $CONFIG_FILES :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 printf "%s\n" "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`printf "%s\n" "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :C) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 printf "%s\n" "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. # TODO: see whether this extra hack can be removed once we start # requiring Autoconf 2.70 or later. case $CONFIG_FILES in #( *\'*) : eval set x "$CONFIG_FILES" ;; #( *) : set x $CONFIG_FILES ;; #( *) : ;; esac shift # Used to flag and report bootstrapping failures. am_rc=0 for am_mf do # Strip MF so we end up with the name of the file. am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile which includes # dependency-tracking related rules and includes. # Grep'ing the whole file directly is not great: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. 
sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ || continue am_dirpart=`$as_dirname -- "$am_mf" || $as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$am_mf" : 'X\(//\)[^/]' \| \ X"$am_mf" : 'X\(//\)$' \| \ X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X"$am_mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` am_filepart=`$as_basename -- "$am_mf" || $as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ X"$am_mf" : 'X\(//\)$' \| \ X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || printf "%s\n" X/"$am_mf" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` { echo "$as_me:$LINENO: cd "$am_dirpart" \ && sed -e '/# am--include-marker/d' "$am_filepart" \ | $MAKE -f - am--depfiles" >&5 (cd "$am_dirpart" \ && sed -e '/# am--include-marker/d' "$am_filepart" \ | $MAKE -f - am--depfiles) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } || am_rc=$? done if test $am_rc -ne 0; then { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Something went wrong bootstrapping makefile fragments for automatic dependency tracking. If GNU make was not used, consider re-running the configure script with MAKE=\"gmake\" (or whatever is necessary). You can also try re-running configure with the '--disable-dependency-tracking' option to at least be able to build the package (albeit without support for automatic dependency tracking). See \`config.log' for more details" "$LINENO" 5; } fi { am_dirpart=; unset am_dirpart;} { am_filepart=; unset am_filepart;} { am_mf=; unset am_mf;} { am_rc=; unset am_rc;} rm -f conftest-deps.mk } ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. 
$ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi muchsync-7/ChangeLog0000644000175000017500000000000012302053223011524 00000000000000muchsync-7/sqlstmt.h0000644000175000017500000001216412536212431011657 00000000000000// -*- C++ -*- #ifndef _SQLSTMT_H_ #define _SQLSTMT_H_ 1 #include #include #include #include #include using i64 = sqlite3_int64; struct sqlerr_t : public std::runtime_error { sqlerr_t (const std::string &msg) : std::runtime_error (msg) {} }; /* A sqldone_t is thrown if you ask for data when no rows are left */ struct sqldone_t : public std::runtime_error { sqldone_t (const std::string &msg) : std::runtime_error (msg) {} }; class sqlstmt_t { sqlite3_stmt *stmt_; int status_ = SQLITE_OK; sqlstmt_t &set_status (int status); void fail (); void ensure_row () { if (status_ != SQLITE_ROW) fail(); } public: explicit sqlstmt_t(sqlite3_stmt *stmt) : stmt_(stmt) {} explicit sqlstmt_t(sqlite3 *db, const char *fmt, ...); sqlstmt_t(const sqlstmt_t &r); sqlstmt_t(sqlstmt_t &&r) : stmt_ (r.stmt_) { r.stmt_ = nullptr; } ~sqlstmt_t() { sqlite3_finalize (stmt_); } sqlite3_stmt *get() { return stmt_; } sqlite3 *getdb() { return sqlite3_db_handle(stmt_); } int status() const { return status_; } bool row() { if (status_ == SQLITE_ROW) return true; // Something like SQLITE_OK indicates row() not used after step() assert (status_ == SQLITE_DONE); return false; } bool done() { return !row(); } sqlstmt_t &step() { return set_status(sqlite3_step (stmt_)); } sqlstmt_t &reset() { return set_status(sqlite3_reset (stmt_)); } /* Access columns */ template T column(int); bool null(int i) { ensure_row(); return sqlite3_column_type (stmt_, i) == SQLITE_NULL; } sqlite3_int64 integer(int i) { ensure_row(); return sqlite3_column_int64 (stmt_, i); } double real(int i) { ensure_row(); return sqlite3_column_double (stmt_, i); } std::string str(int i) { ensure_row(); return { static_cast (sqlite3_column_blob (stmt_, i)), size_t (sqlite3_column_bytes (stmt_, i)) }; } const char *c_str(int i) { ensure_row(); return reinterpret_cast (sqlite3_column_text (stmt_, i)); } sqlite3_value *value(int i) { ensure_row(); return sqlite3_column_value(stmt_, i); } /* Bind parameters */ sqlstmt_t &bind_null(int i) { return set_status (sqlite3_bind_null(stmt_, i)); } sqlstmt_t &bind_int(int i, sqlite3_int64 v) { return set_status (sqlite3_bind_int64(stmt_, i, v)); } sqlstmt_t &bind_real(int i, double v) { return set_status (sqlite3_bind_double(stmt_, i, v)); } sqlstmt_t &bind_text(int i, const std::string &v) { return set_status (sqlite3_bind_text(stmt_, i, v.data(), v.size(), SQLITE_STATIC)); } sqlstmt_t &bind_text(int i, std::string &&v) { return set_status (sqlite3_bind_text(stmt_, i, v.data(), v.size(), SQLITE_TRANSIENT)); } sqlstmt_t &bind_text(int i, const char *p, int len = -1) { return set_status (sqlite3_bind_text(stmt_, i, p, len, SQLITE_STATIC)); } sqlstmt_t &bind_blob(int i, const void *p, int len) { return set_status (sqlite3_bind_blob(stmt_, i, p, len, SQLITE_STATIC)); } sqlstmt_t &bind_value(int i, const sqlite3_value *v) { return set_status (sqlite3_bind_value (stmt_, i, v)); } /* Overloaded bind */ sqlstmt_t &bind(int i, std::nullptr_t) { return bind_null(i); } sqlstmt_t &bind(int i, sqlite3_int64 v) { return bind_int(i, v); } sqlstmt_t &bind(int i, int v) { return 
bind_int(i, v); } sqlstmt_t &bind(int i, unsigned v) { return bind_int(i, v); } sqlstmt_t &bind(int i, const double &v) { return bind_real(i, v); } sqlstmt_t &bind(int i, const std::string &v) { return bind_text(i, v); } sqlstmt_t &bind(int i, std::string &&v) { return bind_text(i, std::move(v)); } sqlstmt_t &bind(int i, const char *v) { return bind_text(i, v); } sqlstmt_t &bind(int i, const sqlite3_value *v) { return bind_value(i, v); } /* Bind multiple parameters at once */ sqlstmt_t &_param(int) { return *this; } template sqlstmt_t &_param(int i, H&& h, T&&... t) { return this->bind(i, std::forward(h))._param(i+1, std::forward(t)...); } template sqlstmt_t ¶m(Args&&... args) { return _param (1, std::forward (args)...); } /* Bind tuple */ template struct _tparm_helper { template static sqlstmt_t &go(sqlstmt_t &s, const std::tuple &t) { return _tparm_helper::go(s.bind(N, std::get(t)), t); } }; template sqlstmt_t &tparam(const std::tuple &t) { return _tparm_helper::go(*this, t); } }; template<> struct sqlstmt_t::_tparm_helper<0> { template static sqlstmt_t &go(sqlstmt_t &s, const std::tuple &t) { return s; } }; template<> inline bool sqlstmt_t::column(int i) { return null(i); } template<> inline i64 sqlstmt_t::column(int i) { return integer(i); } template<> inline double sqlstmt_t::column(int i) { return real(i); } template<> inline std::string sqlstmt_t::column(int i) { return str(i); } template<> inline const char * sqlstmt_t::column(int i) { return c_str(i); } void sqlexec (sqlite3 *db, const char *fmt, ...); #endif /* !_SQLSTMT_H_ */ muchsync-7/protocol.cc0000644000175000017500000007127714357054642012174 00000000000000 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "misc.h" #include "muchsync.h" #include "infinibuf.h" using namespace std; bool interrupted; class msg_sync { sqlite3 *db_; notmuch_db &nm_; sqlstmt_t update_hash_stamp_; sqlstmt_t add_file_; sqlstmt_t del_file_; sqlstmt_t set_link_count_; sqlstmt_t delete_link_count_; sqlstmt_t clear_tags_; sqlstmt_t add_tag_; sqlstmt_t update_message_id_stamp_; sqlstmt_t record_docid_; std::unordered_map dir_ids_; std::pair mystamp_; i64 get_dir_docid (const string &dir); public: hash_lookup hashdb; tag_lookup tagdb; msg_sync(notmuch_db &nm, sqlite3 *db); bool hash_sync(const versvector &remote_sync_vector, const hash_info &remote_hash_info, const string *sourcefile, const tag_info *tip); bool tag_sync(const versvector &remote_sync_vector, const tag_info &remote_tag_info); void commit(); }; static void interrupt(int sig) { interrupted = true; } static void catch_interrupts(int sig, bool active) { struct sigaction act; memset(&act, 0, sizeof(act)); if (active) { act.sa_handler = &interrupt; act.sa_flags = SA_RESETHAND; } else { if (interrupted) exit(1); act.sa_handler = SIG_DFL; } sigaction(sig, &act, nullptr); } static string myhostname() { char buf[257]; buf[sizeof(buf) - 1] = '\0'; if (gethostname (buf, sizeof(buf) - 1)) throw runtime_error (string("gethsotname: ") + strerror (errno)); return buf; } inline uint32_t randint() { uint32_t v; if (RAND_bytes ((unsigned char *) &v, sizeof (v)) == -1) throw runtime_error ("RAND_bytes failed"); return v; } static string maildir_name() { static string hostname = myhostname(); static int pid = getpid(); static int ndeliveries = 0; ostringstream os; using namespace std::chrono; auto now = system_clock::now().time_since_epoch(); os << duration_cast(now).count() << ".M" << duration_cast(now % 
seconds(1)).count() << 'P' << pid << 'Q' << ++ndeliveries << 'R' << setfill('0') << hex << setw(2 * sizeof(randint())) << randint() << '.' << hostname; return os.str(); } static string new_maildir_path(const string &dir, string *namep = nullptr) { string name = maildir_name(); if ((dir.size() > 4 && !strncmp(dir.data() + (dir.size() - 4), "/cur", 4)) || (dir.size() == 3 && dir == "cur")) name += ":2,"; if (namep) *namep = name; if (dir.size() && dir.back() != '/') return dir + "/" + name; else return dir + name; } static writestamp get_mystamp(sqlite3 *db) { sqlstmt_t s (db, "SELECT replica, version " "FROM configuration JOIN sync_vector ON (value = replica) " "WHERE key = 'self';"); if (!s.step().row()) throw runtime_error ("Cannot find myself in sync_vector"); return { s.integer(0), s.integer(1) }; } msg_sync::msg_sync (notmuch_db &nm, sqlite3 *db) : db_(db), nm_ (nm), update_hash_stamp_(db_, "UPDATE maildir_hashes " "SET replica = ?, version = ? WHERE hash_id = ?;"), add_file_(db_, "INSERT INTO xapian_files" " (dir_docid, name, docid, mtime, inode, hash_id)" " VALUES (?, ?, ?, ?, ?, ?);"), del_file_(db, "DELETE FROM xapian_files" " WHERE (dir_docid = ?) AND (name = ?);"), set_link_count_(db_, "INSERT OR REPLACE INTO xapian_nlinks" " (hash_id, dir_docid, link_count) VALUES (?, ?, ?);"), delete_link_count_(db_, "DELETE FROM xapian_nlinks" " WHERE (hash_id = ?) AND (dir_docid = ?);"), clear_tags_(db_, "DELETE FROM tags WHERE docid = ?;"), add_tag_(db_, "INSERT OR IGNORE INTO tags (docid, tag) VALUES (?, ?);"), update_message_id_stamp_(db_, "UPDATE message_ids SET" " replica = ?, version = ? WHERE docid = ?;"), record_docid_(db_, "INSERT OR IGNORE INTO message_ids" " (message_id, docid, replica, version)" " VALUES (?, ?, 0, 0);"), mystamp_(get_mystamp(db_)), hashdb (nm_.maildir, db_), tagdb (db_) { sqlstmt_t s (db_, "SELECT dir_path, dir_docid FROM xapian_dirs;"); while (s.step().row()) { string dir {s.str(0)}; i64 dir_id {s.integer(1)}; dir_ids_.emplace (dir, dir_id); } } static bool sanity_check_path (const string &path) { if (path == "..") return false; if (path.size() < 3) return true; return (path.substr(0, 3) != "../" && path.substr(path.size()-3) != "/.." 
&& path.find("/../") == string::npos); } inline bool is_dir (const string &path) { struct stat sb; return !stat (path.c_str(), &sb) && (errno = ENOTDIR, S_ISDIR (sb.st_mode)); } static bool recursive_mkdir(string path) { string::size_type n = 0; for (;;) { n = path.find_first_not_of ('/', n); if (n == string::npos) return true; n = path.find_first_of ('/', n); if (n != string::npos) path[n] = '\0'; if (!is_dir (path)) { if (mkdir (path.c_str(), 0777)) { cerr << "creating directory " << path << " failed (" << strerror(errno) << ")\n"; return false; } if (opt_verbose > 0) cerr << "created directory " << path.c_str() << '\n'; } if (n == string::npos) return true; path[n] = '/'; } } static bool maildir_mkdir(string path) { if (!recursive_mkdir(path)) return false; size_t pos = path.rfind('/'); if (pos == string::npos) pos = 0; else pos++; string prefix = path.substr(0, pos); string suffix = path.substr(pos); if (suffix == "new") { if (!mkdir((prefix + "cur").c_str(), 0777) && opt_verbose > 0) cerr << "created directory " << prefix << "cur\n"; if (!mkdir((prefix + "tmp").c_str(), 0777) && opt_verbose > 0) cerr << "created directory " << prefix << "tmp\n"; } else if (suffix == "cur") { if (!mkdir((prefix + "new").c_str(), 0777) && opt_verbose > 0) cerr << "created directory " << prefix << "new\n"; if (!mkdir((prefix + "tmp").c_str(), 0777) && opt_verbose > 0) cerr << "created directory " << prefix << "tmp\n"; } return true; } inline Xapian::docid notmuch_directory_get_document_id (const notmuch_directory_t *dir) { struct fake_directory { notmuch_database_t *notmuch; Xapian::docid doc_id; }; return reinterpret_cast(dir)->doc_id; } i64 msg_sync::get_dir_docid(const string &dir) { auto i = dir_ids_.find(dir); if (i != dir_ids_.end()) return i->second; i64 dir_docid = nm_.get_dir_docid(dir.c_str()); sqlexec (db_, "INSERT OR REPLACE INTO xapian_dirs" " (dir_path, dir_docid, dir_mtime) VALUES (%Q, %lld, -1);", dir.c_str(), i64(dir_docid)); dir_ids_.emplace(dir, dir_docid); return dir_docid; } static void resolve_one_link_conflict(const unordered_map &a, const unordered_map &b, const string &name, unordered_map &out) { if (out.find(name) != out.end()) return; size_t pos = name.rfind('/'); if (pos == string::npos) pos = 0; else pos++; string suffix = name.substr(pos); if (suffix != "cur" && suffix != "new") { out[name] = max(find_default(0, a, name), find_default(0, b, name)); return; } string base = name.substr(0, pos); string newpath = base + "new", curpath = base + "cur"; i64 curval = max(find_default(0, a, curpath), find_default(0, b, curpath)); i64 newval = (max(find_default(0, a, curpath) + find_default(0, a, newpath), find_default(0, b, curpath) + find_default(0, b, newpath)) - curval); if (curval) out[curpath] = curval; if (newval) out[newpath] = newval; } static unordered_map resolve_link_conflicts(const unordered_map &a, const unordered_map &b) { unordered_map ret; for (auto ia : a) resolve_one_link_conflict(a, b, ia.first, ret); for (auto ib : b) resolve_one_link_conflict(a, b, ib.first, ret); return ret; } bool msg_sync::hash_sync(const versvector &rvv, const hash_info &rhi, const string *sourcep, const tag_info *tip) { hash_info lhi; i64 docid = -1; if (hashdb.lookup(rhi.hash)) { /* We might already be up to date from a previous sync that never * completed, in which case there is nothing to do. 
*/ if (hashdb.info().hash_stamp == rhi.hash_stamp) return true; lhi = hashdb.info(); } else lhi.hash = rhi.hash; bool links_conflict = lhi.hash_stamp.second > find_default (0, rvv, lhi.hash_stamp.first); bool deleting = rhi.dirs.empty() && (!links_conflict || lhi.dirs.empty()); unordered_map needlinks (links_conflict ? resolve_link_conflicts (lhi.dirs, rhi.dirs) : rhi.dirs); bool needsource = false; for (auto i : lhi.dirs) needlinks[i.first] -= i.second; for (auto i : needlinks) if (i.second > 0) { needsource = true; break; } /* find copy of content, if needed */ string source; bool clean_trash = false; struct stat sb; if (needsource) { if (sourcep) source = *sourcep; else if (!hashdb.ok() || !hashdb.get_pathname (&source, &clean_trash)) return false; if (stat(source.c_str(), &sb)) return false; } if (!hashdb.ok()) { hashdb.create(rhi); lhi = hashdb.info(); } /* Set writestamp for new link counts */ const writestamp *wsp = links_conflict ? &mystamp_ : &rhi.hash_stamp; update_hash_stamp_.reset() .param(wsp->first, wsp->second, hashdb.hash_id()).step(); auto save_needlinks = needlinks; /* add missing links */ for (auto li : needlinks) for (; li.second > 0; --li.second) { if (!sanity_check_path(li.first)) break; string newname; string target = new_maildir_path(hashdb.maildir + "/" + li.first, &newname); if (link(source.c_str(), target.c_str()) && (errno != ENOENT || !maildir_mkdir(hashdb.maildir + "/" + li.first) || link(source.c_str(), target.c_str()))) throw runtime_error (string("link (\"") + source + "\", \"" + target + "\"): " + strerror(errno)); cleanup end_atomic; if (tip) { nm_.begin_atomic(); end_atomic.reset(mem_fn(¬much_db::end_atomic), ref(nm_)); } bool isnew; docid = notmuch_db::get_docid(nm_.add_message(target, tip ? &tip->tags : nullptr, &isnew)); i64 dir_id = get_dir_docid(li.first); add_file_.reset().param(dir_id, newname, docid, ts_to_double(sb.ST_MTIM), i64(sb.st_ino), hashdb.hash_id()).step(); if (isnew) { record_docid_.reset().param(rhi.message_id, docid).step(); // tip might be NULL here when undeleting a file if (tip) { update_message_id_stamp_.reset() .param(tip->tag_stamp.first, tip->tag_stamp.second, docid).step(); add_tag_.reset().bind_int(1, docid); for (auto t : tip->tags) add_tag_.reset().bind_text(2, t).step(); } else { // The empty tag is always invalid, so if worse comes to // worst and we crash at the wrong time, the next scan will // end up bumping the version number on this message ID. add_tag_.reset().param(docid, "").step(); } } } /* remove extra links */ if (!links_conflict) for (int i = 0, e = hashdb.nlinks(); i < e; i++) { i64 &n = needlinks[hashdb.links().at(i).first]; if (n < 0) { string path = hashdb.link_path(i); bool err; if (deleting) { string dest = trashname(hashdb.maildir, rhi.hash); err = rename (path.c_str(), dest.c_str()); if (err) cerr << "rename " << path << ' ' << trashname(hashdb.maildir, rhi.hash) << ": " << strerror (errno) << '\n'; /* You can't rename a file onto itself, so if the trash * already contains a hard link to the same inode, we need * to delete the original. 
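 * (POSIX specifies that rename() returns success and does nothing when
 * the source and destination are links to the same file, so the source
 * still has to be removed explicitly.)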
*/ else unlink (path.c_str()); } else { err = unlink (path.c_str()); if (err) cerr << "unlink " << path << ' ' << ": " << strerror (errno) << '\n'; } if (!err) { ++n; auto df = hashdb.links()[i]; i64 dir_docid = get_dir_docid(df.first); del_file_.reset().param(dir_docid, df.second).step(); nm_.remove_message(path); } } } /* Adjust link counts in database */ for (auto li : save_needlinks) if (li.second != 0) { i64 dir_docid = get_dir_docid(li.first); i64 newcount = find_default(0, lhi.dirs, li.first) + li.second; if (newcount > 0) set_link_count_.reset() .param(hashdb.hash_id(), dir_docid, newcount).step(); else delete_link_count_.reset().param(hashdb.hash_id(), dir_docid).step(); } if (clean_trash) unlink (trashname(hashdb.maildir, rhi.hash).c_str()); return true; } bool msg_sync::tag_sync(const versvector &rvv, const tag_info &rti) { if (!tagdb.lookup(rti.message_id)) { cerr << "warning: can't find " << rti.message_id << '\n'; return false; } const tag_info <i = tagdb.info(); if (lti.tag_stamp == rti.tag_stamp) return true; sqlexec (db_, "SAVEPOINT tag_sync;"); cleanup c (sqlexec, db_, "ROLLBACK TO tag_sync;"); notmuch_db::message_t msg = nm_.get_message (rti.message_id.c_str()); if (tagdb.docid() != nm_.get_docid(msg)) { cerr << "error: muchsync docid " << tagdb.docid() << " != xapian docid " << nm_.get_docid(msg) << " for message " << rti.message_id << '\n'; terminate(); } bool tags_conflict = lti.tag_stamp.second > find_default (0, rvv, lti.tag_stamp.first); unordered_set newtags (rti.tags); if (tags_conflict) { // Logically OR most tags for (auto i : lti.tags) newtags.insert(i); // But logically AND and_tags for (auto i : nm_.and_tags) if (rti.tags.find(i) == rti.tags.end() || lti.tags.find(i) == lti.tags.end()) newtags.erase(i); } nm_.set_tags(msg, newtags); const writestamp *wsp = tags_conflict ? 
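      // A conflicting merge counts as a fresh local write, so stamp it
      // with our own replica/version; otherwise adopt the remote
      // tag_stamp unchanged.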
&mystamp_ : &rti.tag_stamp; update_message_id_stamp_.reset() .param(wsp->first, wsp->second, tagdb.docid()) .step(); clear_tags_.reset().param(tagdb.docid()).step(); add_tag_.reset().bind_int(1, tagdb.docid()); for (auto t : newtags) add_tag_.reset().bind_text(2, t).step(); c.release(); sqlexec (db_, "RELEASE tag_sync;"); return true; } static string receive_message (istream &in, const hash_info &hi, const string &maildir) { string path (maildir + muchsync_tmpdir + "/" + maildir_name()); ofstream tmp (path, ios_base::out|ios_base::trunc); if (!tmp.is_open()) throw runtime_error (path + ": " + strerror(errno)); cleanup _unlink (unlink, path.c_str()); i64 size = hi.size; hash_ctx ctx; while (size > 0) { char buf[16384]; int n = min(sizeof(buf), size); in.read(buf, n); if (!in.good()) throw runtime_error ("premature EOF receiving message"); ctx.update(buf, n); tmp.write(buf, n); if (!tmp.good()) throw runtime_error (string("error writing mail file: ") + strerror(errno)); size -= n; } tmp.close(); if (ctx.final() != hi.hash) throw runtime_error ("message received does not match hash"); _unlink.release(); return path; } static void set_peer_vector (sqlite3 *sqldb, const versvector &vv) { sqlexec (sqldb, R"( CREATE TEMP TABLE IF NOT EXISTS peer_vector ( replica INTEGER PRIMARY KEY, known_version INTEGER); DELETE FROM peer_vector; INSERT OR REPLACE INTO peer_vector SELECT DISTINCT replica, 0 FROM message_ids; INSERT OR REPLACE INTO peer_vector SELECT DISTINCT replica, 0 FROM maildir_hashes; )"); sqlstmt_t pvadd (sqldb, "INSERT OR REPLACE INTO" " peer_vector (replica, known_version) VALUES (?, ?);"); for (writestamp ws : vv) pvadd.param(ws.first, ws.second).step().reset(); } static void record_peer_vector(sqlite3 *sqldb) { sqlexec(sqldb, R"( INSERT OR REPLACE INTO sync_vector (replica, version) SELECT replica, p.known_version FROM peer_vector p LEFT OUTER JOIN sync_vector s USING (replica) WHERE ifnull (s.version, 0) < p.known_version)"); } static i64 send_links (sqlite3 *sqldb, const string &prefix, ostream &out) { unordered_map dirs; { sqlstmt_t d (sqldb, "SELECT dir_docid, dir_path FROM xapian_dirs;"); while (d.step().row()) dirs.emplace (d.integer(0), d.str(1)); } sqlstmt_t changed (sqldb, R"( SELECT h.hash_id, hash, size, message_id, h.replica, h.version, dir_docid, link_count FROM (peer_vector p JOIN maildir_hashes h ON ((p.replica = h.replica) AND (p.known_version < h.version))) LEFT OUTER JOIN xapian_nlinks USING (hash_id);)"); i64 count = 0; hash_info hi; changed.step(); while (changed.row()) { i64 hash_id = changed.integer(0); hi.hash = changed.str(1); hi.size = changed.integer(2); hi.message_id = changed.str(3); hi.hash_stamp.first = changed.integer(4); hi.hash_stamp.second = changed.integer(5); hi.dirs.clear(); if (changed.null(6)) changed.step(); else { hi.dirs.emplace(dirs[changed.integer(6)], changed.integer(7)); while (changed.step().row() && changed.integer(0) == hash_id) hi.dirs.emplace(dirs[changed.integer(6)], changed.integer(7)); } out << prefix << hi << '\n'; if (opt_verbose > 3) cerr << prefix << hi << '\n'; count++; } return count; } static i64 send_tags (sqlite3 *sqldb, const string &prefix, ostream &out) { sqlstmt_t changed (sqldb, R"( SELECT m.docid, m.message_id, m.replica, m.version, tags.tag FROM (peer_vector p JOIN message_ids m ON ((p.replica = m.replica) AND (p.known_version < m.version))) LEFT OUTER JOIN tags USING (docid);)"); tag_info ti; changed.step(); i64 count = 0; while (changed.row()) { i64 docid = changed.integer(0); ti.message_id = changed.str(1); 
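    // The LEFT JOIN delivers one row per (message, tag) pair; the loop
    // below folds every tag for this docid into a single tag_info before
    // it is written out.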
ti.tag_stamp.first = changed.integer(2); ti.tag_stamp.second = changed.integer(3); ti.tags.clear(); if (changed.null(4)) changed.step(); else { ti.tags.insert (changed.str(4)); while (changed.step().row() && changed.integer(0) == docid) ti.tags.insert (changed.str(4)); } out << prefix << ti << '\n'; if (opt_verbose > 3) cerr << prefix << ti << '\n'; count++; } return count; } static bool send_content(hash_lookup &hashdb, tag_lookup &tagdb, const string &hash, const string &prefix, ostream &out) { streambuf *sb; if (hashdb.lookup(hash) && (sb = hashdb.content()) && tagdb.lookup(hashdb.info().message_id)) { out << prefix << hashdb.info() << ' ' << tagdb.info() << '\n' << sb; return true; } return false; } void muchsync_server(sqlite3 *db, notmuch_db &nm) { msg_sync msync(nm, db); hash_lookup &hashdb = msync.hashdb; tag_lookup tagdb(db); bool remotevv_valid = false; versvector remotevv; bool transaction = false; auto xbegin = [&transaction,db]() { if (!transaction) { sqlexec(db, "BEGIN IMMEDIATE;"); transaction = true; } }; cout << "200 " << dbvers << '\n'; string cmdline; istringstream cmdstream; while (getline(cin, cmdline).good()) { cmdstream.clear(); cmdstream.str(cmdline); string cmd; cmdstream >> cmd; if (cmd.empty()) { cout << "500 invalid empty line\n"; } else if (cmd == "quit") { cout << "200 goodbye\n"; return; } else if (cmd == "conffile") { ifstream is (opt_notmuch_config); ostringstream os; if (is.is_open() && (os << is.rdbuf())) { string conf (os.str()); cout << "221-" << conf.length() << '\n' << conf << "221 ok\n"; } else cout << "410 cannot find configuration\n"; } else if (cmd.substr(1) == "info") { string key; cmdstream >> key; switch (cmd[0]) { case 'l': // linfo command if (hashdb.lookup(key)) cout << "210 " << hashdb.info() << '\n'; else cout << "510 unknown hash\n"; break; case 't': // tinfo command if (tagdb.lookup(percent_decode(key))) cout << "210 " << tagdb.info() << '\n'; else cout << "510 unkown message id\n"; break; default: cout << "500 unknown verb " << cmd << '\n'; break; } } else if (cmd == "send") { string hash; cmdstream >> hash; if (send_content(hashdb, tagdb, hash, "220-", cout)) cout << "220 " << hash << '\n'; else if (hashdb.ok()) cout << "420 cannot open file with hash " << hash << "\n"; else cout << "520 unknown hash\n"; } else if (cmd == "vect") { if (!read_sync_vector(cmdstream, remotevv)) { cout << "500 could not parse vector\n"; remotevv_valid = false; } else { set_peer_vector(db, remotevv); remotevv_valid = true; cout << "200 " << show_sync_vector (get_sync_vector (db)) << '\n'; } } else if (cmd == "link") { xbegin(); hash_info hi; if (!remotevv_valid) cout << "500 must follow vect command\n"; else if (!(cmdstream >> hi)) cout << "500 could not parse hash_info\n"; else if (msync.hash_sync(remotevv, hi, nullptr, nullptr)) { if (opt_verbose > 3) cerr << "received-links " << hi << '\n'; cout << "220 " << hi.hash << " ok\n"; } else cout << "520 " << hi.hash << " missing content\n"; } else if (cmd == "recv") { xbegin(); hash_info hi; tag_info ti; if (!remotevv_valid) cout << "500 must follow vect command\n"; else if (!(cmdstream >> hi >> ti)) cout << "500 could not parse hash_info or tag_info\n"; else { string path; try { path = receive_message(cin, hi, nm.maildir); if (!msync.hash_sync(remotevv, hi, &path, &ti)) cout << "550 failed to synchronize message\n"; else { if (opt_verbose > 3) cerr << "received-file " << hi << '\n'; cout << "250 ok\n"; } } catch (exception &e) { cerr << e.what() << '\n'; cout << "550 " << e.what() << '\n'; } 
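        // Whether or not the sync succeeded, remove the temporary spool
        // file; if receive_message() threw, path is still empty and the
        // unlink is a harmless no-op.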
unlink(path.c_str()); } } else if (cmd == "tags") { xbegin(); tag_info ti; if (!remotevv_valid) cout << "500 must follow vect command\n"; else if (!(cmdstream >> ti)) cout << "500 could not parse hash_info\n"; else if (msync.tag_sync(remotevv, ti)) { if (opt_verbose > 3) cerr << "received-tags " << ti << '\n'; cout << "220 ok\n"; } else cout << "520 unknown message-id\n"; } else if (cmd.substr(1) == "sync") { if (!remotevv_valid) cout << "500 must follow vect command\n"; else switch (cmd[0]) { case 'l': // lsync command send_links (db, "210-", cout); cout << "210 ok\n"; break; case 't': // tsync command send_tags (db, "210-", cout); cout << "210 ok\n"; break; default: cout << "500 unknown verb " << cmd << '\n'; break; } } else if (cmd == "commit") { if (!remotevv_valid) cout << "500 must follow vect command\n"; record_peer_vector(db); if (transaction) { transaction = false; sqlexec(db, "COMMIT;"); } cout << "200 ok\n"; remotevv_valid = false; } else if (cmd == "help") { cout << R"(200-commit 200-conffile 200-help 200-linfo HASH 200-link LINK-INFO 200-lsync 200-quit 200-send HASH 200-recv HASH-INFO LINK-INFO CONTENTS 200-tags TAG-INFO 200-tinfo MESSAGE-ID 200-tsync 200 vect VERSION-VECTOR )"; } else cout << "500 unknown verb " << cmd << '\n'; } } istream & get_response (istream &in, string &line, bool err_ok) { if (!getline (in, line)) throw runtime_error ("premature EOF"); if (opt_verbose > 3) cerr << line << '\n'; if (line.empty()) throw runtime_error ("unexpected empty line"); if (line.size() < 4) throw runtime_error ("unexpected short line"); if (line.front() != '2' && (line.front() != '5' || !err_ok)) throw runtime_error ("bad response: " + line); return in; } void muchsync_client (sqlite3 *db, notmuch_db &nm, istream &in, ostream &out) { constexpr time_t commit_interval = 90; /* Any work done here gets overlapped with server */ sync_local_data (db, nm.maildir); versvector localvv {get_sync_vector (db)}, remotevv; string line; istringstream is; msg_sync msync (nm, db); i64 pending = 0; int down_links = 0, down_body = 0, down_tags = 0, up_links = 0, up_body = 0, up_tags = 0; out << "vect " << show_sync_vector(localvv) << "\nlsync\n" << flush; sqlexec(db, "BEGIN IMMEDIATE;"); get_response (in, line); get_response (in, line); is.str(line.substr(4)); if (!read_sync_vector(is, remotevv)) throw runtime_error ("cannot parse version vector " + line.substr(4)); set_peer_vector(db, remotevv); print_time ("received server's version vector"); catch_interrupts(SIGINT, true); catch_interrupts(SIGTERM, true); time_t last_commit = time(nullptr); auto maybe_commit = [&last_commit,&nm,db] () { time_t now = time(nullptr); if (interrupted) { cerr << "Interrupted\n"; nm.close(); sqlexec(db, "COMMIT;"); exit(1); } else if (now - last_commit >= commit_interval) { nm.close(); sqlexec(db, "COMMIT; BEGIN;"); last_commit = time(nullptr); } }; while (get_response (in, line) && line.at(3) == '-') { is.str(line.substr(4)); hash_info hi; if (!(is >> hi)) throw runtime_error ("could not parse hash_info: " + line.substr(4)); bool ok = msync.hash_sync (remotevv, hi, nullptr, nullptr); if (opt_verbose > 2) { if (ok) cerr << hi << '\n'; else cerr << hi.hash << " UNKNOWN\n"; } if (!ok) { out << "send " << hi.hash << '\n'; pending++; } else down_links++; maybe_commit(); } out << "tsync\n"; int extra_tags = 0; for (sqlstmt_t nolinks (db, "SELECT message_id FROM message_ids" " WHERE replica = 0 AND version = 0;"); nolinks.step().row();) { extra_tags++; out << "tinfo " << permissive_percent_encode(nolinks.str(0)) << '\n'; } 
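  // The server answers strictly in request order: first the bodies we
  // asked for with "send", then the tsync listing, then one reply per
  // "tinfo" issued above.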
print_time ("received hashes of new files"); down_body = pending; hash_info hi; tag_info ti; for (; pending > 0; pending--) { get_response (in, line); is.str(line.substr(4)); if (!(is >> hi >> ti)) throw runtime_error ("could not parse hash_info: " + line.substr(4)); string path = receive_message(in, hi, nm.maildir); cleanup _unlink (unlink, path.c_str()); getline (in, line); if (line.size() < 4 || line.at(0) != '2' || line.at(3) != ' ' || line.substr(4) != hi.hash) throw runtime_error ("lost sync while receiving message: " + line); if (!msync.hash_sync (remotevv, hi, &path, &ti)) throw runtime_error ("msg_sync::sync failed even with source"); if (opt_verbose > 2) cerr << hi << '\n'; maybe_commit(); } print_time ("received content of missing messages"); while (get_response (in, line) && line.at(3) == '-') { down_tags++; is.str(line.substr(4)); if (!(is >> ti)) throw runtime_error ("could not parse tag_info: " + line.substr(4)); if (opt_verbose > 2) cerr << ti << '\n'; msync.tag_sync(remotevv, ti); maybe_commit(); } for (; extra_tags > 0; extra_tags--) { get_response(in, line, true); if (line[0] == '5') continue; is.str(line.substr(4)); if (!(is >> ti)) throw runtime_error ("could not parse tag_info: " + line.substr(4)); down_tags++; if (opt_verbose > 2) cerr << ti << '\n'; msync.tag_sync(remotevv, ti); maybe_commit(); } print_time ("received tags of new and modified messages"); record_peer_vector(db); nm.close(); sqlexec (db, "COMMIT;"); print_time("commited changes to local database"); if (opt_verbose || opt_noup || opt_upbg) cerr << "received " << down_body << " messages, " << down_links << " link changes, " << down_tags << " tag changes\n"; catch_interrupts(SIGINT, false); catch_interrupts(SIGTERM, false); if (opt_noup) return; if (opt_upbg) close(opt_upbg_fd); pending = 0; i64 i = send_links(db, "link ", out); print_time("sent moved messages to server"); while (i-- > 0) { getline(in, line); if (line.size() < 4 || (line.at(0) != '2' && line.at(0) != '5')) throw runtime_error ("lost sync while receiving message: " + line); if (line.at(0) == '5') { is.str(line.substr(4)); string hash; is >> hash; if (send_content(msync.hashdb, msync.tagdb, hash, "recv ", out)) { pending++; up_body++; } } else up_links++; } print_time("sent content of new messages to server"); up_tags = send_tags(db, "tags ", out); pending += up_tags; print_time("sent modified tags to server"); out << "commit\n"; if (opt_verbose) cerr << "sent " << up_body << " messages, " << up_links << " link changes, " << up_tags << " tag changes\n"; while (pending-- > 0) get_response(in, line); get_response(in, line); print_time("commit succeeded on server"); if (!opt_upbg || opt_verbose) { int w = 5; cerr << "SUMMARY:\n" << " received " << setw(w) << down_body << " messages, " << setw(w) << down_links << " link changes, " << setw(w) << down_tags << " tag changes\n"; cerr << " sent " << setw(w) << up_body << " messages, " << setw(w) << up_links << " link changes, " << setw(w) << up_tags << " tag changes\n"; } } muchsync-7/infinibuf.h0000644000175000017500000002501413227744306012130 00000000000000// -*- C++ -*- #ifndef _INFINIBUF_H_ #define _INFINIBUF_H_ 1 /** \file infinibuf.h * \brief iostreams-friendly buffers that can grow without bounds. */ #include #include #include #include /** * \brief Abstract buffer-management class for unbounded buffers. * * A derived class must at a minimum override either `notempty()` (for * output buffers) or `gwait()` (for input buffers). * * Most methods are not thread-safe. 
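 *
 * A minimal usage sketch (an illustration only, not part of the API):
 * pair a thread-safe `infinibuf_mt` with an `infinistreambuf`, let a
 * helper thread fill it from a file descriptor, and read it like any
 * std::istream.  Here `fd` stands for any open readable descriptor and
 * `handle()` is a placeholder:
 *
 *     auto ib = std::make_shared<infinibuf_mt>();
 *     std::thread filler(infinibuf::input_loop, ib, fd);
 *     filler.detach();
 *     infinistreambuf sb(ib);
 *     std::istream in(&sb);
 *     for (std::string line; std::getline(in, line);)
 *       handle(line);
 *
 * This is essentially what `ifdinfinistream`, declared later in this
 * header, packages up behind a single constructor.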
*/ class infinibuf { protected: static constexpr int default_startpos_ = 8; static constexpr int chunksize_ = 0x10000; std::list data_; int gpos_; int ppos_; bool eof_{false}; int errno_{0}; const int startpos_; // For putback /** Called to signal when the buffer transitions from empty to * non-empty. */ virtual void notempty() {} /** Called when sufficient bytes are consumed to free some memory. */ virtual void notfull() {} public: explicit infinibuf(int sp = default_startpos_) : gpos_(sp), ppos_(sp), startpos_(sp) { data_.push_back(new char[chunksize_]); } infinibuf(const infinibuf &) = delete; virtual ~infinibuf() = 0; infinibuf &operator= (const infinibuf &) = delete; // These functions are never thread safe: bool empty() { return data_.front() == data_.back() && gpos_ == ppos_; } bool eof() { return eof_; } std::size_t buffer_size() { return data_.size() * chunksize_; } int err() { return errno_; } void err(int num) { if (!errno_) errno_ = num; peof(); } char *eback() { return data_.front(); } char *gptr() { return eback() + gpos_; } int gsize() { return (data_.front() == data_.back() ? ppos_ : chunksize_) - gpos_; } char *egptr() { return gptr() + gsize(); } void gbump(int n); /** Called to wait for the buffer to be non-empty. */ virtual void gwait() {} char *pbase() { return data_.back(); } char *pptr() { return pbase() + ppos_; } int psize() { return chunksize_ - ppos_; } char *epptr() { return pptr() + psize(); } void pbump(int n); void peof() { eof_ = true; if (empty()) notempty(); } /** Called to sleep if the buffer is too full. */ virtual void pwait() {} // These functions are thread safe for some subtypes: /** By default `lock()` and `unlock()` do nothing, but threadsafe * derived classes must override these functions. */ virtual void lock() {} /** See comment at lock. */ virtual void unlock() {} /** \brief Drain the current contents of the buffer. * * This function is thread safe and must be called *without* locking * the `infinibuf`. If the `infinibuf` is already locked, deadlock * will ensue. * * \param fd The file descriptor to write to. * \return 0 at EOF if there is no point in ever calling `output` * again, -1 after EAGAIN, and 1 after successful output. * \throws runtime_error if the `write` system call fails and * `errno` is not `EAGAIN`. */ int output(int fd); /** Fill the buffer from a file descriptor. * * This function is thread safe and must be called *without* locking * the `infinibuf`. * * \param fd The file descriptor to read from. * \return 0 at EOF if there is no point in ever calling * `input` again, 1 after successful input, and -1 after EAGAIN. * \throws runtime_error if the `read` system call fails and * `errno` is not `EAGAIN`. */ int input(int fd); /** Calls `output` over and over in a loop on an `infinibuf`. * * \param ib The `infinibuf` on which to call `output`. * * \param fd The file descriptor to which to write consumed data. * * \param oblocked If non-null is called with `true` whenever the * output is blocked by flow control, and then called again with * `false` when the output becomes unblocked. */ static void output_loop(std::shared_ptr ib, int fd, std::function oblocked = nullptr); static void input_loop(std::shared_ptr ib, int fd); }; /** \brief An `infinibuf` that synchronously reads from a file * descriptor when the buffer underflows. * * Closes the file descriptor upon destruction. 
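 * (This is the buffer behind `ifdstream` below: each underflow does a
 * blocking read on the calling thread, with no helper thread involved.)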
*/ class infinibuf_infd : public infinibuf { const int fd_; public: explicit infinibuf_infd (int fd, int sp = default_startpos_) : infinibuf(sp), fd_(fd) {} ~infinibuf_infd(); void gwait() override { input(fd_); } }; /** \brief An `infinibuf` that synchronously writes to a file * descriptor when the buffer overflows or is synced. * * Closes the file descriptor upon destruction. */ class infinibuf_outfd : public infinibuf { const int fd_; std::function oblocked_; public: explicit infinibuf_outfd (int fd, std::function oblocked = nullptr); ~infinibuf_outfd(); void notempty() override; }; /** \brief Thread-safe infinibuf. * * This infinibuf can safely be used in an `iostream` by one thread, * while a different thread fills or drains the buffer (for instance * executing `infinibuf::output_loop` or `infinibuf::input_loop`). */ class infinibuf_mt : public infinibuf { std::mutex m_; std::condition_variable cv_; std::condition_variable flow_ctrl_cv_; std::size_t max_buf_size_{0}; public: explicit infinibuf_mt (int sp = default_startpos_) : infinibuf(sp) {} void lock() override { m_.lock(); } void unlock() override { m_.unlock(); } void notempty() override { cv_.notify_all(); } void notfull() override { flow_ctrl_cv_.notify_all(); } void set_max_buf_size(std::size_t val) { std::lock_guard _lk(*this); if (!val || val > max_buf_size_) notfull(); max_buf_size_ = val; } void gwait() override { if (empty() && !eof()) { std::unique_lock ul (m_, std::adopt_lock); while (empty() && !eof()) cv_.wait(ul); ul.release(); } } void pwait() override { if (max_buf_size_ && buffer_size() > max_buf_size_) { if (max_buf_size_ && buffer_size() > max_buf_size_) { std::unique_lock ul (m_, std::adopt_lock); flow_ctrl_cv_.wait(ul); ul.release(); } } } }; /** \brief `infinibuf`-based `streambuf`. * * This streambuf can make use of any buffer type derived from * `infinibuf`. The `infinibuf` is always converted to a * `shared_ptr`, even if it is passed in as a raw `infinibuf*`. */ class infinistreambuf : public std::streambuf { protected: std::shared_ptr ib_; int_type underflow() override; int_type overflow(int_type ch) override; int sync() override; public: explicit infinistreambuf(std::shared_ptr ib); explicit infinistreambuf(infinibuf *ib) : infinistreambuf(std::shared_ptr(ib)) {} infinistreambuf(infinistreambuf &&isb) : infinistreambuf(isb.ib_) {} std::shared_ptr get_infinibuf() { return ib_; } void sputeof(); }; class ifdstream : public std::istream { infinistreambuf isb_; public: ifdstream(int fd) : std::istream (nullptr), isb_ (new infinibuf_infd(fd)) { init(&isb_); } ~ifdstream() { std::lock_guard _lk (*isb_.get_infinibuf()); isb_.get_infinibuf()->err(EPIPE); } }; class ofdstream : public std::ostream { infinistreambuf isb_; public: ofdstream(int fd, std::function oblocked = nullptr) : std::ostream (nullptr), isb_(new infinibuf_outfd(fd, oblocked)) { init(&isb_); } ~ofdstream() { if (std::uncaught_exception()) try { isb_.sputeof(); } catch(...) {} else isb_.sputeof(); } }; /** \brief std::istream from file descriptor with unbounded buffer. * * Continously reads from and buffers input from a file descriptor in * another thread. Closes the file descriptor after receiving EOF. * Kill the input thread if any further input is received, but the * input thread could get stuck if no input and no EOF happens. * Maximum buffer size defaults to infinity but can be adjusted with * `ifdinfinistream::set_max_buf_size`. 
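 *
 * A short usage sketch (values are only examples; 0x8000000 mirrors the
 * 128 MiB cap muchsync.cc uses for network input, and `process()` is a
 * placeholder):
 *
 *     ifdinfinistream in(0, 0x8000000);  // wrap stdin, buffer at most 128 MiB
 *     for (std::string line; std::getline(in, line);)
 *       process(line);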
*/ class ifdinfinistream : public std::istream { std::shared_ptr ib_ { new infinibuf_mt() }; infinistreambuf isb_ { ib_ }; public: explicit ifdinfinistream (int fd, std::size_t size = 0) : std::istream (nullptr) { set_max_buf_size(size); std::thread t (infinibuf::input_loop, isb_.get_infinibuf(), fd); t.detach(); init(&isb_); } /** Sets maximum buffer size, above which it will stop reading from * the file descriptor until more is consumed locally. * * A value of 0 means no maximum buffer size. */ void set_max_buf_size(std::size_t size) { ib_->set_max_buf_size(size); } ~ifdinfinistream() { std::lock_guard _lk (*isb_.get_infinibuf()); // Sadly, there appears to be no portable way of waking up the // thread waiting in read. I tried using dup2 to replace the file // descriptor with /dev/null, or using fcntl to set the O_NONBLOCK // flag after the read has already started, and neither works on // linux. What does work is setting an empty function (not // SIG_IGN) as the signal handler on SIGCONT, then setting // O_NONBLOCK on the file descriptor, and finally calling // pthread_kill(t.native_handle(), SIGCONT)--but that could have // unintended consequences on other parts of the program following // a Ctrl-Z. The only truly clean solution is to use a // "self-pipe" to wake up a poll call, thereby using three file // descriptors for the job of one (yuck). Since we don't really // need to clean up the file descriptor, I'm not going to add the // complexity and cost of polling a second "self-pipe" file // descriptor or dropping down to native_handle. isb_.get_infinibuf()->err(EPIPE); } }; #if 0 /** \brief `ostream` from file descriptor with unbounded buffer. * * Buffers unbounded amounts of data which are drained to a file * descriptor in another thread. The file descriptor is closed when * the draining thread exits. The class destructor waits for the * writer thread to flush the buffer and exit. */ class ofdinfinistream : public std::ostream { infinistreambuf isb_ { new infinibuf_mt(0) }; std::thread t_; public: ofdinfinistream (int fd) { std::thread t (infinibuf::output_loop, isb_.get_infinibuf(), fd, nullptr); t_ = std::move(t); rdbuf(&isb_); } // Doesn't work because std::ostream's virtual destructor is noexcept. ~ofdinfinistream() noexcept(false) { isb_.sputeof(); if (!std::uncaught_exception()) { t_.join(); std::lock_guard lk (*isb_.get_infinibuf()); if (isb_.get_infinibuf()->err()) throw std::runtime_error (std::string("~ofdinfinistream: ") + strerror(isb_.get_infinibuf()->err())); } } }; #endif #endif /* !_INFINIBUF_H_ */ muchsync-7/Makefile.in0000644000175000017500000007324314357577521012072 00000000000000# Makefile.in generated by automake 1.16.5 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2021 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : bin_PROGRAMS = muchsync$(EXEEXT) subdir = . ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_append_compile_flags.m4 \ $(top_srcdir)/m4/ax_append_flag.m4 \ $(top_srcdir)/m4/ax_check_compile_flag.m4 \ $(top_srcdir)/m4/ax_cxx_compile_stdcxx_11.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ $(am__configure_deps) $(am__DIST_COMMON) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) am_muchsync_OBJECTS = infinibuf.$(OBJEXT) misc.$(OBJEXT) \ muchsync.$(OBJEXT) notmuch_db.$(OBJEXT) protocol.$(OBJEXT) \ sqlstmt.$(OBJEXT) sql_db.$(OBJEXT) xapian_sync.$(OBJEXT) muchsync_OBJECTS = $(am_muchsync_OBJECTS) muchsync_LDADD = $(LDADD) am__DEPENDENCIES_1 = muchsync_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/infinibuf.Po ./$(DEPDIR)/misc.Po \ ./$(DEPDIR)/muchsync.Po ./$(DEPDIR)/notmuch_db.Po \ ./$(DEPDIR)/protocol.Po ./$(DEPDIR)/sql_db.Po \ ./$(DEPDIR)/sqlstmt.Po ./$(DEPDIR)/xapian_sync.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = 
$(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \ -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(muchsync_SOURCES) DIST_SOURCES = $(muchsync_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` AM_RECURSIVE_TARGETS = cscope am__DIST_COMMON = $(srcdir)/Makefile.in AUTHORS COPYING ChangeLog \ INSTALL NEWS README depcomp install-sh missing DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ if test -d "$(distdir)"; then \ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ && rm -rf "$(distdir)" \ || { sleep 5 && rm -rf "$(distdir)"; }; \ else :; fi am__post_remove_distdir = $(am__remove_distdir) DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best DIST_TARGETS = dist-gzip # Exists only to be overridden by the user if desired. 
AM_DISTCHECK_DVI_TARGET = dvi distuninstallcheck_listfiles = find . -type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' distcleancheck_listfiles = find . -type f -print ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CPPFLAGS = @CPPFLAGS@ CSCOPE = @CSCOPE@ CTAGS = @CTAGS@ CXX = @CXX@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ ETAGS = @ETAGS@ EXEEXT = @EXEEXT@ HAVE_CXX11 = @HAVE_CXX11@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ XAPIAN_CONFIG = @XAPIAN_CONFIG@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CXX = @ac_ct_CXX@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libcrypto_CFLAGS = @libcrypto_CFLAGS@ libcrypto_LIBS = @libcrypto_LIBS@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ sqlite3_CFLAGS = @sqlite3_CFLAGS@ sqlite3_LIBS = @sqlite3_LIBS@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ xapian_CPPFLAGS = @xapian_CPPFLAGS@ xapian_LIBS = @xapian_LIBS@ ACLOCAL_AMFLAGS = ${ACLOCAL_FLAGS} -I m4 AM_CPPFLAGS = $(sqlite3_CFLAGS) $(libcrypto_CFLAGS) $(xapian_CPPFLAGS) LDADD = $(sqlite3_LIBS) $(libcrypto_LIBS) -lnotmuch $(xapian_LIBS) muchsync_SOURCES = infinibuf.cc misc.cc muchsync.cc notmuch_db.cc \ protocol.cc sqlstmt.cc sql_db.cc xapian_sync.cc cleanup.h \ misc.h muchsync.h infinibuf.h notmuch_db.h sqlstmt.h sql_db.h CLEANFILES = *~ man_MANS = muchsync.1 EXTRA_DIST = muchsync.1.md $(man_MANS) all: all-am .SUFFIXES: .SUFFIXES: .cc .o .obj am--refresh: Makefile @: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --gnu'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --gnu \ && exit 0; \ exit 1;; \ esac; \ 
done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) muchsync$(EXEEXT): $(muchsync_OBJECTS) $(muchsync_DEPENDENCIES) $(EXTRA_muchsync_DEPENDENCIES) @rm -f muchsync$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(muchsync_OBJECTS) $(muchsync_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/infinibuf.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/misc.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/muchsync.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/notmuch_db.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/protocol.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sql_db.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sqlstmt.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xapian_sync.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cc.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cc.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` install-man1: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscope: cscope.files test ! 
-s cscope.files \ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) clean-cscope: -rm -f cscope.files cscope.files: clean-cscope cscopelist cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags -rm -f cscope.out cscope.in.out cscope.po.out cscope.files distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz $(am__post_remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 $(am__post_remove_distdir) dist-lzip: distdir tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__post_remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__post_remove_distdir) dist-zstd: distdir tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst $(am__post_remove_distdir) dist-tarZ: distdir @echo WARNING: "Support for distribution archives compressed with" \ "legacy program 'compress' is deprecated." >&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__post_remove_distdir) dist-shar: distdir @echo WARNING: "Support for shar distribution archives is" \ "deprecated." 
>&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz $(am__post_remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__post_remove_distdir) dist dist-all: $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' $(am__post_remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lz*) \ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ *.tar.zst*) \ zstd -dc $(distdir).tar.zst | $(am__untar) ;;\ esac chmod -R a-w $(distdir) chmod u+w $(distdir) mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build/sub \ && ../../configure \ $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ --srcdir=../.. --prefix="$$dc_install_base" \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) $(AM_DISTCHECK_DVI_TARGET) \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__post_remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @test -n '$(distuninstallcheck_dir)' || { \ echo 'ERROR: trying to run $@ with an empty' \ '$$(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ $(am__cd) '$(distuninstallcheck_dir)' || { \ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . 
; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic mostlyclean-am distclean: distclean-am -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f ./$(DEPDIR)/infinibuf.Po -rm -f ./$(DEPDIR)/misc.Po -rm -f ./$(DEPDIR)/muchsync.Po -rm -f ./$(DEPDIR)/notmuch_db.Po -rm -f ./$(DEPDIR)/protocol.Po -rm -f ./$(DEPDIR)/sql_db.Po -rm -f ./$(DEPDIR)/sqlstmt.Po -rm -f ./$(DEPDIR)/xapian_sync.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f ./$(DEPDIR)/infinibuf.Po -rm -f ./$(DEPDIR)/misc.Po -rm -f ./$(DEPDIR)/muchsync.Po -rm -f ./$(DEPDIR)/notmuch_db.Po -rm -f ./$(DEPDIR)/protocol.Po -rm -f ./$(DEPDIR)/sql_db.Po -rm -f ./$(DEPDIR)/sqlstmt.Po -rm -f ./$(DEPDIR)/xapian_sync.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic \ maintainer-clean-local mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles am--refresh check \ check-am clean clean-binPROGRAMS clean-cscope clean-generic \ cscope cscopelist-am ctags ctags-am dist dist-all dist-bzip2 \ dist-gzip dist-lzip dist-shar dist-tarZ dist-xz dist-zip \ dist-zstd distcheck distclean distclean-compile \ distclean-generic distclean-tags distcleancheck distdir \ distuninstallcheck dvi dvi-am html html-am info info-am \ install install-am install-binPROGRAMS install-data \ install-data-am install-dvi install-dvi-am 
install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-man1 install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic maintainer-clean-local mostlyclean \ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-binPROGRAMS \ uninstall-man uninstall-man1 .PRECIOUS: Makefile maintainer-clean-local: +@echo rm -rf `sed -ne 's!^/!!p' .gitignore` Makefile.in rm -rf `sed -ne 's!^/!!p' .gitignore` Makefile.in muchsync.1: muchsync.1.md pandoc -s -w man muchsync.1.md -o muchsync.1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: muchsync-7/misc.cc0000644000175000017500000000547712536100426011252 00000000000000#include #include #include #include #include #include #include #include #include #include #include "misc.h" using namespace std; string percent_encode (const string &raw) { ostringstream outbuf; outbuf.fill('0'); outbuf.setf(ios::hex, ios::basefield); for (char c : raw) { if (isalnum (c) || (c >= '+' && c <= '.') || c == '_' || c == '@' || c == '=') outbuf << c; else outbuf << '%' << setw(2) << int (uint8_t (c)); } return outbuf.str (); } inline int hexdigit (char c) { if (c >= '0' && c <= '9') return c - '0'; else if (c >= 'a' && c <= 'f') return c - 'a' + 10; else throw runtime_error ("precent_decode: illegal hexdigit " + string (1, c)); } string percent_decode (const string &encoded) { ostringstream outbuf; int escape_pos = 0, escape_val = 0; for (char c : encoded) { switch (escape_pos) { case 0: if (c == '%') escape_pos = 1; else outbuf << c; break; case 1: escape_val = hexdigit(c) << 4; escape_pos = 2; break; case 2: escape_pos = 0; outbuf << char (escape_val | hexdigit(c)); break; } } if (escape_pos) throw runtime_error ("percent_decode: incomplete escape"); return outbuf.str(); } std::istream & input_match (std::istream &in, char want) { char got; if ((in >> got) && got != want) in.setstate (std::ios_base::failbit); return in; } bool hash_ok (const string &hash) { if (hash.size() != 2*hash_ctx::output_bytes) return false; for (char c : hash) if (c < '0' || c > 'f' || (c > '9' && c < 'a')) return false; return true; } static string hexdump (const string &s) { ostringstream os; os << hex << setfill('0'); for (auto c : s) os << setw(2) << (int (c) & 0xff); string ret = os.str(); if (ret.size() != 2 * s.size()) { cerr << ret.size() << " != 2 * " << s.size () << "\n"; cerr << "s[0] == " << hex << unsigned (s[0]) << ", s.back() = " << unsigned (s.back()) << "\n"; terminate(); } return ret; } string hash_ctx::final() { unsigned char resbuf[output_bytes]; SHA1_Final (resbuf, &ctx_); return hexdump ({ reinterpret_cast (resbuf), sizeof (resbuf) }); } using stp = std::chrono::time_point; stp start_time_stamp{stp::clock::now()}; stp last_time_stamp{start_time_stamp}; void print_time (string msg) { using namespace std::chrono; stp now = stp::clock::now(); if (opt_verbose > 0) { auto oldFlags = cerr.flags(); cerr.setf (ios::fixed, ios::floatfield); cerr << msg << "... " << duration(now - start_time_stamp).count() << " (+" << duration(now - last_time_stamp).count() << ")\n"; cerr.flags (oldFlags); } last_time_stamp = now; } muchsync-7/configure.ac0000644000175000017500000000365414357576774012323 00000000000000# -*- Autoconf -*- # Process this file with autoconf to produce a configure script. 
AC_PREREQ([2.69]) AC_INIT(muchsync, 7) AM_INIT_AUTOMAKE([-Wall]) AC_CONFIG_SRCDIR([configure.ac]) AC_CONFIG_MACRO_DIR([m4]) dnl On arch, clang++ is not compatible with all libraries dnl AC_PROG_CXX([clang++ eg++ g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC]) AC_PROG_CXX AX_CXX_COMPILE_STDCXX_11(noext,mandatory) : ${WFLAGS=-Wall} CXXFLAGS="$CXXFLAGS $WFLAGS" dnl AC_USE_SYSTEM_EXTENSIONS AC_LANG(C++) # -pthread Seems to be required by g++ -stc=c++11 AX_APPEND_COMPILE_FLAGS([-pthread]) AC_CHECK_FUNCS(openat) AC_CHECK_FUNCS(fdopendir) PKG_CHECK_MODULES([sqlite3], [sqlite3]) PKG_CHECK_MODULES([libcrypto], [libcrypto]) AC_PATH_PROG(XAPIAN_CONFIG, xapian-config) test -n "$XAPIAN_CONFIG" || AC_MSG_ERROR(Cannot find xapian-config) if ! xapian_CPPFLAGS=$($XAPIAN_CONFIG --cxxflags) \ || ! xapian_LIBS=$($XAPIAN_CONFIG --libs); then AC_MSG_ERROR(Error running $XAPIAN_CONFIG) fi AC_SUBST(xapian_CPPFLAGS) AC_SUBST(xapian_LIBS) AC_MSG_CHECKING(For st_mtim in struct stat) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([#include #include #include ], [return sizeof(stat::st_mtim) > 0;])], have_st_mtim=yes, have_st_mtim=no) AC_MSG_RESULT($have_st_mtim) if test yes = "$have_st_mtim"; then ST_MTIM=st_mtim else AC_MSG_CHECKING(For st_mtimespec in struct stat) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([#include #include #include ], [int sz = sizeof(stat::st_mtimespec);])], have_st_mtimespec=yes, have_st_mtimespec=no) AC_MSG_RESULT($have_st_mtimespec) if test yes = "$have_st_mtimespec"; then ST_MTIM=st_mtimespec else AC_MSG_ERROR(Cannot find nanoseconds mtime in stat struct) fi fi AC_DEFINE_UNQUOTED(ST_MTIM, $ST_MTIM, Name of timespec modification time field in stat structure) AC_CONFIG_FILES([Makefile]) AC_OUTPUT muchsync-7/muchsync.h0000644000175000017500000000147312514134120011774 00000000000000// -*- C++ -*- #include #include #include #include #include "cleanup.h" #include "sql_db.h" #include "notmuch_db.h" using std::string; /* protocol.cc */ void muchsync_server(sqlite3 *db, notmuch_db &nm); void muchsync_client(sqlite3 *db, notmuch_db &nm, std::istream &in, std::ostream &out); std::istream &get_response(std::istream &in, string &line, bool err_ok = true); /* muchsync.cc */ extern bool opt_fullscan; extern bool opt_noscan; extern bool opt_upbg; extern int opt_upbg_fd; extern bool opt_noup; extern string opt_ssh; extern string opt_remote_muchsync_path; extern string opt_notmuch_config; extern const char muchsync_trashdir[]; extern const char muchsync_tmpdir[]; /* xapian_sync.cc */ void sync_local_data(sqlite3 *sqldb, const string &maildir); muchsync-7/muchsync.cc0000644000175000017500000003035713403557504012152 00000000000000 #include #include #include #include #include #include #include #include #include #include "misc.h" #include "muchsync.h" #include "infinibuf.h" using namespace std; #if 0 // This gives core dumps to make it easier to debug struct no_such_exception_t { const char *what() noexcept { return "no such exception"; } }; using whattocatch_t = no_such_exception_t; #else using whattocatch_t = const exception; #endif #define MUCHSYNC_DEFDIR "/.notmuch/muchsync" const char muchsync_defdir[] = MUCHSYNC_DEFDIR; const char muchsync_dbpath[] = MUCHSYNC_DEFDIR "/state.db"; const char muchsync_trashdir[] = MUCHSYNC_DEFDIR "/trash"; const char muchsync_tmpdir[] = MUCHSYNC_DEFDIR "/tmp"; constexpr char shell[] = "/bin/sh"; // Probably no win from buffering more than 128MB of input data from net constexpr size_t max_buf_size = 0x8000000; bool opt_fullscan; bool opt_noscan; bool opt_init; bool opt_server; bool 
opt_upbg; bool opt_noup; bool opt_nonew; bool opt_newid; i64 opt_newid_value; int opt_verbose; int opt_upbg_fd = -1; string opt_ssh = "ssh -CTaxq"; string opt_remote_muchsync_path = "muchsync"; string opt_notmuch_config; string opt_init_dest; static bool muchsync_init (const string &maildir, bool create = false) { string trashbase = maildir + muchsync_trashdir + "/"; if (!access ((maildir + muchsync_tmpdir).c_str(), 0) && !access ((trashbase + "ff").c_str(), 0)) return true; if (create && mkdir (maildir.c_str(), 0777) && errno != EEXIST) { perror (maildir.c_str()); return false; } string notmuchdir = maildir + "/.notmuch"; if (create && access (notmuchdir.c_str(), 0) && errno == ENOENT) { notmuch_database_t *notmuch; if (!notmuch_database_create (maildir.c_str(), ¬much)) notmuch_database_destroy (notmuch); } string msdir = maildir + muchsync_defdir; for (string d : {msdir, maildir + muchsync_trashdir, maildir + muchsync_tmpdir}) { if (mkdir (d.c_str(), 0777) && errno != EEXIST) { perror (d.c_str()); return false; } } for (int i = 0; i < 0x100; i++) { ostringstream os; os << trashbase << hex << setfill('0') << setw(2) << i; if (mkdir (os.str().c_str(), 0777) && errno != EEXIST) { perror (os.str().c_str()); return false; } } return true; } static void tag_stderr(string tag) { infinistreambuf *isb = new infinistreambuf(new infinibuf_mt); streambuf *err = cerr.rdbuf(isb); thread t ([=]() { istream in (isb); ostream out (err); string line; while (getline(in, line)) out << tag << line << endl; }); t.detach(); cerr.rdbuf(isb); } //[[noreturn]] void usage (int code = 1) { (code ? cerr : cout) << "\ usage: muchsync\n\ muchsync server [server-options]\n\ muchsync --init maildir server [server-options]\n\ \n\ Additional options:\n\ -C file Specify path to notmuch config file\n\ -F Disable optimizations and do full maildir scan\n\ -v Increase verbosity\n\ -r path Specify path to notmuch executable on server\n\ -s ssh-cmd Specify ssh command and arguments\n\ --config file Specify path to notmuch config file (same as -C)\n\ --nonew Do not run notmuch new first\n\ --noup[load] Do not upload changes to server\n\ --upbg Download mail in forground, then upload in background\n\ --self Print local replica identifier and exit\n\ --newid Change local replica identifier and exit\n\ --version Print version number and exit\n\ --help Print usage\n"; exit (code); } static void id_request() { unique_ptr nmp; try { nmp.reset(new notmuch_db (opt_notmuch_config)); } catch (whattocatch_t &e) { cerr << e.what() << '\n'; exit (1); } notmuch_db &nm = *nmp; string dbpath = nm.maildir + muchsync_dbpath; sqlite3 *db = dbopen(dbpath.c_str(), opt_newid); if (!db) exit(1); cleanup _c (sqlite3_close_v2, db); if (!opt_newid) cout << getconfig(db, "self") << '\n'; else { sqlexec (db, "BEGIN;"); i64 oldid = getconfig(db, "self"); sqlexec (db, "INSERT OR IGNORE INTO sync_vector (replica, version)" " VALUES (%lld, 1);", opt_newid_value); setconfig (db, "self", opt_newid_value); cout << "changing id from " << oldid << " to " << opt_newid_value << '\n'; sqlexec (db, "COMMIT;"); } } static void server() { ifdinfinistream ibin(0, max_buf_size); cleanup _fixbuf0 ([](streambuf *sb){ cin.rdbuf(sb); }, cin.rdbuf(ibin.rdbuf())); ofdstream ibout(1, [&ibin](bool blocked) { ibin.set_max_buf_size(blocked ? 
0 : max_buf_size); }); cleanup _fixbuf1 ([](streambuf *sb){ cout.rdbuf(sb); }, cout.rdbuf(ibout.rdbuf())); tag_stderr("[SERVER] "); unique_ptr nmp; try { nmp.reset(new notmuch_db (opt_notmuch_config)); } catch (whattocatch_t &e) { cerr << e.what() << '\n'; exit (1); } notmuch_db &nm = *nmp; string dbpath = nm.maildir + muchsync_dbpath; if (!opt_nonew) nm.run_new(); if (!muchsync_init (nm.maildir)) exit (1); sqlite3 *db = dbopen(dbpath.c_str()); if (!db) exit(1); cleanup _c (sqlite3_close_v2, db); try { if (!opt_noscan) sync_local_data(db, nm.maildir); muchsync_server(db, nm); } catch (whattocatch_t &e) { cerr << e.what() << '\n'; exit(1); } } static void cmd_iofds (int fds[2], const string &cmd) { int ifds[2], ofds[2]; if (pipe (ifds)) throw runtime_error (string ("pipe: ") + strerror (errno)); if (pipe (ofds)) { close (ifds[0]); close (ifds[1]); throw runtime_error (string ("pipe: ") + strerror (errno)); } pid_t pid = fork(); switch (pid) { case -1: close (ifds[0]); close (ifds[1]); close (ofds[0]); close (ofds[1]); throw runtime_error (string ("fork: ") + strerror (errno)); break; case 0: close (ifds[0]); close (ofds[1]); if (ofds[0] != 0) { dup2 (ofds[0], 0); close (ofds[0]); } if (ifds[1] != 1) { dup2 (ifds[1], 1); close (ifds[1]); } execl (shell, shell, "-c", cmd.c_str(), nullptr); cerr << shell << ": " << strerror (errno) << '\n'; _exit (1); break; default: close (ifds[1]); close (ofds[0]); fcntl (ifds[0], F_SETFD, 1); fcntl (ofds[1], F_SETFD, 1); fds[0] = ifds[0]; fds[1] = ofds[1]; break; } } static void create_config(istream &in, ostream &out, string &maildir) { if (!maildir.size() || !maildir.front()) throw runtime_error ("illegal empty maildir path\n"); string line; out << "conffile\n"; get_response(in, line); get_response(in, line); size_t len = stoul(line.substr(4)); if (len <= 0) throw runtime_error ("server did not return configuration file\n"); string conf; conf.resize(len); if (!in.read(&conf.front(), len)) throw runtime_error ("cannot read configuration file from server\n"); int fd = open(opt_notmuch_config.c_str(), O_CREAT|O_TRUNC|O_WRONLY|O_EXCL, 0666); if (fd < 0) throw runtime_error (opt_notmuch_config + ": " + strerror (errno)); write(fd, conf.c_str(), conf.size()); close(fd); if (maildir[0] != '/') { const char *p = getenv("PWD"); if (!p) throw runtime_error ("no PWD in environment\n"); maildir = p + ("/" + maildir); } notmuch_db nm (opt_notmuch_config); nm.set_config ("database.path", maildir.c_str(), nullptr); } static void client(int ac, char **av) { unique_ptr nmp; struct stat sb; int err = stat(opt_notmuch_config.c_str(), &sb); if (opt_init) { if (!err) { cerr << opt_notmuch_config << " should not exist with --init option\n"; exit (1); } else if (errno != ENOENT) { cerr << opt_notmuch_config << ": " << strerror(errno) << '\n'; exit (1); } } else if (err) { cerr << opt_notmuch_config << ": " << strerror(errno) << '\n'; exit (1); } else { try { nmp.reset(new notmuch_db (opt_notmuch_config)); } catch (whattocatch_t &e) { cerr << e.what() << '\n'; exit (1); } } if (ac == 0) { if (!nmp) usage(); if (!muchsync_init(nmp->maildir, true)) exit (1); if (!opt_nonew) nmp->run_new(); string dbpath = nmp->maildir + muchsync_dbpath; sqlite3 *db = dbopen(dbpath.c_str()); if (!db) exit (1); cleanup _c (sqlite3_close_v2, db); sync_local_data (db, nmp->maildir); exit(0); } ostringstream os; os << opt_ssh << ' ' << av[0] << ' ' << opt_remote_muchsync_path << " --server"; for (int i = 1; i < ac; i++) os << ' ' << av[i]; string cmd (os.str()); int fds[2]; cmd_iofds (fds, cmd); 
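// cmd_iofds above has just forked a /bin/sh child running the assembled command
// (opt_ssh, the first non-option argument naming the remote side,
// opt_remote_muchsync_path with "--server", plus any further server options).
// In the parent, fds[0] is the read end connected to the child's stdout and
// fds[1] is the write end connected to the child's stdin; both were marked
// close-on-exec before being wrapped by the buffered streams below.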
ifdinfinistream in (fds[0], max_buf_size); ofdstream out (fds[1], [&in](bool blocked){ in.set_max_buf_size(blocked ? 0 : max_buf_size); }); in.tie (&out); if (opt_init) { create_config(in, out, opt_init_dest); try { nmp.reset(new notmuch_db (opt_notmuch_config, true)); } catch (whattocatch_t &e) { cerr << e.what() << '\n'; exit (1); } } if (!muchsync_init(nmp->maildir, true)) exit(1); if (!opt_nonew) nmp->run_new(); string dbpath = nmp->maildir + muchsync_dbpath; sqlite3 *db = dbopen(dbpath.c_str(), true); if (!db) exit (1); cleanup _c (sqlite3_close_v2, db); try { muchsync_client (db, *nmp, in, out); } catch (whattocatch_t &e) { cerr << e.what() << '\n'; exit (1); } } enum opttag { OPT_VERSION = 0x100, OPT_SERVER, OPT_NOSCAN, OPT_UPBG, OPT_NOUP, OPT_HELP, OPT_NONEW, OPT_SELF, OPT_NEWID, OPT_INIT }; static const struct option muchsync_options[] = { { "version", no_argument, nullptr, OPT_VERSION }, { "server", no_argument, nullptr, OPT_SERVER }, { "noscan", no_argument, nullptr, OPT_NOSCAN }, { "upbg", no_argument, nullptr, OPT_UPBG }, { "noup", no_argument, nullptr, OPT_NOUP }, { "noupload", no_argument, nullptr, OPT_NOUP }, { "nonew", no_argument, nullptr, OPT_NONEW }, { "init", required_argument, nullptr, OPT_INIT }, { "self", no_argument, nullptr, OPT_SELF }, { "newid", optional_argument, nullptr, OPT_NEWID }, { "config", required_argument, nullptr, 'C' }, { "help", no_argument, nullptr, OPT_HELP }, { nullptr, 0, nullptr, 0 } }; int main(int argc, char **argv) { umask (077); opt_notmuch_config = notmuch_db::default_notmuch_config(); bool opt_self = false; int opt; while ((opt = getopt_long(argc, argv, "+C:Fr:s:v", muchsync_options, nullptr)) != -1) switch (opt) { case 0: break; case 'C': opt_notmuch_config = optarg; break; case 'F': opt_fullscan = true; break; case 'r': opt_remote_muchsync_path = optarg; break; case 's': opt_ssh = optarg; break; case 'v': opt_verbose++; break; case OPT_VERSION: cout << PACKAGE_STRING << '\n'; exit (0); case OPT_SERVER: opt_server = true; break; case OPT_NOSCAN: opt_noscan = true; break; case OPT_UPBG: opt_upbg = true; break; case OPT_NOUP: opt_noup = true; break; case OPT_NONEW: opt_nonew = true; break; case OPT_SELF: opt_self = true; break; case OPT_NEWID: opt_newid = true; if (optarg) { opt_newid_value = std::stoll(optarg, nullptr, 10); if (opt_newid_value <= 0) { cerr << "invalid id " << optarg << '\n'; usage(); } } else opt_newid_value = create_random_id(); break; case OPT_INIT: opt_init = true; opt_init_dest = optarg; break; case OPT_HELP: usage(0); default: usage(); } if (opt_self || opt_newid) { if ((opt_self && opt_newid) || optind != argc || opt_init || opt_noup || opt_upbg) usage(); id_request(); } else if (opt_server) { if (opt_init || opt_noup || opt_upbg || optind != argc) usage(); server(); } else if (opt_upbg) { int fds[2]; if (pipe(fds)) { cerr << "pipe: " << strerror(errno) << '\n'; exit (1); } fcntl(fds[1], F_SETFD, 1); if (fork() > 0) { char c; close(fds[1]); read(fds[0], &c, 1); if (opt_verbose) cerr << "backgrounding\n"; exit(0); } close(fds[0]); opt_upbg_fd = fds[1]; client(argc - optind, argv + optind); } else client(argc - optind, argv + optind); return 0; } muchsync-7/cleanup.h0000644000175000017500000000407412325322107011576 00000000000000// -*- C++ -*- /** \file cleanup.h * \brief Classes to facilitate use of RIAA cleanup. 
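 * (RAII-style: the stored action runs automatically from a destructor when the
 * guard object goes out of scope.)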
*/ #ifndef _CLEANUP_H_ #define _CLEANUP_H_ 1 #include inline std::function && voidify(std::function &&f) { return move(f); } inline const std::function & voidify(const std::function &f) { return f; } template inline std::function voidify(F &&f) { return [f]() { f(); }; } /** \brief Container for a cleanup action. */ class cleanup { std::function action_; static void nop() {} public: cleanup() : action_ (nop) {} cleanup(const cleanup &) = delete; cleanup(cleanup &&c) : action_(c.action_) { c.release(); } template cleanup(F &&f) : action_ (std::forward(f)) {} template cleanup(Args... args) : action_(voidify(std::bind(args...))) {} ~cleanup() { action_(); } cleanup &operator=(cleanup &&c) { action_.swap(c.action_); return *this; } void reset() { std::function old (action_); release(); old(); } template void reset(F &&f) { std::function old (action_); action_ = std::forward(f); old(); } template void reset(Args... args) { std::function old (action_); action_ = std::bind(args...); old(); } void release() { action_ = nop; } }; /** \brief Like a \ref std::unique_ptr, but half the size because the * cleanup function is specified as part of the type. */ template class unique_obj { T *obj_; public: unique_obj() noexcept : obj_(nullptr) {} explicit unique_obj(T *obj) noexcept : obj_(obj) {} unique_obj(unique_obj &&uo) noexcept : obj_(uo.obj_) { uo.obj_ = nullptr; } ~unique_obj() { if (obj_) destructor(obj_); } void reset(T *obj) { T *old = obj_; obj_ = obj; destructor(old); } T *release() noexcept { T *old = obj_; obj_ = nullptr; return old; } T *get() const noexcept { return obj_; } T *&get() noexcept { return obj_; } operator T*() const noexcept { return obj_; } }; #endif /* !_CLEANUP_H_ */ muchsync-7/missing0000755000175000017500000001533012302053374011374 00000000000000#! /bin/sh # Common wrapper for a few potentially missing GNU programs. scriptversion=2013-10-28.13; # UTC # Copyright (C) 1996-2013 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try '$0 --help' for more information" exit 1 fi case $1 in --is-lightweight) # Used by our autoconf macros to check whether the available missing # script is modern enough. exit 0 ;; --run) # Back-compat with the calling convention used by older automake. shift ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due to PROGRAM being missing or too old. 
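The cleanup class defined above is what muchsync.cc uses in the form cleanup _c (sqlite3_close_v2, db): the callable and arguments given to the constructor are bound immediately and invoked when the object is destroyed, unless release() is called first. Here is a minimal usage sketch with an ordinary stdio handle; the file path is hypothetical and the example only assumes cleanup.h is on the include path.

#include <cstdio>
#include "cleanup.h"

int main()
{
  std::FILE *f = std::fopen("/tmp/cleanup-demo", "w");   // hypothetical path, illustration only
  if (!f)
    return 1;
  // fclose(f) is bound now and runs on every exit path from this scope,
  // mirroring the sqlite3_close_v2 idiom in muchsync.cc.
  cleanup closer (std::fclose, f);
  std::fputs("hello, cleanup\n", f);
  return 0;                                              // closer's destructor closes f here
}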
Options: -h, --help display this help and exit -v, --version output version information and exit Supported PROGRAM values: aclocal autoconf autoheader autom4te automake makeinfo bison yacc flex lex help2man Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and 'g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: unknown '$1' option" echo 1>&2 "Try '$0 --help' for more information" exit 1 ;; esac # Run the given program, remember its exit status. "$@"; st=$? # If it succeeded, we are done. test $st -eq 0 && exit 0 # Also exit now if we it failed (or wasn't found), and '--version' was # passed; such an option is passed most likely to detect whether the # program is present and works. case $2 in --version|--help) exit $st;; esac # Exit code 63 means version mismatch. This often happens when the user # tries to use an ancient version of a tool on a file that requires a # minimum version. if test $st -eq 63; then msg="probably too old" elif test $st -eq 127; then # Program was missing. msg="missing on your system" else # Program was found and executed, but failed. Give up. exit $st fi perl_URL=http://www.perl.org/ flex_URL=http://flex.sourceforge.net/ gnu_software_URL=http://www.gnu.org/software program_details () { case $1 in aclocal|automake) echo "The '$1' program is part of the GNU Automake package:" echo "<$gnu_software_URL/automake>" echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/autoconf>" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; autoconf|autom4te|autoheader) echo "The '$1' program is part of the GNU Autoconf package:" echo "<$gnu_software_URL/autoconf/>" echo "It also requires GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; esac } give_advice () { # Normalize program name to check for. normalized_program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` printf '%s\n' "'$1' is $msg." configure_deps="'configure.ac' or m4 files included by 'configure.ac'" case $normalized_program in autoconf*) echo "You should only need it if you modified 'configure.ac'," echo "or m4 files included by it." program_details 'autoconf' ;; autoheader*) echo "You should only need it if you modified 'acconfig.h' or" echo "$configure_deps." program_details 'autoheader' ;; automake*) echo "You should only need it if you modified 'Makefile.am' or" echo "$configure_deps." program_details 'automake' ;; aclocal*) echo "You should only need it if you modified 'acinclude.m4' or" echo "$configure_deps." program_details 'aclocal' ;; autom4te*) echo "You might have modified some maintainer files that require" echo "the 'autom4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) echo "You should only need it if you modified a '.y' file." echo "You may want to install the GNU Bison package:" echo "<$gnu_software_URL/bison/>" ;; lex*|flex*) echo "You should only need it if you modified a '.l' file." echo "You may want to install the Fast Lexical Analyzer package:" echo "<$flex_URL>" ;; help2man*) echo "You should only need it if you modified a dependency" \ "of a man page." echo "You may want to install the GNU Help2man package:" echo "<$gnu_software_URL/help2man/>" ;; makeinfo*) echo "You should only need it if you modified a '.texi' file, or" echo "any other file indirectly affecting the aspect of the manual." 
echo "You might want to install the Texinfo package:" echo "<$gnu_software_URL/texinfo/>" echo "The spurious makeinfo call might also be the consequence of" echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" echo "want to install GNU make:" echo "<$gnu_software_URL/make/>" ;; *) echo "You might have modified some files without having the proper" echo "tools for further handling them. Check the 'README' file, it" echo "often tells you about the needed prerequisites for installing" echo "this package. You may also peek at any GNU archive site, in" echo "case some other package contains this missing '$1' program." ;; esac } give_advice "$1" | sed -e '1s/^/WARNING: /' \ -e '2,$s/^/ /' >&2 # Propagate the correct exit status (expected to be 127 for a program # not found, 63 for a program that failed due to version mismatch). exit $st # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: muchsync-7/depcomp0000755000175000017500000005601612302053374011360 00000000000000#! /bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2013-05-30.07; # UTC # Copyright (C) 1999-2013 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by 'PROGRAMS ARGS'. object Object file output by 'PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputting dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac # Get the directory component of the given path, and save it in the # global variables '$dir'. Note that this directory component will # be either empty or ending with a '/' character. This is deliberate. set_dir_from () { case $1 in */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; *) dir=;; esac } # Get the suffix-stripped basename of the given path, and save it the # global variable '$base'. set_base_from () { base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` } # If no dependency file was actually created by the compiler invocation, # we still have to create a dummy depfile, to avoid errors with the # Makefile "include basename.Plo" scheme. 
make_dummy_depfile () { echo "#dummy" > "$depfile" } # Factor out some common post-processing of the generated depfile. # Requires the auxiliary global variable '$tmpdepfile' to be set. aix_post_process_depfile () { # If the compiler actually managed to produce a dependency file, # post-process it. if test -f "$tmpdepfile"; then # Each line is of the form 'foo.o: dependency.h'. # Do two passes, one to just change these to # $object: dependency.h # and one to simply output # dependency.h: # which is needed to avoid the deleted-header problem. { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" } > "$depfile" rm -f "$tmpdepfile" else make_dummy_depfile fi } # A tabulation character. tab=' ' # A newline character. nl=' ' # Character ranges might be problematic outside the C locale. # These definitions help. upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ lower=abcdefghijklmnopqrstuvwxyz digits=0123456789 alpha=${upper}${lower} if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Avoid interferences from the environment. gccflag= dashmflag= # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvisualcpp fi if test "$depmode" = msvc7msys; then # This is just like msvc7 but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvc7 fi if test "$depmode" = xlc; then # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. gccflag=-qmakedep=gcc,-MF depmode=gcc fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. ## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. ## (see the conditional assignment to $gccflag above). 
## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). Also, it might not be ## supported by the other compilers which use the 'gcc' depmode. ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The second -e expression handles DOS-style file names with drive # letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the "deleted header file" problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. ## Some versions of gcc put a space before the ':'. On the theory ## that the space means something, we add a space to the output as ## well. hp depmode also adds that space, but also prefixes the VPATH ## to the object. Take care to not repeat it in the output. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like '#:fec' to the end of the # dependency line. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ | tr "$nl" ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" ;; xlc) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts '$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. 
set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done aix_post_process_depfile ;; tcc) # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 # FIXME: That version still under development at the moment of writing. # Make that this statement remains true also for stable, released # versions. # It will wrap lines (doesn't matter whether long or short) with a # trailing '\', as in: # # foo.o : \ # foo.c \ # foo.h \ # # It will put a trailing '\' even on the last line, and will use leading # spaces rather than leading tabs (at least since its commit 0394caf7 # "Emit spaces for -MD"). "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. # We have to change lines of the first kind to '$object: \'. sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" # And for each line of the second kind, we have to emit a 'dep.h:' # dummy dependency, to avoid the deleted-header problem. sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" rm -f "$tmpdepfile" ;; ## The order of this option in the case statement is important, since the ## shell code in configure will try each of these formats in the order ## listed in this file. A plain '-MD' option would be understood by many ## compilers, so we must ensure this comes after the gcc and icc options. pgcc) # Portland's C compiler understands '-MD'. # Will always output deps to 'file.d' where file is the root name of the # source file under compilation, even if file resides in a subdirectory. # The object file name does not affect the name of the '.d' file. # pgcc 10.2 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using '\' : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... set_dir_from "$object" # Use the source, not the object, to determine the base name, since # that's sadly what pgcc will do too. set_base_from "$source" tmpdepfile=$base.d # For projects that build the same source file twice into different object # files, the pgcc approach of using the *source* file root name can cause # problems in parallel builds. Use a locking strategy to avoid stomping on # the same $tmpdepfile. lockdir=$base.d-lock trap " echo '$0: caught signal, cleaning up...' >&2 rmdir '$lockdir' exit 1 " 1 2 13 15 numtries=100 i=$numtries while test $i -gt 0; do # mkdir is a portable test-and-set. if mkdir "$lockdir" 2>/dev/null; then # This process acquired the lock. "$@" -MD stat=$? # Release the lock. rmdir "$lockdir" break else # If the lock is being held by a different process, wait # until the winning process is done or we timeout. while test -d "$lockdir" && test $i -gt 0; do sleep 1 i=`expr $i - 1` done fi i=`expr $i - 1` done trap - 1 2 13 15 if test $i -le 0; then echo "$0: failed to acquire lock after $numtries attempts" >&2 echo "$0: check lockdir '$lockdir'" >&2 exit 1 fi if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. 
# Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" # Add 'dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in 'foo.d' instead, so we check for that too. # Subdirectories are respected. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then # Libtool generates 2 separate objects for the 2 libraries. These # two compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir$base.o.d # libtool 1.5 tmpdepfile2=$dir.libs/$base.o.d # Likewise. tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d "$@" -MD fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done # Same post-processing that is required for AIX mode. aix_post_process_depfile ;; msvc7) if test "$libtool" = yes; then showIncludes=-Wc,-showIncludes else showIncludes=-showIncludes fi "$@" $showIncludes > "$tmpdepfile" stat=$? grep -v '^Note: including file: ' "$tmpdepfile" if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The first sed program below extracts the file names and escapes # backslashes for cygpath. The second sed program outputs the file # name when reading, but also accumulates all include files in the # hold buffer in order to output them again at the end. This only # works with sed implementations that can handle large buffers. 
sed < "$tmpdepfile" -n ' /^Note: including file: *\(.*\)/ { s//\1/ s/\\/\\\\/g p }' | $cygpath_u | sort -u | sed -n ' s/ /\\ /g s/\(.*\)/'"$tab"'\1 \\/p s/.\(.*\) \\/\1:/ H $ { s/.*/'"$tab"'/ G p }' >> "$depfile" echo >> "$depfile" # make sure the fragment doesn't end with a backslash rm -f "$tmpdepfile" ;; msvc7msys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for ':' # in the target name. This is to cope with DOS-style filenames: # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. "$@" $dashmflag | sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this sed invocation # correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" # makedepend may prepend the VPATH from the source file name to the object. # No need to regex-escape $object, excess matching of '.' is harmless. sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process the last invocation # correctly. Breaking it into two sed invocations is a workaround. sed '1,2d' "$tmpdepfile" \ | tr ' ' "$nl" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. 
IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E \ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" echo "$tab" >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: muchsync-7/aclocal.m40000644000175000017500000015044014357577112011654 00000000000000# generated automatically by aclocal 1.16.5 -*- Autoconf -*- # Copyright (C) 1996-2021 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],, [m4_warning([this file was generated for autoconf 2.71. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 11 (pkg-config-0.29.1) dnl Copyright © 2004 Scott James Remnant . dnl Copyright © 2012-2015 Dan Nicholson dnl dnl This program is free software; you can redistribute it and/or modify dnl it under the terms of the GNU General Public License as published by dnl the Free Software Foundation; either version 2 of the License, or dnl (at your option) any later version. 
dnl dnl This program is distributed in the hope that it will be useful, but dnl WITHOUT ANY WARRANTY; without even the implied warranty of dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU dnl General Public License for more details. dnl dnl You should have received a copy of the GNU General Public License dnl along with this program; if not, write to the Free Software dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA dnl 02111-1307, USA. dnl dnl As a special exception to the GNU General Public License, if you dnl distribute this file as part of a program that contains a dnl configuration script generated by Autoconf, you may include it under dnl the same distribution terms that you use for the rest of that dnl program. dnl PKG_PREREQ(MIN-VERSION) dnl ----------------------- dnl Since: 0.29 dnl dnl Verify that the version of the pkg-config macros are at least dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's dnl installed version of pkg-config, this checks the developer's version dnl of pkg.m4 when generating configure. dnl dnl To ensure that this macro is defined, also add: dnl m4_ifndef([PKG_PREREQ], dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])]) dnl dnl See the "Since" comment for each macro you use to see what version dnl of the macros you require. m4_defun([PKG_PREREQ], [m4_define([PKG_MACROS_VERSION], [0.29.1]) m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1, [m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])]) ])dnl PKG_PREREQ dnl PKG_PROG_PKG_CONFIG([MIN-VERSION]) dnl ---------------------------------- dnl Since: 0.16 dnl dnl Search for the pkg-config tool and set the PKG_CONFIG variable to dnl first found in the path. Checks that the version of pkg-config found dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is dnl used since that's the first version where most current features of dnl pkg-config existed. AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])dnl PKG_PROG_PKG_CONFIG dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------------------------------- dnl Since: 0.18 dnl dnl Check to see whether a particular set of modules exists. Similar to dnl PKG_CHECK_MODULES(), but does not set variables or print errors. 
dnl dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) dnl only at the first occurence in configure.ac, so if the first place dnl it's called might be skipped (such as if it is within an "if", you dnl have to call PKG_CHECK_EXISTS manually AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) dnl --------------------------------------------- dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting dnl pkg_failed based on the result. m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])dnl _PKG_CONFIG dnl _PKG_SHORT_ERRORS_SUPPORTED dnl --------------------------- dnl Internal check to see if pkg-config supports short errors. AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])dnl _PKG_SHORT_ERRORS_SUPPORTED dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], dnl [ACTION-IF-NOT-FOUND]) dnl -------------------------------------------------------------- dnl Since: 0.4.0 dnl dnl Note that if there is a possibility the first call to dnl PKG_CHECK_MODULES might not happen, you should be sure to include an dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. 
_PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])dnl PKG_CHECK_MODULES dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], dnl [ACTION-IF-NOT-FOUND]) dnl --------------------------------------------------------------------- dnl Since: 0.29 dnl dnl Checks for existence of MODULES and gathers its build flags with dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags dnl and VARIABLE-PREFIX_LIBS from --libs. dnl dnl Note that if there is a possibility the first call to dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to dnl include an explicit call to PKG_PROG_PKG_CONFIG in your dnl configure.ac. AC_DEFUN([PKG_CHECK_MODULES_STATIC], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl _save_PKG_CONFIG=$PKG_CONFIG PKG_CONFIG="$PKG_CONFIG --static" PKG_CHECK_MODULES($@) PKG_CONFIG=$_save_PKG_CONFIG[]dnl ])dnl PKG_CHECK_MODULES_STATIC dnl PKG_INSTALLDIR([DIRECTORY]) dnl ------------------------- dnl Since: 0.27 dnl dnl Substitutes the variable pkgconfigdir as the location where a module dnl should install pkg-config .pc files. By default the directory is dnl $libdir/pkgconfig, but the default can be changed by passing dnl DIRECTORY. The user can override through the --with-pkgconfigdir dnl parameter. AC_DEFUN([PKG_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([pkgconfigdir], [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, [with_pkgconfigdir=]pkg_default) AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ])dnl PKG_INSTALLDIR dnl PKG_NOARCH_INSTALLDIR([DIRECTORY]) dnl -------------------------------- dnl Since: 0.27 dnl dnl Substitutes the variable noarch_pkgconfigdir as the location where a dnl module should install arch-independent pkg-config .pc files. By dnl default the directory is $datadir/pkgconfig, but the default can be dnl changed by passing DIRECTORY. The user can override through the dnl --with-noarch-pkgconfigdir parameter. AC_DEFUN([PKG_NOARCH_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([noarch-pkgconfigdir], [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, [with_noarch_pkgconfigdir=]pkg_default) AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ])dnl PKG_NOARCH_INSTALLDIR dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE, dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------- dnl Since: 0.28 dnl dnl Retrieves the value of the pkg-config variable for the given module. AC_DEFUN([PKG_CHECK_VAR], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl _PKG_CONFIG([$1], [variable="][$3]["], [$2]) AS_VAR_COPY([$1], [pkg_cv_][$1]) AS_VAR_IF([$1], [""], [$5], [$4])dnl ])dnl PKG_CHECK_VAR dnl PKG_WITH_MODULES(VARIABLE-PREFIX, MODULES, dnl [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND], dnl [DESCRIPTION], [DEFAULT]) dnl ------------------------------------------ dnl dnl Prepare a "--with-" configure option using the lowercase dnl [VARIABLE-PREFIX] name, merging the behaviour of AC_ARG_WITH and dnl PKG_CHECK_MODULES in a single macro. 
AC_DEFUN([PKG_WITH_MODULES], [ m4_pushdef([with_arg], m4_tolower([$1])) m4_pushdef([description], [m4_default([$5], [build with ]with_arg[ support])]) m4_pushdef([def_arg], [m4_default([$6], [auto])]) m4_pushdef([def_action_if_found], [AS_TR_SH([with_]with_arg)=yes]) m4_pushdef([def_action_if_not_found], [AS_TR_SH([with_]with_arg)=no]) m4_case(def_arg, [yes],[m4_pushdef([with_without], [--without-]with_arg)], [m4_pushdef([with_without],[--with-]with_arg)]) AC_ARG_WITH(with_arg, AS_HELP_STRING(with_without, description[ @<:@default=]def_arg[@:>@]),, [AS_TR_SH([with_]with_arg)=def_arg]) AS_CASE([$AS_TR_SH([with_]with_arg)], [yes],[PKG_CHECK_MODULES([$1],[$2],$3,$4)], [auto],[PKG_CHECK_MODULES([$1],[$2], [m4_n([def_action_if_found]) $3], [m4_n([def_action_if_not_found]) $4])]) m4_popdef([with_arg]) m4_popdef([description]) m4_popdef([def_arg]) ])dnl PKG_WITH_MODULES dnl PKG_HAVE_WITH_MODULES(VARIABLE-PREFIX, MODULES, dnl [DESCRIPTION], [DEFAULT]) dnl ----------------------------------------------- dnl dnl Convenience macro to trigger AM_CONDITIONAL after PKG_WITH_MODULES dnl check._[VARIABLE-PREFIX] is exported as make variable. AC_DEFUN([PKG_HAVE_WITH_MODULES], [ PKG_WITH_MODULES([$1],[$2],,,[$3],[$4]) AM_CONDITIONAL([HAVE_][$1], [test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"]) ])dnl PKG_HAVE_WITH_MODULES dnl PKG_HAVE_DEFINE_WITH_MODULES(VARIABLE-PREFIX, MODULES, dnl [DESCRIPTION], [DEFAULT]) dnl ------------------------------------------------------ dnl dnl Convenience macro to run AM_CONDITIONAL and AC_DEFINE after dnl PKG_WITH_MODULES check. HAVE_[VARIABLE-PREFIX] is exported as make dnl and preprocessor variable. AC_DEFUN([PKG_HAVE_DEFINE_WITH_MODULES], [ PKG_HAVE_WITH_MODULES([$1],[$2],[$3],[$4]) AS_IF([test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"], [AC_DEFINE([HAVE_][$1], 1, [Enable ]m4_tolower([$1])[ support])]) ])dnl PKG_HAVE_DEFINE_WITH_MODULES # Copyright (C) 2002-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.16' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.16.5], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.16.5])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2021 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. 
Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. # TODO: see whether this extra hack can be removed once we start # requiring Autoconf 2.70 or later. 
AS_CASE([$CONFIG_FILES], [*\'*], [eval set x "$CONFIG_FILES"], [*], [set x $CONFIG_FILES]) shift # Used to flag and report bootstrapping failures. am_rc=0 for am_mf do # Strip MF so we end up with the name of the file. am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile which includes # dependency-tracking related rules and includes. # Grep'ing the whole file directly is not great: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ || continue am_dirpart=`AS_DIRNAME(["$am_mf"])` am_filepart=`AS_BASENAME(["$am_mf"])` AM_RUN_LOG([cd "$am_dirpart" \ && sed -e '/# am--include-marker/d' "$am_filepart" \ | $MAKE -f - am--depfiles]) || am_rc=$? done if test $am_rc -ne 0; then AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments for automatic dependency tracking. If GNU make was not used, consider re-running the configure script with MAKE="gmake" (or whatever is necessary). You can also try re-running configure with the '--disable-dependency-tracking' option to at least be able to build the package (albeit without support for automatic dependency tracking).]) fi AS_UNSET([am_dirpart]) AS_UNSET([am_filepart]) AS_UNSET([am_mf]) AS_UNSET([am_rc]) rm -f conftest-deps.mk } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking is enabled. # This creates each '.Po' and '.Plo' makefile fragment that we'll need in # order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC]) [_AM_PROG_CC_C_O ]) # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl m4_ifdef([_$0_ALREADY_INIT], [m4_fatal([$0 expanded multiple times ]m4_defn([_$0_ALREADY_INIT]))], [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. 
m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) # Variables for tags utilities; see am/tags.am if test -z "$CTAGS"; then CTAGS=ctags fi AC_SUBST([CTAGS]) if test -z "$ETAGS"; then ETAGS=etags fi AC_SUBST([ETAGS]) if test -z "$CSCOPE"; then CSCOPE=cscope fi AC_SUBST([CSCOPE]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. 
AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) fi fi dnl The trailing newline in this macro's definition is deliberate, for dnl backward compatibility and to allow trailing 'dnl'-style comments dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. 
AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAKE_INCLUDE() # ----------------- # Check whether make has an 'include' directive that can support all # the idioms we need for our automatic dependency tracking code. AC_DEFUN([AM_MAKE_INCLUDE], [AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) cat > confinc.mk << 'END' am__doit: @echo this is the am__doit target >confinc.out .PHONY: am__doit END am__include="#" am__quote= # BSD make does it like this. echo '.include "confinc.mk" # ignored' > confmf.BSD # Other make implementations (GNU, Solaris 10, AIX) do it like this. echo 'include confinc.mk # ignored' > confmf.GNU _am_result=no for s in GNU BSD; do AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) AS_CASE([$?:`cat confinc.out 2>/dev/null`], ['0:this is the am__doit target'], [AS_CASE([$s], [BSD], [am__include='.include' am__quote='"'], [am__include='include' am__quote=''])]) if test "$am__include" != "#"; then _am_result="yes ($s style)" break fi done rm -f confinc.* confmf.* AC_MSG_RESULT([${_am_result}]) AC_SUBST([am__include])]) AC_SUBST([am__quote])]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. # If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then MISSING="\${SHELL} '$am_aux_dir/missing'" fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. 
am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. dnl See automake bug#9928 and bug#10237. am_make=${MAKE-make} AC_CACHE_CHECK([whether $am_make supports nested variables], [am_cv_make_support_nested_variables], [if AS_ECHO([['TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi]) if test $am_cv_make_support_nested_variables = yes; then dnl Using '$V' instead of '$(V)' breaks IRIX make. AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AC_SUBST([AM_V])dnl AM_SUBST_NOTMAKE([AM_V])dnl AC_SUBST([AM_DEFAULT_V])dnl AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl AC_SUBST([AM_DEFAULT_VERBOSITY])dnl AM_BACKSLASH='\' AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) # Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor 'install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in "make install-strip", and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006-2021 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of 'v7', 'ustar', or 'pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar # AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar], [# The POSIX 1988 'ustar' format is defined with fixed-size fields. # There is notably a 21 bits limit for the UID and the GID. In fact, # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 # and bug#13588). am_max_uid=2097151 # 2^21 - 1 am_max_gid=$am_max_uid # The $UID and $GID variables are not portable, so we need to resort # to the POSIX-mandated id(1) utility. Errors in the 'id' calls # below are definitely unexpected, so allow the users to see them # (that is, avoid stderr redirection). am_uid=`id -u || echo unknown` am_gid=`id -g || echo unknown` AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) if test $am_uid -le $am_max_uid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) if test $am_gid -le $am_max_gid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi], [pax], [], [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. _am_tools=${am_cv_prog_tar_$1-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. 
(tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works. rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([m4/ax_append_compile_flags.m4]) m4_include([m4/ax_append_flag.m4]) m4_include([m4/ax_check_compile_flag.m4]) m4_include([m4/ax_cxx_compile_stdcxx_11.m4]) muchsync-7/Makefile.am0000644000175000017500000000125314357577451012053 00000000000000 ACLOCAL_AMFLAGS = ${ACLOCAL_FLAGS} -I m4 AM_CPPFLAGS = $(sqlite3_CFLAGS) $(libcrypto_CFLAGS) $(xapian_CPPFLAGS) LDADD = $(sqlite3_LIBS) $(libcrypto_LIBS) -lnotmuch $(xapian_LIBS) bin_PROGRAMS = muchsync muchsync_SOURCES = infinibuf.cc misc.cc muchsync.cc notmuch_db.cc \ protocol.cc sqlstmt.cc sql_db.cc xapian_sync.cc cleanup.h \ misc.h muchsync.h infinibuf.h notmuch_db.h sqlstmt.h sql_db.h CLEANFILES = *~ maintainer-clean-local: +@echo rm -rf `sed -ne 's!^/!!p' .gitignore` Makefile.in rm -rf `sed -ne 's!^/!!p' .gitignore` Makefile.in muchsync.1: muchsync.1.md pandoc -s -w man muchsync.1.md -o muchsync.1 man_MANS = muchsync.1 EXTRA_DIST = muchsync.1.md $(man_MANS) muchsync-7/xapian_sync.cc0000644000175000017500000005014713403557504012634 00000000000000#include #include #include #include #include #include #include #include #include #include #include #include "muchsync.h" #include "misc.h" using namespace std; // XXX - these things have to match notmuch-private.h constexpr int NOTMUCH_VALUE_TIMESTAMP = 0; constexpr int NOTMUCH_VALUE_MESSAGE_ID = 1; const string notmuch_ghost_term = "Tghost"; const string notmuch_tag_prefix = "K"; const string notmuch_directory_prefix = "XDIRECTORY"; const string notmuch_file_direntry_prefix = "XFDIRENTRY"; static void drop_triggers(sqlite3 *db) { for (const char *trigger : { "tag_delete", "tag_insert", "link_delete", "link_insert" }) sqlexec (db, "DROP TRIGGER IF EXISTS %s;", trigger); for (const char *table : { "modified_docids", "modified_xapian_dirs", "modified_hashes" }) sqlexec(db, "DROP TABLE IF EXISTS %s;", table); } static void set_triggers(sqlite3 *db) { drop_triggers (db); sqlexec(db, R"( CREATE TEMP TABLE IF NOT EXISTS modified_docids ( docid INTEGER PRIMARY KEY, new INTEGER); CREATE TEMP TRIGGER tag_delete AFTER DELETE ON main.tags WHEN old.docid NOT IN (SELECT docid FROM modified_docids) BEGIN INSERT INTO modified_docids (docid, new) VALUES (old.docid, 0); END; CREATE TEMP TRIGGER tag_insert AFTER INSERT ON main.tags WHEN new.docid NOT IN (SELECT docid FROM modified_docids) BEGIN INSERT INTO modified_docids (docid, new) VALUES (new.docid, 0); END; CREATE TEMP TABLE IF NOT EXISTS modified_xapian_dirs ( dir_docid INTEGER PRIMARY KEY); CREATE TEMP TABLE IF NOT EXISTS 
modified_hashes (hash_id INTEGER PRIMARY KEY); CREATE TEMP TRIGGER link_delete AFTER DELETE ON xapian_files WHEN old.hash_id NOT IN (SELECT hash_id FROM modified_hashes) BEGIN INSERT INTO modified_hashes (hash_id) VALUES (old.hash_id); END; CREATE TEMP TRIGGER link_insert AFTER INSERT ON xapian_files WHEN new.hash_id NOT IN (SELECT hash_id FROM modified_hashes) BEGIN INSERT INTO modified_hashes (hash_id) VALUES (new.hash_id); END; )"); } // Non-thread-safe unility to work around missing openat & friends. template R with_cwd(int dfd, R errval, function work) { int dot = open(".", O_RDONLY); if (dot < 0 || fchdir(dfd) < 0) return errval; cleanup _c ([dot]() { fchdir(dot); close(dot); }); return work(); } #if !HAVE_OPENAT #define openat fake_openat static int openat(int dfd, const char *entry, int mode) { return with_cwd(dfd, -1, [=]() { return open(entry, mode); }); } #define fstatat fake_fstatat static int fstatat(int dfd, const char *entry, struct stat *buf, int flag) { return with_cwd(dfd, -1, [=]() { return stat(entry, buf); }); } #endif // !HAVE_OPENAT #if !HAVE_FDOPENDIR #define fdopendir fake_fdopendir static DIR * fdopendir(int dfd) { return with_cwd(dfd, nullptr, []() { return opendir("."); }); } #endif // !HAVE_FDOPENDIR static string get_sha (int dfd, const char *direntry, i64 *sizep) { int fd = openat(dfd, direntry, O_RDONLY); if (fd < 0) throw runtime_error (string() + direntry + ": " + strerror (errno)); cleanup _c (close, fd); hash_ctx ctx; char buf[32768]; int n; i64 sz = 0; while ((n = read (fd, buf, sizeof (buf))) > 0) { ctx.update (buf, n); sz += n; } if (n < 0) throw runtime_error (string() + direntry + ": " + strerror (errno)); if (sizep) *sizep = sz; return ctx.final(); } template void sync_table (sqlstmt_t &s, T &t, T &te, function cmpfn, function update) { s.step(); while (s.row()) { int cmp {t == te ? -1 : cmpfn (s, t)}; if (cmp == 0) { update (&s, &t); s.step(); ++t; } else if (cmp < 0) { update (&s, nullptr); s.step(); } else { update (nullptr, &t); ++t; } } while (t != te) { update (nullptr, &t); ++t; } } static string tag_from_term (const string &term) { assert(!strncmp(term.c_str(), notmuch_tag_prefix.c_str(), notmuch_tag_prefix.length())); return term.substr(notmuch_tag_prefix.length()); } static void xapian_scan_tags (sqlite3 *sqldb, Xapian::Database &xdb, const writestamp &ws) { sqlexec(sqldb, "DROP TABLE IF EXISTS dead_tags; " "CREATE TEMP TABLE dead_tags (tag TEXT PRIMARY KEY); " "INSERT INTO dead_tags SELECT DISTINCT tag FROM tags;"); sqlstmt_t scan (sqldb, "SELECT docid, rowid FROM tags" " WHERE tag = ? 
ORDER BY docid ASC;"), add_tag (sqldb, "INSERT INTO tags (docid, tag) VALUES (?, ?);"), del_tag (sqldb, "DELETE FROM tags WHERE rowid = ?;"), record_tag (sqldb, "DELETE FROM dead_tags WHERE tag = ?;"); for (Xapian::TermIterator ti = xdb.allterms_begin(notmuch_tag_prefix), te = xdb.allterms_end(notmuch_tag_prefix); ti != te; ti++) { string tag = tag_from_term (*ti); if (opt_verbose > 1) cerr << " " << tag << "\n"; record_tag.reset().param(tag).step(); scan.reset().bind_text(1, tag); add_tag.reset().bind_text(2, tag); Xapian::PostingIterator pi = xdb.postlist_begin (*ti), pe = xdb.postlist_end (*ti); sync_table (scan, pi, pe, [] (sqlstmt_t &s, Xapian::PostingIterator &p) -> int { return s.integer(0) - *p; }, [&] (sqlstmt_t *sp, Xapian::PostingIterator *pp) { if (!sp) add_tag.reset().bind_int(1, **pp).step(); else if (!pp) del_tag.reset().bind_value(1, sp->value(1)).step(); }); } sqlexec(sqldb, "DELETE FROM tags WHERE tag IN (SELECT * FROM dead_tags);"); sqlexec(sqldb, "UPDATE message_ids SET replica = %lld, version = %lld" " WHERE docid IN (SELECT docid FROM modified_docids WHERE new = 0);", ws.first, ws.second); } static void xapian_scan_message_ids (sqlite3 *sqldb, const writestamp &ws, Xapian::Database xdb) { sqlstmt_t scan(sqldb, "SELECT message_id, docid FROM message_ids ORDER BY docid ASC;"), add_message(sqldb, "INSERT INTO message_ids (message_id, docid, replica, version)" " VALUES (?, ?, %lld, %lld);", ws.first, ws.second), flag_new_message(sqldb, "INSERT INTO modified_docids (docid, new)" " VALUES (?, 1);"), del_message(sqldb, "DELETE FROM message_ids WHERE docid = ?;"); Xapian::PostingIterator gi = xdb.postlist_begin(notmuch_ghost_term), ge = xdb.postlist_end(notmuch_ghost_term); Xapian::ValueIterator vi = xdb.valuestream_begin (NOTMUCH_VALUE_MESSAGE_ID), ve = xdb.valuestream_end (NOTMUCH_VALUE_MESSAGE_ID); sync_table (scan, vi, ve, [] (sqlstmt_t &s, Xapian::ValueIterator &vi) -> int { return s.integer(1) - vi.get_docid(); }, [&add_message,&del_message,&flag_new_message,&gi,&ge,&ve] (sqlstmt_t *sp, Xapian::ValueIterator *vip) { if (vip) { while (gi != ge && *gi < vip->get_docid()) ++gi; if (gi != ge && *gi == vip->get_docid()) { if (!sp) return; vip = nullptr; } } if (!sp) { i64 docid = vip->get_docid(); add_message.reset().param(**vip, docid).step(); flag_new_message.reset().param(docid).step(); } else if (!vip) del_message.reset().param(sp->value(1)).step(); else if (sp->str(0) != **vip) { // This should be really unusual cerr << "warning: message id changed from <" << sp->str(0) << "> to <" << **vip << ">\n"; del_message.reset().param(sp->value(1)).step(); add_message.reset().param(**vip, i64(vip->get_docid())).step(); } }); } static Xapian::docid xapian_get_unique_posting (const Xapian::Database &xdb, const string &term) { Xapian::PostingIterator pi = xdb.postlist_begin (term), pe = xdb.postlist_end (term); if (pi == pe) throw range_error (string("xapian term ") + term + " has no postings"); i64 ret = *pi; if (++pi != pe) cerr << "warning: xapian term " << term << " has multiple postings\n"; return ret; } static void xapian_scan_directories (sqlite3 *sqldb, Xapian::Database &xdb) { sqlstmt_t scandirs(sqldb, "SELECT dir_path, dir_docid, dir_mtime FROM xapian_dirs" " ORDER BY dir_path;"), deldir(sqldb, "DELETE FROM xapian_dirs WHERE dir_docid = ?;"), delfiles(sqldb, "DELETE FROM xapian_files WHERE dir_docid = ?;"), adddir(sqldb, "INSERT INTO xapian_dirs (dir_path, dir_docid, dir_mtime)" " VALUES (?, ?, ?);"), upddir(sqldb, "UPDATE xapian_dirs SET dir_mtime = ? 
WHERE dir_docid = ?;"), flagdir(sqldb, "INSERT INTO modified_xapian_dirs (dir_docid) VALUES (?);"); Xapian::TermIterator ti = xdb.allterms_begin(notmuch_directory_prefix), te = xdb.allterms_end(notmuch_directory_prefix); scandirs.step(); while (ti != te || scandirs.row()) { int d; // >0 if only sqlite valid, <0 if only xapian valid string dir; if (!scandirs.row()) { dir = (*ti).substr(notmuch_directory_prefix.length()); d = -1; } else if (ti == te) d = 1; else { dir = (*ti).substr(notmuch_directory_prefix.length()); d = dir.compare(scandirs.c_str(0)); } if (d > 0) { deldir.reset().param(scandirs.value(1)).step(); delfiles.reset().param(scandirs.value(1)).step(); scandirs.step(); continue; } if (dir.empty()) dir = "."; Xapian::docid dir_docid = xapian_get_unique_posting(xdb, *ti); if (d == 0 && dir_docid != scandirs.integer(1)) { deldir.reset().param(scandirs.value(1)).step(); delfiles.reset().param(scandirs.value(1)).step(); scandirs.step(); continue; } time_t mtime = Xapian::sortable_unserialise (xdb.get_document(dir_docid).get_value(NOTMUCH_VALUE_TIMESTAMP)); if (d < 0) { deldir.reset().param(i64(dir_docid)).step(); delfiles.reset().param(i64(dir_docid)).step(); adddir.reset().param(dir, i64(dir_docid), i64(mtime)).step(); flagdir.reset().param(i64(dir_docid)).step(); ++ti; continue; } if (mtime != scandirs.integer(2)) { flagdir.reset().param(i64(dir_docid)).step(); upddir.reset().param(i64(mtime), i64(dir_docid)).step(); } ++ti; scandirs.step(); } } class fileops { public: sqlstmt_t scan_dir_; private: sqlstmt_t get_msgid_; sqlstmt_t del_file_; sqlstmt_t add_file_; sqlstmt_t upd_file_; sqlstmt_t get_hashid_; sqlstmt_t get_hash_; sqlstmt_t add_hash_; sqlstmt_t upd_hash_; string get_msgid(i64 docid); i64 get_file_hash_id(int dfd, const string &file, i64 docid); public: fileops(sqlite3 *db, const writestamp &ws); void del_file(i64 rowid) { del_file_.reset().param(rowid).step(); } void add_file(const string &dir, int dfd, i64 dir_docid, string name, i64 docid); void check_file(const string &dir, int dfd, i64 dir_docid); }; fileops::fileops(sqlite3 *db, const writestamp &ws) : scan_dir_(db, "SELECT rowid, name, docid%s" " FROM xapian_files WHERE dir_docid = ? ORDER BY name;", opt_fullscan ? ", mtime, inode, hash_id" : ""), get_msgid_(db, "SELECT message_id FROM message_ids WHERE docid = ?;"), del_file_(db, "DELETE FROM xapian_files WHERE rowid = ?;"), add_file_(db, "INSERT INTO xapian_files" " (dir_docid, name, docid, mtime, inode, hash_id)" " VALUES (?, ?, ?, ?, ?, ?);"), upd_file_(db, "UPDATE xapian_files SET mtime = ?, inode = ?" " WHERE rowid = ?;"), get_hashid_(db, opt_fullscan ? "SELECT hash_id, size, message_id FROM maildir_hashes" " WHERE hash = ?;" : "SELECT hash_id FROM maildir_hashes WHERE hash = ?;"), get_hash_(db, "SELECT hash, size FROM maildir_hashes WHERE hash_id = ?;"), add_hash_(db, "INSERT OR REPLACE INTO maildir_hashes " " (hash, size, message_id, replica, version)" " VALUES (?, ?, ?, %lld, %lld);", ws.first, ws.second), upd_hash_(db, "UPDATE maildir_hashes SET size = ?, message_id = ?" 
" WHERE hash_id = ?;", ws.first, ws.second) { } string fileops::get_msgid(i64 docid) { get_msgid_.reset().param(docid).step(); if (!get_msgid_.row()) throw runtime_error ("xapian_fileops: unknown docid " + to_string(docid)); return get_msgid_.str(0); } i64 fileops::get_file_hash_id(int dfd, const string &name, i64 docid) { i64 sz; if (opt_verbose > 2) cerr << " " << name << '\n'; string hash = get_sha(dfd, name.c_str(), &sz); if (get_hashid_.reset().param(hash).step().row()) { i64 hash_id = get_hashid_.integer(0); if (!opt_fullscan) return hash_id; string msgid = get_msgid(docid); if (sz == get_hashid_.integer(1) && msgid == get_hashid_.str(2)) return hash_id; // This should almost never happen cerr << "size or message-id changed for hash " << hash << '\n'; upd_hash_.reset().param(sz, msgid, hash_id).step(); return hash_id; } add_hash_.reset().param(hash, sz, get_msgid(docid)).step(); return sqlite3_last_insert_rowid(add_hash_.getdb()); } void fileops::add_file(const string &dir, int dfd, i64 dir_docid, string name, i64 docid) { struct stat sb; if (fstatat(dfd, name.c_str(), &sb, 0)) { if (errno == ENOENT) return; throw runtime_error (dir + ": " + strerror(errno)); } if (!S_ISREG(sb.st_mode)) return; i64 hash_id = get_file_hash_id(dfd, name, docid); add_file_.reset() .param(dir_docid, name, docid, ts_to_double(sb.ST_MTIM), i64(sb.st_ino), hash_id).step(); } void fileops::check_file(const string &dir, int dfd, i64 dir_docid) { if (!opt_fullscan) return; string name = scan_dir_.str(1); struct stat sb; if (fstatat(dfd, name.c_str(), &sb, 0)) { if (errno == ENOENT) return; throw runtime_error (dir + ": " + strerror(errno)); } if (!S_ISREG(sb.st_mode)) return; double fs_mtim = ts_to_double(sb.ST_MTIM); i64 fs_inode = sb.st_ino, fs_size = sb.st_size; double db_mtim = scan_dir_.real(3); i64 db_inode = scan_dir_.integer(4); i64 db_hashid = scan_dir_.integer(5); if (!get_hash_.reset().param(db_hashid).step().row()) throw runtime_error ("invalid hash_id: " + to_string(db_hashid)); i64 db_size = get_hash_.integer(1); if (fs_mtim == db_mtim && fs_inode == db_inode && fs_size == db_size) return; i64 rowid = scan_dir_.integer(0), docid = scan_dir_.integer(2); i64 fs_hashid = get_file_hash_id(dfd, name, docid); if (db_hashid == fs_hashid) upd_file_.reset().param(fs_mtim, fs_inode, rowid).step(); else { del_file_.reset().param(rowid).step(); add_file_.reset().param(dir_docid, name, docid, fs_mtim, fs_inode, fs_hashid); } } static void xapian_scan_filenames (sqlite3 *db, const string &maildir, const writestamp &ws, Xapian::Database xdb) { sqlstmt_t dirscan (db, "SELECT dir_path, dir_docid FROM xapian_dirs%s;", opt_fullscan ? 
"" : " NATURAL JOIN modified_xapian_dirs"); fileops f (db, ws); while (dirscan.step().row()) { string dir = dirscan.str(0); if (opt_verbose > 1) cerr << " " << dir << '\n'; string dirpath = maildir + "/" + dir; int dfd = open(dirpath.c_str(), O_RDONLY); if (dfd == -1 && errno != ENOENT) { cerr << dirpath << ": " << strerror (errno) << '\n'; continue; } cleanup _close (close, dfd); i64 dir_docid = dirscan.integer(1); f.scan_dir_.reset().param(dir_docid).step(); string dirtermprefix = (notmuch_file_direntry_prefix + to_string (dir_docid) + ":"); Xapian::TermIterator ti = xdb.allterms_begin(dirtermprefix), te = xdb.allterms_end(dirtermprefix); size_t dirtermprefixlen = dirtermprefix.size(); unordered_map to_add; while (f.scan_dir_.row() && ti != te) { const char *dbname = f.scan_dir_.c_str(1); string term = *ti; const char *xname = &term[dirtermprefixlen]; int cmp = strcmp(dbname,xname); if (!cmp) { if (opt_fullscan) f.check_file(dir, dfd, dir_docid); f.scan_dir_.step(); ++ti; } else if (cmp < 0) { f.del_file(f.scan_dir_.integer(0)); f.scan_dir_.step(); } else { to_add.emplace(term.substr(dirtermprefixlen), xapian_get_unique_posting(xdb, term)); ++ti; } } while (f.scan_dir_.row()) { f.del_file(f.scan_dir_.integer(0)); f.scan_dir_.step(); } while (ti != te) { string term = *ti; to_add.emplace(term.substr(dirtermprefixlen), xapian_get_unique_posting(xdb, term)); ++ti; } // With a cold buffer cache, reading files to compute hashes goes // shockingly faster in the order of directory entries. if (!to_add.empty()) { _close.release(); DIR *d = fdopendir(dfd); cleanup _closedir (closedir, d); struct dirent *e; auto notfound = to_add.end(); while ((e = readdir(d)) && !to_add.empty()) { string name (e->d_name); auto action = to_add.find(name); if (action != notfound) { f.add_file(dir, dfd, dir_docid, action->first, action->second); to_add.erase(action); } } } } } static void xapian_adjust_nlinks(sqlite3 *db, writestamp ws) { sqlstmt_t newcount(db, "SELECT hash_id, dir_docid, count(*)" " FROM xapian_files NATURAL JOIN modified_hashes" " GROUP BY hash_id, dir_docid ORDER BY hash_id, dir_docid;"), oldcount(db, "SELECT hash_id, dir_docid, link_count, xapian_nlinks.rowid" " FROM xapian_nlinks NATURAL JOIN modified_hashes" " ORDER BY hash_id, dir_docid;"), updcount(db, "UPDATE xapian_nlinks SET link_count = ? 
WHERE rowid = ?;"), delcount(db, "DELETE FROM xapian_nlinks WHERE rowid = ?;"), addcount(db, "INSERT INTO xapian_nlinks (hash_id, dir_docid, link_count)" " VALUES (?, ?, ?);"), updhash(db, "UPDATE maildir_hashes SET replica = %lld, version = %lld" " WHERE hash_id = ?;", ws.first, ws.second); newcount.step(); oldcount.step(); while (newcount.row() || oldcount.row()) { i64 d; // < 0 only oldcount valid, > 0 only newcount valid if (!newcount.row()) d = -1; else if (!oldcount.row()) d = 1; else if (!(d = oldcount.integer(0) - newcount.integer(0))) d = oldcount.integer(1) - newcount.integer(1); if (d == 0) { i64 cnt = newcount.integer(2); if (cnt != oldcount.integer(2)) { updhash.reset().param(newcount.value(0)).step(); updcount.reset().param(cnt, oldcount.value(3)).step(); } oldcount.step(); newcount.step(); } else if (d < 0) { // file deleted and (hash_id, dir_id) not present newcount if (oldcount.integer(2)) updhash.reset().param(oldcount.value(0)).step(); delcount.reset().param(oldcount.value(3)).step(); oldcount.step(); } else { // file added and (hash_id, dir_id) not present in oldcount updhash.reset().param(newcount.value(0)).step(); addcount.reset().param(newcount.value(0), newcount.value(1), newcount.value(2)).step(); newcount.step(); } } } void xapian_scan(sqlite3 *sqldb, writestamp ws, string maildir) { while (maildir.size() > 1 && maildir.back() == '/') maildir.resize (maildir.size() - 1); if (maildir.empty()) maildir = "."; print_time ("starting scan of Xapian database"); Xapian::Database xdb (maildir + "/.notmuch/xapian"); set_triggers(sqldb); print_time ("opened Xapian"); xapian_scan_message_ids (sqldb, ws, xdb); print_time ("scanned message IDs"); xapian_scan_tags (sqldb, xdb, ws); print_time ("scanned tags"); xapian_scan_directories (sqldb, xdb); print_time ("scanned directories in xapian"); xapian_scan_filenames (sqldb, maildir, ws, xdb); print_time ("scanned filenames in xapian"); xapian_adjust_nlinks(sqldb, ws); print_time ("adjusted link counts"); } void sync_local_data (sqlite3 *sqldb, const string &maildir) { print_time ("synchronizing muchsync database with Xapian"); sqlexec (sqldb, "SAVEPOINT localsync;"); try { i64 self = getconfig(sqldb, "self"); sqlexec (sqldb, "UPDATE sync_vector" " SET version = version + 1 WHERE replica = %lld;", self); if (sqlite3_changes (sqldb) != 1) throw runtime_error ("My replica id (" + to_string (self) + ") not in sync vector"); versvector vv = get_sync_vector (sqldb); i64 vers = vv.at(self); writestamp ws { self, vers }; xapian_scan (sqldb, ws, maildir); } catch (exception &e) { sqlexec (sqldb, "ROLLBACK TO localsync;"); throw; } sqlexec (sqldb, "RELEASE localsync;"); print_time ("finished synchronizing muchsync database with Xapian"); } muchsync-7/infinibuf.cc0000644000175000017500000001351513227744306012271 00000000000000#include #include #include #include #include #include #include #include #include #include #include "infinibuf.h" #include using namespace std; infinibuf::~infinibuf() { for (char *p : data_) delete[] p; } void infinibuf::gbump(int n) { gpos_ += n; assert (gpos_ > 0 && gpos_ <= chunksize_); if (gpos_ == chunksize_) { assert (data_.size() > 1); delete[] data_.front(); data_.pop_front(); gpos_ = startpos_; notfull(); } } void infinibuf::pbump(int n) { if (n == 0) return; assert (n >= 0); assert (n <= psize()); assert (!eof_); bool wasempty (empty()); ppos_ += n; if (ppos_ == chunksize_) { char *chunk = new char[chunksize_]; memcpy(chunk, data_.back() + chunksize_ - startpos_, startpos_); data_.push_back(chunk); ppos_ = 
startpos_; } if (wasempty) notempty(); } static int set_nonblock(int fd) { int n; if ((n = fcntl (fd, F_GETFL)) == -1 || fcntl (fd, F_SETFL, n | O_NONBLOCK) == -1) return -1; return 0; } static void waitfd(int fd, int events) { struct pollfd pfd; pfd.fd = fd; pfd.events = events; poll(&pfd, 1, -1); } int infinibuf::output(int fd) { unique_lock lk (*this); for (;;) { char *p = gptr(); size_t nmax = gsize(); bool iseof = eof(); int error = err(); if (error) throw runtime_error (string("infinibuf::output: ") + strerror(error)); else if (!nmax && iseof) { assert (empty()); shutdown(fd, SHUT_WR); return 0; } if (!nmax) return 1; lk.unlock(); ssize_t n = write(fd, p, nmax); lk.lock(); if (n > 0) gbump(n); else { if (errno == EAGAIN) return -1; err(errno); } } } int infinibuf::input(int fd) { unique_lock lk (*this); char *p = pptr(); size_t nmax = psize(); if (int error = err()) throw runtime_error (string("infinibuf::input: ") + strerror(error)); lk.unlock(); ssize_t n = read(fd, p, nmax); lk.lock(); if (n < 0) { if (errno == EAGAIN) return -1; err(errno); throw runtime_error (string("infinibuf::input: ") + strerror(errno)); } if (n > 0) pbump(n); else peof(); return n > 0; } struct fd_closer { int fd_; fd_closer(int fd) : fd_(fd) {} ~fd_closer() { close(fd_); } }; void infinibuf::output_loop(shared_ptr ib, int fd, std::function oblocked) { fd_closer _c(fd); if (oblocked) set_nonblock(fd); try { for (;;) { int res = ib->output(fd); if (res > 0) { lock_guard _lk (*ib); ib->gwait(); } else if (res == 0) return; else { // EINTR if (oblocked) oblocked(true); waitfd(fd, POLLOUT); if (oblocked) oblocked(false); } } } catch (const runtime_error &) {} } void infinibuf::input_loop(shared_ptr ib, int fd) { fd_closer _c(fd); try { for (;;) { int res = ib->input(fd); if (res < 0) waitfd(fd, POLLIN); else if (res == 0) return; // Don't even bother checking flow control if less than 1MB allocated lock_guard lk (*ib); if (ib->buffer_size() >= 100000) ib->pwait(); } } catch (const runtime_error &) {} } infinibuf_infd::~infinibuf_infd() { close(fd_); } infinibuf_outfd::infinibuf_outfd (int fd, std::function oblocked) : infinibuf(0), fd_(fd), oblocked_(oblocked) { if (oblocked_) set_nonblock(fd_); } infinibuf_outfd::~infinibuf_outfd() { close(fd_); } void infinibuf_outfd::notempty() { while (output(fd_) < 0) { // EINTR if (oblocked_) oblocked_(true); waitfd(fd_, POLLOUT); if (oblocked_) oblocked_(false); } } infinistreambuf::int_type infinistreambuf::underflow() { lock_guard _lk (*ib_); ib_->gbump(gptr() - ib_->gptr()); while (ib_->gsize() == 0 && !ib_->eof()) ib_->gwait(); setg(ib_->eback(), ib_->gptr(), ib_->egptr()); bool eof = ib_->eof() && ib_->gsize() == 0; return eof ? traits_type::eof() : traits_type::to_int_type (*gptr()); } infinistreambuf::int_type infinistreambuf::overflow(int_type ch) { if (sync() == -1) return traits_type::eof(); *pptr() = ch; pbump(1); return traits_type::not_eof(ch); } int infinistreambuf::sync() { lock_guard _lk (*ib_); ib_->pbump(pptr() - ib_->pptr()); setp(ib_->pptr(), ib_->epptr()); int err = ib_->err(); return err ? 
-1 : 0; } infinistreambuf::infinistreambuf(shared_ptr ib) : ib_(ib) { lock_guard _lk (*ib_); setg(ib_->eback(), ib_->gptr(), ib_->egptr()); setp(ib_->pptr(), ib_->epptr()); } void infinistreambuf::sputeof() { sync(); lock_guard _lk (*ib_); ib_->peof(); } #if 0 int main (int argc, char **argv) { infinistreambuf inb (new infinibuf_mt); istream xin (&inb); thread it (infinibuf::input_loop, inb.get_infinibuf(), 0); infinistreambuf outb (new infinibuf_mt); ostream xout (&outb); thread ot (infinibuf::output_loop, outb.get_infinibuf(), 1); xin.tie (&xout); #if 0 char c; long count = 0; while (xin.get (c)) { count++; xout.put (c); } cerr << "flushing " << count << " bytes\n"; xout.flush(); #endif xout << xin.rdbuf() << flush; /* xout << "waiting for input\n"; string x; xin >> x; xout << "got " << x << "\n" << flush; */ auto oib = outb.get_infinibuf(); oib->lock(); oib->peof(); oib->unlock(); ot.join(); it.join(); return 0; } #endif #if 0 int main (int argc, char **argv) { ifdstream xin (0); ofdstream xout (1); xin.tie(&xout); //xout << xin.rdbuf(); #if 1 long count = 0; char c; while (xin.get (c)) { xout.put (c); count++; } cerr << "Total count " << count << '\n'; #endif xout << flush; } #endif /* c++ -g -std=c++11 -Wall -Werror -pthread infinibuf.cc */ muchsync-7/muchsync.10000644000175000017500000003576213403557511011730 00000000000000.\" Automatically generated by Pandoc 2.5 .\" .TH "muchsync" "1" "" "" "" .hy .SH NAME .PP muchsync \- synchronize maildirs and notmuch databases .SH SYNOPSIS .PP muchsync \f[I]options\f[R] .PD 0 .P .PD muchsync \f[I]options\f[R] \f[I]server\-name\f[R] \f[I]server\-options\f[R] .PD 0 .P .PD muchsync \f[I]options\f[R] \[en]init \f[I]maildir\f[R] \f[I]server\-name\f[R] \f[I]server\-options\f[R] .SH DESCRIPTION .PP muchsync synchronizes the contents of maildirs and notmuch tags across machines. Any given execution runs pairwise between two replicas, but the system scales to an arbitrary number of replicas synchronizing in arbitrary pairs. For efficiency, version vectors and logical timestamps are used to limit synchronization to items a peer may not yet know about. .PP To use muchsync, both muchsync and notmuch should be installed someplace in your PATH on two machines, and you must be able to access the remote machine via ssh. .PP In its simplest usage, you have a single notmuch database on some server \f[C]SERVER\f[R] and wish to start replicating that database on a client, where the client currently does not have any mailboxes. You can initialize a new replica in \f[C]$HOME/inbox\f[R] by running the following command: .IP .nf \f[C] muchsync \-\-init $HOME/inbox SERVER \f[R] .fi .PP This command may take some time, as it transfers the entire contents of your maildir from the server to the client and creates a new notmuch index on the client. Depending on your setup, you may be either bandwidth limited or CPU limited. (Sadly, the notmuch library on which muchsync is built is non\-reentrant and forces all indexing to happen on a single core at a rate of about 10,000 messages per minute.) .PP From then on, to synchronize the client with the server, just run: .IP .nf \f[C] muchsync SERVER \f[R] .fi .PP Since muchsync replicates the tags in the notmuch database itself, you should consider disabling maildir flag synchronization by executing: .IP .nf \f[C] notmuch config set maildir.synchronize_flags=false \f[R] .fi .PP The reason is that the synchronize_flags feature only works on a small subset of pre\-defined flags and so is not all that useful. 
Moreover, it marks flags by renaming files, which is not particularly efficient. muchsync was largely motivated by the need for better flag synchronization. If you are satisfied with the synchronize_flags feature, you might consider a tool such as offlineimap as an alternative to muchsync. .SS Synchronization algorithm .PP muchsync separately synchronizes two classes of information: the message\-to\-directory mapping (henceforth link counts) and the message\-id\-to\-tag mapping (henceforth tags). Using logical timestamps, it can detect update conflicts for each type of information. We describe link count and tag synchronization in turn. .PP Link count synchronization consists of ensuring that any given message (identified by its collision\-resistant content hash) appears the same number of times in the same subdirectories on each replica. Generally a message will appear only once in a single subdirectory. However, if the message is moved or deleted on one replica, this will propagate to other replicas. .PP If two replicas move or copy the same file between synchronization events (or one moves the file and the other deletes it), this constitutes an update conflict. Update conflicts are resolved by storing in each subdirectory a number of copies equal to the maximum of the number of copies in that subdirectory on the two replicas. This is conservative, in the sense that a file will never be deleted after a conflict, though you may get extra copies of files. (muchsync uses hard links, so at least these copies will not use too much disk space.) .PP For example, if one replica moves a message to subdirectory .box1/cur and another moves the same message to subdirectory .box2/cur, the conflict will be resolved by placing two links to the message on each replica, one in .box1/cur and one in .box2/cur. To respect the structure of maildirs, subdirectories ending \f[C]new\f[R] and \f[C]cur\f[R] are special\-cased; conflicts between sibling \f[C]new\f[R] and \f[C]cur\f[R] subdirectories are resolved in favor of \f[C]cur\f[R] without creating additional copies of messages. .PP Message tags are synchronized based on notmuch\[cq]s message\-ID (usually the Message\-ID header of a message), rather than message contents. On conflict, tags are combined as follows. Any tag in the notmuch configuration parameter \f[C]muchsync.and_tags\f[R] is removed from the message unless it appears on both replicas. Any other tag is added if it appears on any replica. In other words, tags in \f[C]muchsync.and_tags\f[R] are logically anded, while all other flags are logically ored. (This approach will give the most predictable results if \f[C]muchsync.and_tags\f[R] has the same value in all your replicas. The \f[C]\-\-init\f[R] option ensures uniform configurations initially, but subsequent changes to \f[C]muchsync.and_tags\f[R] must be manually propagated.) .PP If your configuration file does not specify a value for \f[C]muchsync.and_tags\f[R], the default is to use the set of tags specified in the \f[C]new.tags\f[R] configuration option. This should give intuitive results unless you use a two\-pass tagging system such as the afew tool, in which case \f[C]new.tags\f[R] is used to flag input to the second pass while you likely want \f[C]muchsync.and_tags\f[R] to reflect the output of the second pass. .SS File deletion .PP Because publishing software that actually deletes people\[cq]s email is a scary prospect, muchsync for the moment never actually deletes mail files. 
Though this may change in the future, for the moment muchsync moves any deleted messages to the directory \f[C].notmuch/muchsync/trash\f[R] under your mail directory (naming deleted messages by their content hash). If you really want to delete mail to reclaim disk space or for privacy reasons, you will need to run the following on each replica: .IP .nf \f[C] cd \[dq]$(notmuch config get database.path)\[dq] rm \-rf .notmuch/muchsync/trash \f[R] .fi .SH OPTIONS .TP .B \-C \f[I]file\f[R], \-\-config \f[I]file\f[R] Specify the path of the notmuch configuration file to use. If none is specified, the default is to use the contents of the environment variable $NOTMUCH_CONFIG, or if that variable is unset, the value $HOME/.notmuch\-config. (These are the same defaults as the notmuch command itself.) .TP .B \-F Check for modified files. Without this option, muchsync assumes that files in a maildir are never edited. \-F disables certain optimizations so as to make muchsync at least check the timestamp on every file, which will detect modified files at the cost of a longer startup time. If muchsync dies with the error \[lq]message received does not match hash,\[rq] you likely need to run it with the \-F option. .RS .PP Note that if your software regularly modifies the contents of mail files (e.g., because you are running offlineimap with \[lq]synclabels = yes\[rq]), then you will need to use \-F each time you run muchsync. Specify it as a server option (after the server name) if the editing happens server\-side. .RE .TP .B \-r /path/to/muchsync Specifies the path to muchsync on the server. Ordinarily, muchsync should be in the default PATH on the server so this option is not required. However, this option is useful if you have to install muchsync in a non\-standard place or wish to test development versions of the code. .TP .B \-s ssh\-cmd Specifies a command line to pass to /bin/sh to execute a command on another machine. The default value is \[lq]ssh \-CTaxq\[rq]. Note that because this string is passed to the shell, special characters including spaces may need to be escaped. .TP .B \-v The \-v option increases verbosity. The more times it is specified, the more verbose muchsync will become. .TP .B \-\-help Print a brief summary of muchsync\[cq]s command\-line options. .TP .B \-\-init \f[I]maildir\f[R] This option clones an existing mailbox on a remote server into \f[I]maildir\f[R] on the local machine. Neither \f[I]maildir\f[R] nor your notmuch configuration file (see \f[C]\-\-config\f[R] above) should exist when you run this command, as both will be created. The configuration file is copied from the server (adjusted to reflect the local maildir), while \f[I]maildir\f[R] is created as a replica of the maildir you have on the server. .TP .B \-\-nonew Ordinarily, muchsync begins by running \[lq]notmuch new\[rq]. This option says not to run \[lq]notmuch new\[rq] before starting the muchsync operation. It can be passed as either a client or a server option. For example: The command \[lq]\f[C]muchsync myserver \-\-nonew\f[R]\[rq] will run \[lq]\f[C]notmuch new\f[R]\[rq] locally but not on myserver. .TP .B \-\-noup, \-\-noupload Transfer files from the server to the client, but not vice versa. .TP .B \-\-upbg Transfer files from the server to the client in the foreground. Then fork into the background to upload any new files from the client to the server. This option is useful when checking new mail, if you want to begin reading your mail as soon as it has been downloaded while the upload continues. 
.TP .B \-\-self Print the 64\-bit replica ID of the local maildir replica and exit. Potentially useful in higher\-level scripts, such as the emacs notmuch\-poll\-script variable for identifying on which replica one is running, particularly if network file systems allow a replica to be accessed from multiple machines. .TP .B \-\-newid Muchsync requires every replica to have a unique 64\-bit identifier. If you ever copy a notmuch database to another machine, including the muchsync state, bad things will happen if both copies use muchsync, as they will both have the same identifier. Hence, after making such a copy and before running muchsync to synchronize mail, run \f[C]muchsync \-\-newid\f[R] to change the identifier of one of the copies. .TP .B \-\-version Report on the muchsync version number. .SH EXAMPLES .PP To initialize the muchsync database, you can run: .IP .nf \f[C] muchsync \-vv \f[R] .fi .PP This first executes \[lq]\f[C]notmuch new\f[R]\[rq], then builds the initial muchsync database from the contents of your maildir (the directory specified as \f[C]database.path\f[R] in your notmuch configuration file). This command may take several minutes the first time it is run, as it must compute a content hash of every message in the database. Note that you do not need to run this command, as muchsync will initialize the database the first time a client tries to synchronize anyway. .IP .nf \f[C] muchsync \-\-init \[ti]/maildir myserver \f[R] .fi .PP First run \[lq]notmuch new\[rq] on myserver, then create a directory \f[C]\[ti]/maildir\f[R] containing a replica of your mailbox on myserver. Note that neither your configuration file (by default \f[C]\[ti]/.notmuch\-config\f[R]) nor \f[C]\[ti]/maildir\f[R] should exist before running this command, as both will be created. .PP To create a \f[C]notmuch\-poll\f[R] script that fetches mail from a remote server \f[C]myserver\f[R], but on that server just runs \f[C]notmuch new\f[R], do the following: First, run \f[C]muchsync \-\-self\f[R] on the server to get the replica ID. Then take the ID returned (e.g., \f[C]1968464194667562615\f[R]) and embed it in a shell script as follows: .IP .nf \f[C] #!/bin/sh self=$($HOME/muchsync \-\-self) || exit 1 if [ \[dq]$self\[dq] = 1968464194667562615 ]; then exec notmuch new else exec $HOME/muchsync \-r ./muchsync \-\-upbg myserver fi \f[R] .fi .PP The path of such a script is a good candidate for the emacs \f[C]notmuch\-poll\-script\f[R] variable. .PP Alternatively, to have the command \f[C]notmuch new\f[R] on a client automatically fetch new mail from server \f[C]myserver\f[R], you can place the following in the file \f[C].notmuch/hooks/post\-new\f[R] under your mail directory: .IP .nf \f[C] #!/bin/sh muchsync \-\-nonew \-\-upbg myserver \f[R] .fi .SH FILES .PP The default notmuch configuration file is \f[C]$HOME/.notmuch\-config\f[R]. .PP muchsync keeps all of its state in a subdirectory of your top maildir called \f[C].notmuch/muchsync\f[R]. .SH SEE ALSO .PP notmuch(1). .SH BUGS .PP muchsync expects initially to create replicas from scratch. If you have created a replica using another tool such as offlineimap and you try to use muchsync to synchronize them, muchsync will assume every file has an update conflict. This is okay if the two replicas are identical; if they are not, it will result in artifacts such as files deleted in only one replica reappearing.
Ideally notmuch needs an option like \f[C]\-\-clobber\f[R] that makes a local replica identical to the remote one without touching the remote one, so that an old version of a mail directory can be used as a disposable cache to bootstrap initialization. .PP muchsync never deletes directories. If you want to remove a subdirectory completely, you must manually execute rmdir on all replicas. Even if you manually delete a subdirectory, it will live on in the notmuch database. .PP To synchronize deletions and re\-creations properly, muchsync never deletes content hashes and their message IDs from its database, even after the last copy of a message has disappeared. Such stale hashes should not consume an inordinate amount of disk space, but could conceivably pose a privacy risk if users believe deleting a message removes all traces of it. .PP Message tags are synchronized based on notmuch\[cq]s message\-ID (usually the Message\-ID header of a message), rather than based on message contents. This is slightly strange because very different messages can have the same Message\-ID header, meaning the user will likely only read one of many messages bearing the same Message\-ID header. It is conceivable that an attacker could suppress a message from a mailing list by sending another message with the same Message\-ID. This bug is in the design of notmuch, and hence not something that muchsync can work around. muchsync itself does not assume Message\-ID equivalence, relying instead on content hashes to synchronize link counts. Hence, any tools used to work around the problem should work on all replicas. .PP Because notmuch and Xapian do not keep any kind of modification time on database entries, every invocation of muchsync requires a complete scan of all tags in the Xapian database to detect any changed tags. Fortunately muchsync heavily optimizes the scan so that it should take well under a second for 100,000 mail messages. However, this means that interfaces such as those used by notmuch\-dump are not efficient enough (see the next paragraph). .PP muchsync makes certain assumptions about the structure of notmuch\[cq]s private types \f[C]notmuch_message_t\f[R] and \f[C]notmuch_directory_t\f[R]. In particular, it assumes that the Xapian document ID is the second field of these data structures. Sadly, there is no efficient and clean way to extract this information from the notmuch library interface. muchsync also makes other assumptions about how tokens are named in the Xapian database. These assumptions are necessary because the notmuch library interface and the notmuch dump utility are too slow to support synchronization every time you check mail. .SH AUTHORS David Mazieres. 
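The tag-merge rule described under "Synchronization algorithm" above (a tag listed in muchsync.and_tags survives a conflict only if both replicas carry it, while any other tag survives if either replica carries it) can be illustrated with a short, self-contained C++ sketch. This is only an illustration of the documented rule, not muchsync's actual implementation; the names tagset and merge_tags are invented for the example.

// Sketch of the tag-merge rule from the man page above. Illustration
// only: tagset and merge_tags are invented names, not muchsync code.
#include <iostream>
#include <string>
#include <unordered_set>

using tagset = std::unordered_set<std::string>;

// "and" tags (by default the new.tags set) must be present on both
// replicas to survive; every other tag is kept if either replica has it.
tagset
merge_tags(const tagset &local, const tagset &remote, const tagset &and_tags)
{
  tagset out;
  for (const std::string &t : local)
    if (!and_tags.count(t) || remote.count(t))
      out.insert(t);
  for (const std::string &t : remote)
    if (!and_tags.count(t) || local.count(t))
      out.insert(t);
  return out;
}

int
main()
{
  tagset and_tags{"unread", "new"};
  tagset local{"unread", "inbox"};    // this replica still has "unread"
  tagset remote{"inbox", "flagged"};  // the other replica removed it
  for (const std::string &t : merge_tags(local, remote, and_tags))
    std::cout << t << '\n';           // prints "inbox" and "flagged"
  return 0;
}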
muchsync-7/sqlstmt.cc0000644000175000017500000000462213227744306012026 00000000000000 #include #include #include #include #include #include #include #include #include #include #include "sqlstmt.h" using namespace std; static void dbthrow (sqlite3 *db, const char *query) { const char *dbpath = sqlite3_db_filename (db, "main"); if (!dbpath) dbpath = "sqlite3 database"; ostringstream errbuf; if (query) errbuf << dbpath << ":\n Query: " << query << "\n Error: " << sqlite3_errmsg (db); else errbuf << dbpath << ": " << sqlite3_errmsg (db); throw sqlerr_t (errbuf.str ()); } sqlstmt_t & sqlstmt_t::set_status (int status) { status_ = status; if (status != SQLITE_OK && status != SQLITE_ROW && status != SQLITE_DONE) dbthrow (sqlite3_db_handle (stmt_), nullptr); return *this; } void sqlstmt_t::fail () { assert (status_ != SQLITE_OK); if (status_ == SQLITE_DONE) throw sqldone_t(string ("No rows left in query: ") + sqlite3_sql (stmt_)); else throw sqlerr_t(string ("sqlstmt_t::operator[]: used after error\n" " Query: ") + sqlite3_sql (stmt_) + "\n Error: " + sqlite3_errstr(status_)); } sqlstmt_t::sqlstmt_t (sqlite3 *db, const char *fmt, ...) { va_list ap; va_start (ap, fmt); char *query = sqlite3_vmprintf(fmt, ap); va_end (ap); if (!query) throw sqlerr_t ("sqlite3_vmprintf: out of memory"); unique_ptr _c (query, sqlite3_free); const char *tail; if (sqlite3_prepare_v2(db, query, -1, &stmt_, &tail)) dbthrow (db, query); if (tail && *tail) throw sqlerr_t (string("illegal compound query\n Query: ") + query); } sqlstmt_t::sqlstmt_t(const sqlstmt_t &l) { int err = sqlite3_prepare_v2(sqlite3_db_handle(l.stmt_), sqlite3_sql(l.stmt_), -1, &stmt_, nullptr); if (err) throw sqlerr_t (string("could not copy query\n Query: ") + sqlite3_sql(l.stmt_) + "\n Error: " + sqlite3_errstr(err) + "\n"); } void sqlexec (sqlite3 *db, const char *fmt, ...) { char *query; va_list ap; va_start (ap, fmt); query = sqlite3_vmprintf (fmt, ap); unique_ptr _c (query, sqlite3_free); va_end (ap); if (!query) throw sqlerr_t ("sqlite3_vmprintf: out of memory in sqlexec"); int err = sqlite3_exec (db, query, NULL, NULL, NULL); if (err != SQLITE_OK && err != SQLITE_DONE && err != SQLITE_ROW) dbthrow (db, query); } muchsync-7/README0000644000175000017500000000236612560332414010663 00000000000000 # Intro Muchsync is a mail synchronizer for notmuch. The project homepage is [www.muchsync.org](http://www.muchsync.org/). # To get latest the muchsync software, run: git clone http://www.muchsync.org/muchsync.git # To build it after a git checkout, run the following: ./autogen.sh ./configure make [Note: you need pandoc to build the man page, otherwise `autogen.sh` will fetch it from the web.] # Using Muchsync assumes that it's on your path on the server, but you can specify the path on the command line with -r. If you are tracking a development version, you can put a symlink to the executable in your home directory on all machines, and then run: ~/muchsync -r ./muchsync server Of course, initial clones are kind of slow, so you probably want to see what's happening. You can add -vv before server to see what is happening locally, and -vv after to see what is happening on the server. 
For example: ~/muchsync -r ./muchsync -vv server -vv # Getting started On the server, initialize the repository by running: muchsync -vv On the client, to create a new replica from scratch, run one of: muchsync --init=/path/to/new/inbox -vv server -vv muchsync -r ./muchsync --init=/path/to/new/inbox -vv server -vv muchsync-7/COPYING0000644000175000017500000000122214357054642011036 00000000000000Copyright (C) 2013-2014 David Mazieres Distribution permitted under the GNU General Public License (GPL) version 2 or later. Acceptable versions of this license are available at the following URLs: http://www.gnu.org/licenses/gpl-2.0.html http://www.gnu.org/licenses/gpl-3.0.html If the Free Software Foundation publishes a higher-numbered GPL license, you may at your option distribute this software under the terms of that license as well. Furthermore, the copyright holder grants you the right to classify the collection of crypgtographic hash functions in OpenSSL's libcrypto as a "system library" and a "major component of the operating system." muchsync-7/m4/0000755000175000017500000000000014357601161010400 500000000000000muchsync-7/m4/ax_append_flag.m40000644000175000017500000000530412302053223013500 00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_append_flag.html # =========================================================================== # # SYNOPSIS # # AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) # # DESCRIPTION # # FLAG is appended to the FLAGS-VARIABLE shell variable, with a space # added in between. # # If FLAGS-VARIABLE is not specified, the current language's flags (e.g. # CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains # FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly # FLAG. # # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. # # LICENSE # # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2011 Maarten Bosmans # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. 
#serial 2 AC_DEFUN([AX_APPEND_FLAG], [AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])])dnl AS_VAR_SET_IF(FLAGS, [case " AS_VAR_GET(FLAGS) " in *" $1 "*) AC_RUN_LOG([: FLAGS already contains $1]) ;; *) AC_RUN_LOG([: FLAGS="$FLAGS $1"]) AS_VAR_SET(FLAGS, ["AS_VAR_GET(FLAGS) $1"]) ;; esac], [AS_VAR_SET(FLAGS,["$1"])]) AS_VAR_POPDEF([FLAGS])dnl ])dnl AX_APPEND_FLAG muchsync-7/m4/ax_cxx_compile_stdcxx_11.m40000644000175000017500000001076012302053223015452 00000000000000# ============================================================================ # http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx_11.html # ============================================================================ # # SYNOPSIS # # AX_CXX_COMPILE_STDCXX_11([ext|noext],[mandatory|optional]) # # DESCRIPTION # # Check for baseline language coverage in the compiler for the C++11 # standard; if necessary, add switches to CXXFLAGS to enable support. # # The first argument, if specified, indicates whether you insist on an # extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. # -std=c++11). If neither is specified, you get whatever works, with # preference for an extended mode. # # The second argument, if specified 'mandatory' or if left unspecified, # indicates that baseline C++11 support is required and that the macro # should error out if no mode with that support is found. If specified # 'optional', then configuration proceeds regardless, after defining # HAVE_CXX11 if and only if a supporting mode is found. # # LICENSE # # Copyright (c) 2008 Benjamin Kosnik # Copyright (c) 2012 Zack Weinberg # Copyright (c) 2013 Roy Stogner # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. 
#serial 3 m4_define([_AX_CXX_COMPILE_STDCXX_11_testbody], [ template struct check { static_assert(sizeof(int) <= sizeof(T), "not big enough"); }; typedef check> right_angle_brackets; int a; decltype(a) b; typedef check check_type; check_type c; check_type&& cr = static_cast(c); auto d = a; ]) AC_DEFUN([AX_CXX_COMPILE_STDCXX_11], [dnl m4_if([$1], [], [], [$1], [ext], [], [$1], [noext], [], [m4_fatal([invalid argument `$1' to AX_CXX_COMPILE_STDCXX_11])])dnl m4_if([$2], [], [ax_cxx_compile_cxx11_required=true], [$2], [mandatory], [ax_cxx_compile_cxx11_required=true], [$2], [optional], [ax_cxx_compile_cxx11_required=false], [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX_11])]) AC_LANG_PUSH([C++])dnl ac_success=no AC_CACHE_CHECK(whether $CXX supports C++11 features by default, ax_cv_cxx_compile_cxx11, [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])], [ax_cv_cxx_compile_cxx11=yes], [ax_cv_cxx_compile_cxx11=no])]) if test x$ax_cv_cxx_compile_cxx11 = xyes; then ac_success=yes fi m4_if([$1], [noext], [], [dnl if test x$ac_success = xno; then for switch in -std=gnu++11 -std=gnu++0x; do cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx11_$switch]) AC_CACHE_CHECK(whether $CXX supports C++11 features with $switch, $cachevar, [ac_save_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$CXXFLAGS $switch" AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])], [eval $cachevar=yes], [eval $cachevar=no]) CXXFLAGS="$ac_save_CXXFLAGS"]) if eval test x\$$cachevar = xyes; then CXXFLAGS="$CXXFLAGS $switch" ac_success=yes break fi done fi]) m4_if([$1], [ext], [], [dnl if test x$ac_success = xno; then for switch in -std=c++11 -std=c++0x; do cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx11_$switch]) AC_CACHE_CHECK(whether $CXX supports C++11 features with $switch, $cachevar, [ac_save_CXXFLAGS="$CXXFLAGS" CXXFLAGS="$CXXFLAGS $switch" AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])], [eval $cachevar=yes], [eval $cachevar=no]) CXXFLAGS="$ac_save_CXXFLAGS"]) if eval test x\$$cachevar = xyes; then CXXFLAGS="$CXXFLAGS $switch" ac_success=yes break fi done fi]) AC_LANG_POP([C++]) if test x$ax_cxx_compile_cxx11_required = xtrue; then if test x$ac_success = xno; then AC_MSG_ERROR([*** A compiler with support for C++11 language features is required.]) fi else if test x$ac_success = xno; then HAVE_CXX11=0 AC_MSG_NOTICE([No compiler with C++11 support was found]) else HAVE_CXX11=1 AC_DEFINE(HAVE_CXX11,1, [define if the compiler supports basic C++11 syntax]) fi AC_SUBST(HAVE_CXX11) fi ]) muchsync-7/m4/ax_append_compile_flags.m40000644000175000017500000000551112302053223015373 00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_append_compile_flags.html # =========================================================================== # # SYNOPSIS # # AX_APPEND_COMPILE_FLAGS([FLAG1 FLAG2 ...], [FLAGS-VARIABLE], [EXTRA-FLAGS]) # # DESCRIPTION # # For every FLAG1, FLAG2 it is checked whether the compiler works with the # flag. If it does, the flag is added FLAGS-VARIABLE # # If FLAGS-VARIABLE is not specified, the current language's flags (e.g. # CFLAGS) is used. During the check the flag is always added to the # current language's flags. # # If EXTRA-FLAGS is defined, it is added to the current language's default # flags (e.g. CFLAGS) when the check is done. The check is thus made with # the flags: "CFLAGS EXTRA-FLAGS FLAG". 
This can for example be used to # force the compiler to issue an error when a bad flag is given. # # NOTE: This macro depends on the AX_APPEND_FLAG and # AX_CHECK_COMPILE_FLAG. Please keep this macro in sync with # AX_APPEND_LINK_FLAGS. # # LICENSE # # Copyright (c) 2011 Maarten Bosmans # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 3 AC_DEFUN([AX_APPEND_COMPILE_FLAGS], [AC_REQUIRE([AX_CHECK_COMPILE_FLAG]) AC_REQUIRE([AX_APPEND_FLAG]) for flag in $1; do AX_CHECK_COMPILE_FLAG([$flag], [AX_APPEND_FLAG([$flag], [$2])], [], [$3]) done ])dnl AX_APPEND_COMPILE_FLAGS muchsync-7/m4/ax_check_compile_flag.m40000644000175000017500000000625112302053223015020 00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html # =========================================================================== # # SYNOPSIS # # AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS]) # # DESCRIPTION # # Check whether the given FLAG works with the current language's compiler # or gives an error. (Warnings, however, are ignored) # # ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on # success/failure. # # If EXTRA-FLAGS is defined, it is added to the current language's default # flags (e.g. CFLAGS) when the check is done. The check is thus made with # the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to # force the compiler to issue an error when a bad flag is given. # # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this # macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. # # LICENSE # # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2011 Maarten Bosmans # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 2 AC_DEFUN([AX_CHECK_COMPILE_FLAG], [AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [AS_VAR_SET(CACHEVAR,[yes])], [AS_VAR_SET(CACHEVAR,[no])]) _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], [m4_default([$2], :)], [m4_default([$3], :)]) AS_VAR_POPDEF([CACHEVAR])dnl ])dnl AX_CHECK_COMPILE_FLAGS muchsync-7/sql_db.h0000644000175000017500000000775312766462415011442 00000000000000// -*- C++ -*- #ifndef _SQL_DB_H #define _SQL_DB_H 1 /** \file sql_db.h * \brief Data structures representing information in SQL database. */ #include #include #include #include #include #include #include #include "sqlstmt.h" using std::string; extern const char dbvers[]; /** Writestamp is the pair (replica-id, version-number). */ using writestamp = std::pair; std::istream &read_writestamp (std::istream &in, writestamp &ws); /** A version vector is a set of ::writestamps with distinct * replica-ids. */ using versvector = std::unordered_map; string show_sync_vector (const versvector &vv); std::istream &read_sync_vector (std::istream &sb, versvector &vv); versvector get_sync_vector (sqlite3 *db); /** Open the SQL database containing muchsync state. * * If the file does not exist, it is created and initialized with a * fresh database. */ sqlite3 *dbopen (const char *path, bool exclusive = false); /** Retrieve a configuration value from the database. * * Example: `getconfig(db, "key")` */ template T getconfig (sqlite3 *db, const string &key) { static const char query[] = "SELECT value FROM configuration WHERE key = ?;"; return sqlstmt_t(db, query).param(key).step().template column(0); } /** Set a configuration value in database. */ template void setconfig (sqlite3 *db, const string &key, const T &value) { static const char query[] = "INSERT OR REPLACE INTO configuration VALUES (?, ?);"; sqlstmt_t(db, query).param(key, value).step(); } /** Structure representing all occurences of a file with a particular * content hash in the maildir. */ struct hash_info { string hash; i64 size = -1; string message_id; writestamp hash_stamp = {0, 0}; std::unordered_map dirs; }; /** Pre-formatted queries for looking up ::hash_info structures in * database. 
*/ class hash_lookup { sqlstmt_t gethash_; sqlstmt_t getlinks_; sqlstmt_t makehash_; bool ok_ = false; hash_info hi_; i64 hash_id_; std::vector> links_; std::ifstream content_; i64 docid_; public: const string maildir; hash_lookup(const string &maildir, sqlite3 *db); bool lookup(const string &hash); void create(const hash_info &info); bool ok() const { return ok_; } i64 hash_id() const { assert (ok()); return hash_id_; } const hash_info &info() const { assert (ok()); return hi_; } const std::vector> &links() const { assert (ok()); return links_; } i64 docid() const { assert (nlinks()); return docid_; } int nlinks() const { return links().size(); } string link_path(int i) const { auto &lnk = links().at(i); return maildir + "/" + lnk.first + "/" + lnk.second; } bool get_pathname(string *path, bool *from_trash = nullptr) const; std::streambuf *content(); }; /** Structure representing all the tags associated with a particular * message ID in the database. * * Note that multiple content hashes may contain the same message ID. */ struct tag_info { string message_id; writestamp tag_stamp = {0, 0}; std::unordered_set tags; }; /** Pre-formatted queries for looking up ::tag_info structures in * database. */ class tag_lookup { sqlstmt_t getmsg_; sqlstmt_t gettags_; bool ok_ = false; tag_info ti_; i64 docid_; public: tag_lookup (sqlite3 *db); bool lookup(const string &msgid); bool ok() const { return ok_; } i64 docid() const { assert (ok()); return docid_; } const tag_info &info() const { assert (ok()); return ti_; } }; std::ostream &operator<< (std::ostream &os, const hash_info &hi); std::istream &operator>> (std::istream &is, hash_info &hi); std::ostream &operator<< (std::ostream &os, const tag_info &ti); std::istream &operator>> (std::istream &is, tag_info &ti); string trashname (const string &maildir, const string &hash); string permissive_percent_encode (const string &raw); i64 create_random_id (); #endif /* !_SQL_DB_H */ muchsync-7/muchsync.1.md0000644000175000017500000003445013403557504012322 00000000000000% muchsync(1) % David Mazieres % # NAME muchsync - synchronize maildirs and notmuch databases # SYNOPSIS muchsync _options_ \ muchsync _options_ _server-name_ _server-options_ \ muchsync _options_ --init _maildir_ _server-name_ _server-options_ # DESCRIPTION muchsync synchronizes the contents of maildirs and notmuch tags across machines. Any given execution runs pairwise between two replicas, but the system scales to an arbitrary number of replicas synchronizing in arbitrary pairs. For efficiency, version vectors and logical timestamps are used to limit synchronization to items a peer may not yet know about. To use muchsync, both muchsync and notmuch should be installed someplace in your PATH on two machines, and you must be able to access the remote machine via ssh. In its simplest usage, you have a single notmuch database on some server `SERVER` and wish to start replicating that database on a client, where the client currently does not have any mailboxes. You can initialize a new replica in `$HOME/inbox` by running the following command: muchsync --init $HOME/inbox SERVER This command may take some time, as it transfers the entire contents of your maildir from the server to the client and creates a new notmuch index on the client. Depending on your setup, you may be either bandwidth limited or CPU limited. (Sadly, the notmuch library on which muchsync is built is non-reentrant and forces all indexing to happen on a single core at a rate of about 10,000 messages per minute.) 
From then on, to synchronize the client with the server, just run: muchsync SERVER Since muchsync replicates the tags in the notmuch database itself, you should consider disabling maildir flag synchronization by executing: notmuch config set maildir.synchronize_flags=false The reason is that the synchronize\_flags feature only works on a small subset of pre-defined flags and so is not all that useful. Moreover, it marks flags by renaming files, which is not particularly efficient. muchsync was largely motivated by the need for better flag synchronization. If you are satisfied with the synchronize\_flags feature, you might consider a tool such as offlineimap as an alternative to muchsync. ## Synchronization algorithm muchsync separately synchronizes two classes of information: the message-to-directory mapping (henceforth link counts) and the message-id-to-tag mapping (henceforth tags). Using logical timestamps, it can detect update conflicts for each type of information. We describe link count and tag synchronization in turn. Link count synchronization consists of ensuring that any given message (identified by its collision-resistant content hash) appears the same number of times in the same subdirectories on each replica. Generally a message will appear only once in a single subdirectory. However, if the message is moved or deleted on one replica, this will propagate to other replicas. If two replicas move or copy the same file between synchronization events (or one moves the file and the other deletes it), this constitutes an update conflict. Update conflicts are resolved by storing in each subdirectory a number of copies equal to the maximum of the number of copies in that subdirectory on the two replicas. This is conservative, in the sense that a file will never be deleted after a conflict, though you may get extra copies of files. (muchsync uses hard links, so at least these copies will not use too much disk space.) For example, if one replica moves a message to subdirectory .box1/cur and another moves the same message to subdirectory .box2/cur, the conflict will be resolved by placing two links to the message on each replica, one in .box1/cur and one in .box2/cur. To respect the structure of maildirs, subdirectories ending `new` and `cur` are special-cased; conflicts between sibling `new` and `cur` subdirectories are resolved in favor of `cur` without creating additional copies of messages. Message tags are synchronized based on notmuch's message-ID (usually the Message-ID header of a message), rather than message contents. On conflict, tags are combined as follows. Any tag in the notmuch configuration parameter `muchsync.and_tags` is removed from the message unless it appears on both replicas. Any other tag is added if it appears on any replica. In other words, tags in `muchsync.and_tags` are logically anded, while all other flags are logically ored. (This approach will give the most predictable results if `muchsync.and_tags` has the same value in all your replicas. The `--init` option ensures uniform configurations initially, but subsequent changes to `muchsync.and_tags` must be manually propagated.) If your configuration file does not specify a value for `muchsync.and_tags`, the default is to use the set of tags specified in the `new.tags` configuration option. 
This should give intuitive results unless you use a two-pass tagging system such as the afew tool, in which case `new.tags` is used to flag input to the second pass while you likely want `muchsync.and_tags` to reflect the output of the second pass. ## File deletion Because publishing software that actually deletes people's email is a scary prospect, muchsync for the moment never actually deletes mail files. Though this may change in the future, for the moment muchsync moves any deleted messages to the directory `.notmuch/muchsync/trash` under your mail directory (naming deleted messages by their content hash). If you really want to delete mail to reclaim disk space or for privacy reasons, you will need to run the following on each replica: cd "$(notmuch config get database.path)" rm -rf .notmuch/muchsync/trash # OPTIONS \-C _file_, \--config _file_ : Specify the path of the notmuch configuration file to use. If none is specified, the default is to use the contents of the environment variable \$NOTMUCH_CONFIG, or if that variable is unset, the value \$HOME/.notmuch-config. (These are the same defaults as the notmuch command itself.) \-F : Check for modified files. Without this option, muchsync assumes that files in a maildir are never edited. -F disables certain optimizations so as to make muchsync at least check the timestamp on every file, which will detect modified files at the cost of a longer startup time. If muchsync dies with the error "message received does not match hash," you likely need to run it with the -F option. Note that if your software regularly modifies the contents of mail files (e.g., because you are running offlineimap with "synclabels = yes"), then you will need to use -F each time you run muchsync. Specify it as a server option (after the server name) if the editing happens server-side. \-r /path/to/muchsync : Specifies the path to muchsync on the server. Ordinarily, muchsync should be in the default PATH on the server so this option is not required. However, this option is useful if you have to install muchsync in a non-standard place or wish to test development versions of the code. \-s ssh-cmd : Specifies a command line to pass to /bin/sh to execute a command on another machine. The default value is "ssh -CTaxq". Note that because this string is passed to the shell, special characters including spaces may need to be escaped. \-v : The -v option increases verbosity. The more times it is specified, the more verbose muchsync will become. \--help : Print a brief summary of muchsync's command-line options. \--init _maildir_ : This option clones an existing mailbox on a remote server into _maildir_ on the local machine. Neither _maildir_ nor your notmuch configuration file (see ```--config``` above) should exist when you run this command, as both will be created. The configuration file is copied from the server (adjusted to reflect the local maildir), while _maildir_ is created as a replica of the maildir you have on the server. \--nonew : Ordinarily, muchsync begins by running "notmuch new". This option says not to run "notmuch new" before starting the muchsync operation. It can be passed as either a client or a server option. For example: The command "```muchsync myserver --nonew```" will run "```notmuch new```" locally but not on myserver. \--noup, \--noupload : Transfer files from the server to the client, but not vice versa. \--upbg : Transfer files from the server to the client in the foreground. 
Then fork into the background to upload any new files from the client to the server. This option is useful when checking new mail, if you want to begin reading your mail as soon as it has been downloaded while the upload continues. \--self : Print the 64-bit replica ID of the local maildir replica and exit. Potentially useful in higher-level scripts, such as the emacs notmuch-poll-script variable for identifying on which replica one is running, particularly if network file systems allow a replica to be accessed from multiple machines. \--newid : Muchsync requires every replica to have a unique 64-bit identifier. If you ever copy a notmuch database to another machine, including the muchsync state, bad things will happen if both copies use muchsync, as they will both have the same identifier. Hence, after making such a copy and before running muchsync to synchronize mail, run `muchsync --newid` to change the identifier of one of the copies. \--version : Report on the muchsync version number. # EXAMPLES To initialize the muchsync database, you can run: muchsync -vv This first executes "`notmuch new`", then builds the initial muchsync database from the contents of your maildir (the directory specified as `database.path` in your notmuch configuration file). This command may take several minutes the first time it is run, as it must compute a content hash of every message in the database. Note that you do not need to run this command, as muchsync will initialize the database the first time a client tries to synchronize anyway. muchsync --init ~/maildir myserver First run "notmuch new" on myserver, then create a directory `~/maildir` containing a replica of your mailbox on myserver. Note that neither your configuration file (by default `~/.notmuch-config`) nor `~/maildir` should exist before running this command, as both will be created. To create a `notmuch-poll` script that fetches mail from a remote server `myserver`, but on that server just runs `notmuch new`, do the following: First, run `muchsync --self` on the server to get the replica ID. Then take the ID returned (e.g., `1968464194667562615`) and embed it in a shell script as follows: #!/bin/sh self=$($HOME/muchsync --self) || exit 1 if [ "$self" = 1968464194667562615 ]; then exec notmuch new else exec $HOME/muchsync -r ./muchsync --upbg myserver fi The path of such a script is a good candidate for the emacs `notmuch-poll-script` variable. Alternatively, to have the command ``notmuch new`` on a client automatically fetch new mail from server `myserver`, you can place the following in the file ``.notmuch/hooks/post-new`` under your mail directory: #!/bin/sh muchsync --nonew --upbg myserver # FILES The default notmuch configuration file is `$HOME/.notmuch-config`. muchsync keeps all of its state in a subdirectory of your top maildir called ```.notmuch/muchsync```. # SEE ALSO notmuch(1). # BUGS muchsync expects initially to create replicas from scratch. If you have created a replica using another tool such as offlineimap and you try to use muchsync to synchronize them, muchsync will assume every file has an update conflict. This is okay if the two replicas are identical; if they are not, it will result in artifacts such as files deleted in only one replica reappearing. Ideally notmuch needs an option like `--clobber` that makes a local replica identical to the remote one without touching the remote one, so that an old version of a mail directory can be used as a disposable cache to bootstrap initialization. muchsync never deletes directories.
If you want to remove a subdirectory completely, you must manually execute rmdir on all replicas. Even if you manually delete a subdirectory, it will live on in the notmuch database. To synchronize deletions and re-creations properly, muchsync never deletes content hashes and their message IDs from its database, even after the last copy of a message has disappeared. Such stale hashes should not consume an inordinate amount of disk space, but could conceivably pose a privacy risk if users believe deleting a message removes all traces of it. Message tags are synchronized based on notmuch's message-ID (usually the Message-ID header of a message), rather than based on message contents. This is slightly strange because very different messages can have the same Message-ID header, meaning the user will likely only read one of many messages bearing the same Message-ID header. It is conceivable that an attacker could suppress a message from a mailing list by sending another message with the same Message-ID. This bug is in the design of notmuch, and hence not something that muchsync can work around. muchsync itself does not assume Message-ID equivalence, relying instead on content hashes to synchronize link counts. Hence, any tools used to work around the problem should work on all replicas. Because notmuch and Xapian do not keep any kind of modification time on database entries, every invocation of muchsync requires a complete scan of all tags in the Xapian database to detect any changed tags. Fortunately muchsync heavily optimizes the scan so that it should take well under a second for 100,000 mail messages. However, this means that interfaces such as those used by notmuch-dump are not efficient enough (see the next paragraph). muchsync makes certain assumptions about the structure of notmuch's private types `notmuch_message_t` and `notmuch_directory_t`. In particular, it assumes that the Xapian document ID is the second field of these data structures. Sadly, there is no efficient and clean way to extract this information from the notmuch library interface. muchsync also makes other assumptions about how tokens are named in the Xapian database. These assumptions are necessary because the notmuch library interface and the notmuch dump utility are too slow to support synchronization every time you check mail. muchsync-7/NEWS0000644000175000017500000000356014357577074010512 00000000000000* Changes in release 7 Fixed a performance bug in which SQL queries used bitwise & instead of logical AND, which caused a linear scan of a database table when files were moved or deleted. Clarified that it's okay to link against OpenSSL libcrypto. Update to format man page with recent versions of pandoc. Improve some error handling and error messages. * Changes in release 6 Clarified that it's okay to link against OpenSSL libcrypto. Update to format man page with recent versions of pandoc. Improve some error handling and error messages. * Changes in release 5 Fixed a race condition that could cause a core dump on fast networks when flow-control logic kicked in. * Changes in release 4 Updated for GCC 7 header changes (thanks Toke Høiland-Jørgensen). * Changes in release 3 Cleaned up a few compilation warnings. Added help command to --server mode. Added --newid command-line option. Limit receive buffering to 128 MiB in response to complaints from users with more email than swap space who cannot initialize replicas. 128 MiB should still be enough to get full network utilization.
* Changes in release 2 Work around missing openat because of reports it is missing on some versions of Mac OS X. New configuration option muchsync.and_tags (from Brian Sniffen). * Changes in release 1 Various compabitility fixes for Debian Wheezy (thanks Simó Albert i Beltran). Allow ./configure WFLAGS="-Wall -Werror" to specify warnings (still use -Wall by default). For git only, make autogen.sh fetch pre-formatted muchsync.1 over http when pandoc not available. Fix a bug where tags containing '%' characters were not properly handled (thanks Brian Sniffen). Portability fixes for Mac OS X, which does not have clock_gettime. Clarify in man page that muchsync uses a trash directory instead of actually deleting files. Catch exceptions to exit rather than dump core on error. muchsync-7/INSTALL0000644000175000017500000003661012302053374011032 00000000000000Installation Instructions ************************* Copyright (C) 1994-1996, 1999-2002, 2004-2013 Free Software Foundation, Inc. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. This file is offered as-is, without warranty of any kind. Basic Installation ================== Briefly, the shell command `./configure && make && make install' should configure, build, and install this package. The following more-detailed instructions are generic; see the `README' file for instructions specific to this package. Some packages provide this `INSTALL' file but do not implement all of the features documented below. The lack of an optional feature in a given package is not necessarily a bug. More recommendations for GNU packages can be found in *note Makefile Conventions: (standards)Makefile Conventions. The `configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a `Makefile' in each directory of the package. It may also create one or more `.h' files containing system-dependent definitions. Finally, it creates a shell script `config.status' that you can run in the future to recreate the current configuration, and a file `config.log' containing compiler output (useful mainly for debugging `configure'). It can also use an optional file (typically called `config.cache' and enabled with `--cache-file=config.cache' or simply `-C') that saves the results of its tests to speed up reconfiguring. Caching is disabled by default to prevent problems with accidental use of stale cache files. If you need to do unusual things to compile the package, please try to figure out how `configure' could check whether to do them, and mail diffs or instructions to the address given in the `README' so they can be considered for the next release. If you are using the cache, and at some point `config.cache' contains results you don't want to keep, you may remove or edit it. The file `configure.ac' (or `configure.in') is used to create `configure' by a program called `autoconf'. You need `configure.ac' if you want to change it or regenerate `configure' using a newer version of `autoconf'. The simplest way to compile this package is: 1. `cd' to the directory containing the package's source code and type `./configure' to configure the package for your system. Running `configure' might take a while. While running, it prints some messages telling which features it is checking for. 2. Type `make' to compile the package. 3. 
Optionally, type `make check' to run any self-tests that come with the package, generally using the just-built uninstalled binaries. 4. Type `make install' to install the programs and any data files and documentation. When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular user, and only the `make install' phase executed with root privileges. 5. Optionally, type `make installcheck' to repeat any self-tests, but this time using the binaries in their final installed location. This target does not install anything. Running this target as a regular user, particularly if the prior `make install' required root privileges, verifies that the installation completed correctly. 6. You can remove the program binaries and object files from the source code directory by typing `make clean'. To also remove the files that `configure' created (so you can compile the package for a different kind of computer), type `make distclean'. There is also a `make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. 7. Often, you can also type `make uninstall' to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards. 8. Some packages, particularly those that use Automake, provide `make distcheck', which can by used by developers to test that all other targets like `make install' and `make uninstall' work correctly. This target is generally not run by end users. Compilers and Options ===================== Some systems require unusual options for compilation or linking that the `configure' script does not know about. Run `./configure --help' for details on some of the pertinent environment variables. You can give `configure' initial values for configuration parameters by setting variables in the command line or in the environment. Here is an example: ./configure CC=c99 CFLAGS=-g LIBS=-lposix *Note Defining Variables::, for more details. Compiling For Multiple Architectures ==================================== You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you can use GNU `make'. `cd' to the directory where you want the object files and executables to go and run the `configure' script. `configure' automatically checks for the source code in the directory that `configure' is in and in `..'. This is known as a "VPATH" build. With a non-GNU `make', it is safer to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use `make distclean' before reconfiguring for another architecture. On MacOS X 10.5 and later systems, you can create libraries and executables that work on multiple system types--known as "fat" or "universal" binaries--by specifying multiple `-arch' options to the compiler but only a single `-arch' option to the preprocessor. Like this: ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CPP="gcc -E" CXXCPP="g++ -E" This is not guaranteed to produce working output in all cases, you may have to build one architecture at a time and combine the results using the `lipo' tool if you have problems. 
Installation Names ================== By default, `make install' installs the package's commands under `/usr/local/bin', include files under `/usr/local/include', etc. You can specify an installation prefix other than `/usr/local' by giving `configure' the option `--prefix=PREFIX', where PREFIX must be an absolute file name. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you pass the option `--exec-prefix=PREFIX' to `configure', the package uses PREFIX as the prefix for installing programs and libraries. Documentation and other data files still use the regular prefix. In addition, if you use an unusual directory layout you can give options like `--bindir=DIR' to specify different values for particular kinds of files. Run `configure --help' for a list of the directories you can set and what kinds of files go in them. In general, the default for these options is expressed in terms of `${prefix}', so that specifying just `--prefix' will affect all of the other directory specifications that were not explicitly provided. The most portable way to affect installation locations is to pass the correct locations to `configure'; however, many packages provide one or both of the following shortcuts of passing variable assignments to the `make install' command line to change installation locations without having to reconfigure or recompile. The first method involves providing an override variable for each affected directory. For example, `make install prefix=/alternate/directory' will choose an alternate location for all directory configuration variables that were expressed in terms of `${prefix}'. Any directories that were specified during `configure', but not in terms of `${prefix}', must each be overridden at install time for the entire installation to be relocated. The approach of makefile variable overrides for each directory variable is required by the GNU Coding Standards, and ideally causes no recompilation. However, some platforms have known limitations with the semantics of shared libraries that end up requiring recompilation when using this method, particularly noticeable in packages that use GNU Libtool. The second method involves providing the `DESTDIR' variable. For example, `make install DESTDIR=/alternate/directory' will prepend `/alternate/directory' before all installation names. The approach of `DESTDIR' overrides is not required by the GNU Coding Standards, and does not work on platforms that have drive letters. On the other hand, it does better at avoiding recompilation issues, and works well even when some directory options were not specified in terms of `${prefix}' at `configure' time. Optional Features ================= If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving `configure' the option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. Some packages pay attention to `--enable-FEATURE' options to `configure', where FEATURE indicates an optional part of the package. They may also pay attention to `--with-PACKAGE' options, where PACKAGE is something like `gnu-as' or `x' (for the X Window System). The `README' should mention any `--enable-' and `--with-' options that the package recognizes. For packages that use the X Window System, `configure' can usually find the X include and library files automatically, but if it doesn't, you can use the `configure' options `--x-includes=DIR' and `--x-libraries=DIR' to specify their locations. 
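   As a sketch of the `--prefix' and `DESTDIR' mechanics described under Installation Names above (the staging directory is an arbitrary example):

     ./configure --prefix=/usr
     make
     make DESTDIR=/tmp/stage install    # files are staged under /tmp/stage/usr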
Some packages offer the ability to configure how verbose the execution of `make' will be. For these packages, running `./configure --enable-silent-rules' sets the default to minimal output, which can be overridden with `make V=1'; while running `./configure --disable-silent-rules' sets the default to verbose, which can be overridden with `make V=0'. Particular systems ================== On HP-UX, the default C compiler is not ANSI C compatible. If GNU CC is not installed, it is recommended to use the following options in order to use an ANSI C compiler: ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" and if that doesn't work, install pre-built binaries of GCC for HP-UX. HP-UX `make' updates targets which have the same time stamps as their prerequisites, which makes it generally unusable when shipped generated files such as `configure' are involved. Use GNU `make' instead. On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot parse its `' header file. The option `-nodtk' can be used as a workaround. If GNU CC is not installed, it is therefore recommended to try ./configure CC="cc" and if that doesn't work, try ./configure CC="cc -nodtk" On Solaris, don't put `/usr/ucb' early in your `PATH'. This directory contains several dysfunctional programs; working variants of these programs are available in `/usr/bin'. So, if you need `/usr/ucb' in your `PATH', put it _after_ `/usr/bin'. On Haiku, software installed for all users goes in `/boot/common', not `/usr/local'. It is recommended to use the following options: ./configure --prefix=/boot/common Specifying the System Type ========================== There may be some features `configure' cannot figure out automatically, but needs to determine by the type of machine the package will run on. Usually, assuming the package is built to be run on the _same_ architectures, `configure' can figure that out, but if it prints a message saying it cannot guess the machine type, give it the `--build=TYPE' option. TYPE can either be a short name for the system type, such as `sun4', or a canonical name which has the form: CPU-COMPANY-SYSTEM where SYSTEM can have one of these forms: OS KERNEL-OS See the file `config.sub' for the possible values of each field. If `config.sub' isn't included in this package, then this package doesn't need to know the machine type. If you are _building_ compiler tools for cross-compiling, you should use the option `--target=TYPE' to select the type of system they will produce code for. If you want to _use_ a cross compiler, that generates code for a platform different from the build platform, you should specify the "host" platform (i.e., that on which the generated programs will eventually be run) with `--host=TYPE'. Sharing Defaults ================ If you want to set default values for `configure' scripts to share, you can create a site shell script called `config.site' that gives default values for variables like `CC', `cache_file', and `prefix'. `configure' looks for `PREFIX/share/config.site' if it exists, then `PREFIX/etc/config.site' if it exists. Or, you can set the `CONFIG_SITE' environment variable to the location of the site script. A warning: not all `configure' scripts look for a site script. Defining Variables ================== Variables not defined in a site shell script can be set in the environment passed to `configure'. However, some packages may run configure again during the build, and the customized values of these variables may be lost. 
In order to avoid this problem, you should set them in the `configure' command line, using `VAR=value'. For example: ./configure CC=/usr/local2/bin/gcc causes the specified `gcc' to be used as the C compiler (unless it is overridden in the site shell script). Unfortunately, this technique does not work for `CONFIG_SHELL' due to an Autoconf limitation. Until the limitation is lifted, you can use this workaround: CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash `configure' Invocation ====================== `configure' recognizes the following options to control how it operates. `--help' `-h' Print a summary of all of the options to `configure', and exit. `--help=short' `--help=recursive' Print a summary of the options unique to this package's `configure', and exit. The `short' variant lists options used only in the top level, while the `recursive' variant lists options also present in any nested packages. `--version' `-V' Print the version of Autoconf used to generate the `configure' script, and exit. `--cache-file=FILE' Enable the cache: use and save the results of the tests in FILE, traditionally `config.cache'. FILE defaults to `/dev/null' to disable caching. `--config-cache' `-C' Alias for `--cache-file=config.cache'. `--quiet' `--silent' `-q' Do not print messages saying which checks are being made. To suppress all normal output, redirect it to `/dev/null' (any error messages will still be shown). `--srcdir=DIR' Look for the package's source code in directory DIR. Usually `configure' can determine that directory automatically. `--prefix=DIR' Use DIR as the installation prefix. *note Installation Names:: for more details, including other options available for fine-tuning the installation locations. `--no-create' `-n' Run the configure checks, but stop before creating any output files. `configure' also accepts some other, not widely useful, options. Run `configure --help' for more details. muchsync-7/misc.h0000644000175000017500000000207112536100426011077 00000000000000// -*- C++ -*- #ifndef _MUCHSYNC_MISC_H_ #define _MUCHSYNC_MISC_H_ 1 #include #include #include #include #include #ifndef ST_MTIM #define ST_MTIM 1 #endif //!ST_MTIM using std::string; extern int opt_verbose; template inline typename C::mapped_type find_default (typename C::mapped_type def, const C &c, typename C::key_type k) { auto i = c.find(k); return i == c.end() ? 
def : i->second; } std::istream &input_match (std::istream &in, char want); string percent_encode (const string &raw); string percent_decode (const string &escaped); class hash_ctx { SHA_CTX ctx_; public: static constexpr size_t output_bytes = SHA_DIGEST_LENGTH; hash_ctx() { init(); } void init() { SHA1_Init(&ctx_); } void update(const void *buf, size_t n) { SHA1_Update (&ctx_, buf, n); } string final(); }; bool hash_ok (const string &hash); constexpr double ts_to_double (const timespec &ts) { return ts.tv_sec + ts.tv_nsec / 1000000000.0; } void print_time (string msg); #endif /* !_MUCHSYNC_MISC_H_ 1 */ muchsync-7/sql_db.cc0000644000175000017500000002615213403557504011563 00000000000000 #include #include #include #include #include #include #include #include #include #include #include "misc.h" #include "sql_db.h" using namespace std; const char dbvers[] = "muchsync 0"; const char muchsync_schema[] = R"( -- General table CREATE TABLE configuration ( key TEXT PRIMARY KEY NOT NULL, value TEXT); CREATE TABLE sync_vector ( replica INTEGER PRIMARY KEY, version INTEGER); -- Shadow copy of the Xapian database to detect changes CREATE TABLE xapian_dirs ( dir_path TEXT UNIQUE NOT NULL, dir_docid INTEGER PRIMARY KEY, dir_mtime INTEGER); CREATE TABLE tags ( tag TEXT NOT NULL, docid INTEGER NOT NULL, UNIQUE (docid, tag), UNIQUE (tag, docid)); CREATE TABLE message_ids ( message_id TEXT UNIQUE NOT NULL, docid INTEGER PRIMARY KEY, replica INTEGER, version INTEGER); CREATE INDEX message_ids_writestamp ON message_ids (replica, version); CREATE TABLE xapian_files ( dir_docid INTEGER NOT NULL, name TEXT NOT NULL, docid INTEGER, mtime REAL, inode INTEGER, hash_id INGEGER, PRIMARY KEY (dir_docid, name)); CREATE INDEX xapian_files_hash_id ON xapian_files (hash_id, dir_docid); CREATE TABLE maildir_hashes ( hash_id INTEGER PRIMARY KEY, hash TEXT UNIQUE NOT NULL, size INTEGER, message_id TEXT, replica INTEGER, version INTEGER); CREATE INDEX maildir_hashes_message_id ON maildir_hashes (message_id); CREATE INDEX maildir_hashes_writestamp ON maildir_hashes (replica, version); CREATE TABLE xapian_nlinks ( hash_id INTEGER NOT NULL, dir_docid INTEGER NOT NULL, link_count INTEGER, PRIMARY KEY (hash_id, dir_docid)); )"; i64 create_random_id() { i64 id = 0; if (RAND_bytes ((unsigned char *) &id, sizeof (id)) == -1 || id == 0) { cerr << "RAND_pseudo_bytes failed\n"; return -1; } id &= ~(i64 (1) << 63); return id; } static sqlite3 * dbcreate (const char *path) { i64 self = create_random_id(); if (self <= 0) return nullptr; sqlite3 *db = nullptr; int err = sqlite3_open_v2 (path, &db, SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, nullptr); if (err) { cerr << path << ": " << sqlite3_errstr (err) << '\n'; return nullptr; } sqlexec(db, "PRAGMA locking_mode=EXCLUSIVE;"); try { sqlexec (db, "BEGIN;"); sqlexec (db, muchsync_schema); setconfig (db, "dbvers", dbvers); setconfig (db, "self", self); sqlexec (db, "INSERT INTO sync_vector (replica, version)" " VALUES (%lld, 1);", self); sqlexec (db, "COMMIT;"); } catch (sqlerr_t &exc) { sqlite3_close_v2 (db); cerr << exc.what () << '\n'; return nullptr; } return db; } sqlite3 * dbopen (const char *path, bool exclusive) { sqlite3 *db = nullptr; if (access (path, 0) && errno == ENOENT) db = dbcreate (path); else { sqlite3_open_v2 (path, &db, SQLITE_OPEN_READWRITE, nullptr); if (exclusive) sqlexec(db, "PRAGMA locking_mode=EXCLUSIVE;"); } if (!db) return nullptr; sqlexec (db, "PRAGMA secure_delete = 0;"); try { if (getconfig (db, "dbvers") != dbvers) { cerr << path << ": invalid database 
version\n"; sqlite3_close_v2 (db); return nullptr; } getconfig (db, "self"); } catch (sqldone_t &) { cerr << path << ": invalid configuration\n"; sqlite3_close_v2 (db); return nullptr; } catch (sqlerr_t &e) { cerr << path << ": " << e.what() << '\n'; sqlite3_close_v2 (db); return nullptr; } return db; } istream & read_writestamp (istream &in, writestamp &ws) { input_match (in, 'R'); in >> ws.first; input_match (in, '='); in >> ws.second; return in; } istream & read_sync_vector (istream &in, versvector &vv) { input_match (in, '<'); vv.clear(); for (;;) { char c; if ((in >> c) && c == '>') return in; in.unget(); writestamp ws; if (!read_writestamp (in, ws)) break; vv.insert (ws); if (!(in >> c) || c == '>') break; if (c != ',') { in.setstate (ios_base::failbit); break; } } return in; } string show_sync_vector (const versvector &vv) { ostringstream sb; sb << '<'; bool first = true; for (auto ws : vv) { if (first) first = false; else sb << ","; sb << 'R' << ws.first << '=' << ws.second; } sb << '>'; return sb.str(); } string permissive_percent_encode (const string &raw) { ostringstream outbuf; outbuf.fill('0'); outbuf.setf(ios::hex, ios::basefield); for (char c : raw) if (c <= ' ' || c >= '\177' || c == '%' || c == '(' || c == ')') outbuf << '%' << setw(2) << int (uint8_t(c)); else outbuf << c; return outbuf.str(); } template inline void intercalate (const C &c, F &&each, function between) { auto i = c.begin(), end = c.end(); if (i != end) { each (i); while (++i != end) { between(); each(i); } } } ostream & operator<< (ostream &os, const hash_info &hi) { os << "L " << hi.hash << ' ' << hi.size << ' ' << permissive_percent_encode(hi.message_id) << " R" << hi.hash_stamp.first << '=' << hi.hash_stamp.second << " ("; intercalate (hi.dirs, [&](decltype(hi.dirs.begin()) i) { os << i->second << '*' << permissive_percent_encode(i->first); }, [&]() {os << ' ';}); os << ')'; return os; } istream & operator>> (istream &is, hash_info &hi) { string hash, msgid; size_t size; writestamp stamp; decltype(hi.dirs) d; input_match(is, 'L') >> hash >> size >> msgid; if (is && !hash_ok(hash)) is.setstate (ios_base::failbit); read_writestamp(is, stamp); input_match(is, '('); char c; while ((is >> skipws >> c) && c != ')') { is.putback (c); i64 nlinks; is >> nlinks; input_match(is, '*'); string dir; is >> dir; if (dir.back() == ')') { is.putback (')'); dir.resize(dir.size()-1); } if (!dir.empty()) d.emplace (percent_decode (dir), nlinks); } if (is.good()) { hi.hash = hash; hi.size = size; hi.message_id = percent_decode (msgid); hi.hash_stamp = stamp; hi.dirs = move(d); } return is; } ostream & operator<< (ostream &os, const tag_info &ti) { os << "T " << permissive_percent_encode(ti.message_id) << " R" << ti.tag_stamp.first << '=' << ti.tag_stamp.second << " ("; intercalate (ti.tags, [&](decltype(ti.tags.begin()) i) { os << *i; }, [&]() {os << ' ';}); os << ')'; return os; } istream & operator>> (istream &is, tag_info &ti) { { string msgid; input_match(is, 'T') >> msgid; ti.message_id = percent_decode (msgid); } read_writestamp(is, ti.tag_stamp); input_match(is, '('); ti.tags.clear(); char c; while ((is >> skipws >> c) && c != ')') { is.putback (c); string tag; is >> tag; if (tag.back() == ')') { is.putback (')'); tag.resize(tag.size()-1); } if (!tag.empty()) ti.tags.insert(tag); } return is; } hash_lookup::hash_lookup (const string &m, sqlite3 *db) : gethash_(db, "SELECT hash_id, size, message_id, replica, version" " FROM maildir_hashes WHERE hash = ?;"), getlinks_(db, "SELECT dir_path, name, docid" " FROM 
xapian_files JOIN xapian_dirs USING (dir_docid)" " WHERE hash_id = ?;"), makehash_(db, "INSERT INTO maildir_hashes" " (hash, size, message_id, replica, version)" " VALUES (?, ?, ?, ?, ?);"), maildir(m) { } bool hash_lookup::lookup (const string &hash) { ok_ = false; content_.close(); if (!gethash_.reset().param(hash).step().row()) return false; hash_id_ = gethash_.integer(0); hi_.hash = hash; hi_.size = gethash_.integer(1); hi_.message_id = gethash_.str(2); hi_.hash_stamp.first = gethash_.integer(3); hi_.hash_stamp.second = gethash_.integer(4); hi_.dirs.clear(); links_.clear(); docid_ = -1; for (getlinks_.reset().param(hash_id_).step(); getlinks_.row(); getlinks_.step()) { string dir = getlinks_.str(0), name = getlinks_.str(1); ++hi_.dirs[dir]; links_.emplace_back(dir, name); if (docid_ == -1) docid_ = getlinks_.integer(2); } return ok_ = true; } void hash_lookup::create (const hash_info &rhi) { ok_ = false; content_.close(); makehash_.reset().param(rhi.hash, rhi.size, rhi.message_id, rhi.hash_stamp.first, rhi.hash_stamp.second).step(); hi_.hash = rhi.hash; hi_.size = rhi.size; hi_.message_id = rhi.message_id; hi_.hash_stamp = rhi.hash_stamp; hi_.dirs.clear(); hash_id_ = sqlite3_last_insert_rowid(sqlite3_db_handle(makehash_.get())); ok_ = true; } bool hash_lookup::get_pathname(string *out, bool *from_trash) const { struct stat sb; string path; for (int i = 0, e = nlinks(); i < e; i++) { path = link_path(i); if (!stat(path.c_str(), &sb) && S_ISREG(sb.st_mode) && sb.st_size == hi_.size) { if (out) *out = move(path); if (from_trash) *from_trash = false; return true; } } path = trashname(maildir, hi_.hash); int fd = open (path.c_str(), O_RDWR); if (fd < 0) return false; // Check size [not really necessary] if (fstat(fd, &sb) || sb.st_size != hi_.size) { close (fd); cerr << "deleting file with bad size " << path << '\n'; unlink(path.c_str()); return false; } // Check hash int n; char buf[16384]; hash_ctx ctx; while ((n = read(fd, buf, sizeof(buf))) > 0) ctx.update(buf, n); if (hi_.hash != ctx.final()) { close(fd); cerr << "deleting corrupt file " << path << '\n'; unlink(path.c_str()); return false; } // Found it in the trash fsync(fd); // Might just have downloaded it close(fd); if (out) *out = move(path); if (from_trash) *from_trash = true; return true; } streambuf * hash_lookup::content() { if (content_.is_open()) { content_.seekg(0); return content_.rdbuf(); } for (int i = 0, e = nlinks(); i < e; i++) { content_.open (link_path(i)); if (content_.is_open()) return content_.rdbuf(); } if (opt_verbose > 1 && nlinks() > 0) cerr << link_path(nlinks()-1) << ": " << strerror(errno) << "\n"; return nullptr; } tag_lookup::tag_lookup (sqlite3 *db) : getmsg_(db, "SELECT docid, replica, version" " FROM message_ids WHERE message_id = ?;"), gettags_(db, "SELECT tag FROM tags WHERE docid = ?;") { } bool tag_lookup::lookup (const string &msgid) { ok_ = false; if (!getmsg_.reset().param(msgid).step().row()) return false; ti_.message_id = msgid; docid_ = getmsg_.integer(0); ti_.tag_stamp.first = getmsg_.integer(1); ti_.tag_stamp.second = getmsg_.integer(2); ti_.tags.clear(); for (gettags_.reset().param(docid_).step(); gettags_.row(); gettags_.step()) ti_.tags.insert(gettags_.str(0)); return ok_ = true; } versvector get_sync_vector (sqlite3 *db) { versvector vv; sqlstmt_t s (db, "SELECT replica, version FROM sync_vector;"); while (s.step().row()) vv.emplace (s.integer(0), s.integer(1)); return vv; } #include "muchsync.h" string trashname (const string &maildir, const string &hash) { if (!hash_ok(hash)) throw 
std::runtime_error ("illegal hash: " + hash); return maildir + muchsync_trashdir + "/" + hash.substr(0,2) + "/" + hash.substr(2); } muchsync-7/notmuch_db.cc0000644000175000017500000001575713403557504012452 00000000000000 #include #include #include #include #include #include #include #include #include #include #include #include #include "cleanup.h" #include "infinibuf.h" #include "notmuch_db.h" using namespace std; static unordered_set lines(const string &s) { istringstream is (s); string line; unordered_set ret; while (getline(is, line)) ret.insert(line); return ret; } static string chomp(string s) { while (s.length() && (s.back() == '\n' || s.back() == '\r')) s.resize(s.length() - 1); return s; } static bool conf_to_bool(string s) { s = chomp(s); if (s.empty() || s == "false" || s == "0") return false; return true; } notmuch_db::message_t notmuch_db::get_message(const char *msgid) { notmuch_message_t *message; nmtry("notmuch_database_find_message", notmuch_database_find_message (notmuch(), msgid, &message)); return message_t (message); } notmuch_db::message_t notmuch_db::add_message(const string &path, const tags_t *newtags, bool *was_new) { notmuch_status_t err; notmuch_message_t *message; #if LIBNOTMUCH_CHECK_VERSION(5,1,0) err = notmuch_database_index_file(notmuch(), path.c_str(), nullptr, &message); #else // libnotmuch < 5.1.0 err = notmuch_database_add_message(notmuch(), path.c_str(), &message); #endif // libnotmuch < 5.1.0 if (err != NOTMUCH_STATUS_DUPLICATE_MESSAGE_ID) { nmtry("notmuch_database_add_message", err); set_tags(message, newtags ? *newtags : new_tags); } if (was_new) *was_new = err != NOTMUCH_STATUS_DUPLICATE_MESSAGE_ID; return message_t (message); } void notmuch_db::remove_message(const string &path) { notmuch_status_t err = notmuch_database_remove_message(notmuch(), path.c_str()); if (err != NOTMUCH_STATUS_DUPLICATE_MESSAGE_ID) nmtry("notmuch_database_remove_message", err); } void notmuch_db::set_tags(notmuch_message_t *msg, const tags_t &tags) { // Deliberately don't unthaw message if we throw exception nmtry("notmuch_message_freeze", notmuch_message_freeze(msg)); nmtry("notmuch_message_remove_all_tags", notmuch_message_remove_all_tags(msg)); for (auto tag : tags) nmtry("notmuch_message_add_tag", notmuch_message_add_tag(msg, tag.c_str())); if (sync_flags) nmtry("notmuch_message_maildir_flags_to_tags", notmuch_message_tags_to_maildir_flags(msg)); nmtry("notmuch_message_thaw", notmuch_message_thaw(msg)); } string notmuch_db::default_notmuch_config() { char *p = getenv("NOTMUCH_CONFIG"); if (p && *p) return p; p = getenv("HOME"); if (p && *p) return string(p) + "/.notmuch-config"; throw runtime_error ("Cannot find HOME directory\n"); } string notmuch_db::get_config(const char *config, int *err) { const char *av[] { "notmuch", "config", "get", config, nullptr }; return run_notmuch(av, nullptr, err); } void notmuch_db::set_config(const char *config, ...) 
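// Runs `notmuch config set <key> <value>...'.  Callers terminate the
// variadic argument list with a nullptr, which is also pushed onto av so
// that av.data() remains a null-terminated argv for run_notmuch().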
{ va_list ap; va_start(ap, config); vector av { "notmuch", "config", "set", config }; const char *a; do { a = va_arg(ap, const char *); av.push_back(a); } while (a); run_notmuch(av.data(), "[notmuch] "); } notmuch_db::notmuch_db(string config, bool create) : notmuch_config (config), maildir (chomp(get_config("database.path"))), new_tags (lines(get_config("new.tags"))), and_tags (make_and_tags()), sync_flags (conf_to_bool(get_config("maildir.synchronize_flags"))) { if (maildir.empty()) throw runtime_error(notmuch_config + ": no database.path in config file"); if (create) { struct stat sb; string nmdir = maildir + "/.notmuch"; int err = stat(nmdir.c_str(), &sb); if (!err && S_ISDIR(sb.st_mode)) return; if (!err || errno != ENOENT) throw runtime_error(nmdir + ": cannot access directory"); mkdir(maildir.c_str(), 0777); nmtry("notmuch_database_create", notmuch_database_create(maildir.c_str(), ¬much_)); } } notmuch_db::~notmuch_db() { close(); } notmuch_db::tags_t notmuch_db::make_and_tags() { int err; string s = get_config("muchsync.and_tags", &err); return err ? new_tags : lines(s); } string notmuch_db::run_notmuch(const char *const *av, const char *errprefix, int *exit_value) { int fds[2]; if (pipe(fds) != 0) throw runtime_error (string("pipe: ") + strerror(errno)); pid_t pid = fork(); switch (pid) { case -1: { string err = string("fork: ") + strerror(errno); ::close(fds[0]); ::close(fds[1]); throw runtime_error (err); } case 0: ::close(fds[0]); if (errprefix && fds[1] != 2) dup2(fds[1], 2); if (fds[1] != 1) { dup2(fds[1], 1); if (errprefix && fds[1] != 2) ::close(fds[1]); } setenv("NOTMUCH_CONFIG", notmuch_config.c_str(), 1); int err = -1; if (exit_value) { // Since the caller is looking at exit value, suppress chatter err = dup(2); ::close(2); fcntl(err, F_SETFD, 1); ::open("/dev/null", O_WRONLY); } execvp("notmuch", const_cast (av)); if (err != -1) dup2(err, 2); cerr << "notmuch: " << strerror(errno) << endl; // Use SIGINT as hacky way to convey that exec failed raise(SIGINT); _exit(127); } ::close(fds[1]); ifdstream in (fds[0]); ostringstream os; if (errprefix) { string line; while (getline(in, line)) cerr << errprefix << line << '\n'; } else os << in.rdbuf(); int status; if (waitpid(pid, &status, 0) != pid) assert(!"waitpid failed waiting for notmuch"); else if (!WIFEXITED(status)) { if (WIFSIGNALED(status)) { if (WTERMSIG(status) == SIGINT) throw runtime_error ("could not run notmuch"); else throw runtime_error ("notmuch exited with signal " + std::to_string(WTERMSIG(status))); } else throw runtime_error ("notmuch exit status " + std::to_string(status)); } if (exit_value) *exit_value = WEXITSTATUS(status); return os.str(); } Xapian::docid notmuch_db::get_dir_docid(const char *path) { unique_obj dir; nmtry("notmuch_database_get_directory", notmuch_database_get_directory(notmuch(), path, &dir.get())); if (!dir) throw range_error (path + string (": directory not found in notmuch")); /* XXX -- evil evil */ struct fake_directory { notmuch_database_t *notmuch; Xapian::docid doc_id; }; return reinterpret_cast(dir.get())->doc_id; } notmuch_database_t * notmuch_db::notmuch () { if (!notmuch_) { notmuch_status_t err = notmuch_database_open (maildir.c_str(), NOTMUCH_DATABASE_MODE_READ_WRITE, ¬much_); if (err) throw runtime_error (maildir + ": " + notmuch_status_to_string(err)); } return notmuch_; } void notmuch_db::close() { if (notmuch_) notmuch_database_destroy (notmuch_); notmuch_ = nullptr; } void notmuch_db::run_new(const char *prefix) { const char *av[] = { "notmuch", "new", nullptr }; 
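  // Release our own handle first; the spawned "notmuch new" opens the
  // database itself.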
close(); run_notmuch(av, prefix); } muchsync-7/install-sh0000755000175000017500000003325512302053374012007 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2011-11-20.07; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. 
-o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call 'install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for 'test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 
2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. 
trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: muchsync-7/AUTHORS0000644000175000017500000000001712302053223011032 00000000000000David Mazieres