fsvs-1.2.6/0000755000202400020240000000000012554717236011463 5ustar marekmarekfsvs-1.2.6/configure0000755000202400020240000064656212554717236013415 0ustar marekmarek#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for fsvs . # # Report bugs to . # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. 
in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. 
alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org and $0: http://fsvs.tigris.org/ about your system, including $0: any error possibly output before this message. Then $0: install a modern shell, or manually run the script $0: under such a shell if you do have one." 
fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. 
Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... 
but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='fsvs' PACKAGE_TARNAME='fsvs' PACKAGE_VERSION='' PACKAGE_STRING='fsvs ' PACKAGE_BUGREPORT='http://fsvs.tigris.org/' PACKAGE_URL='' # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_unique_file="src/actions.c" ac_header_list= ac_subst_vars='LTLIBOBJS LIBOBJS HAVE_UINT64_T HAVE_UINT32_T HAS_FASTCALL ENABLE_RELEASE CHROOTER_JAIL HAVE_LOCALES HAVE_FMEMOPEN NEED_ENVIRON_EXTERN HAVE_O_DIRECTORY ENABLE_GCOV ENABLE_DEBUG ENABLE_DEV_FAKE EXTRALIBS WAA_WC_MD5_CHARS EGREP GREP CPP OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking with_aprinc with_svninc with_waa_md5 with_aprlib with_svnlib enable_dev_fake enable_debug enable_gcov with_chroot enable_release enable_largefile ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. 
# These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. 
case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | 
--mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | 
--program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | 
--targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. 
with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. 
case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? 
"cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures fsvs to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/fsvs] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of fsvs :";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized 
--enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-dev-fake Include fake definitions for MAJOR(), MINOR() and MKDEV(). Needed if none found. --enable-debug compile some extra debug checks in (valgrind, gdb) (default is no) --enable-gcov whether to compile with instrumentation for gcov (default is no) (needs --enable-debug) --enable-release whether to compile without debug messages. Makes image smaller (to about half size), but makes -d and -D inoperative. (Default is no) --disable-largefile omit support for large files Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-aprinc=PATH Specify an include directory for the APR headers. --with-svninc=PATH Specify an include directory for the subversion headers. --with-waa_md5=NUMBER Specifies how many hex characters of the MD5 of the working copy root should be used to address the data in the WAA. This may be increased if you have a lot of different working copies on a single machine. The default is 0; useful values are 0, and from 6 to 32. --with-aprlib=PATH Specify a directory containing APR libraries. --with-svnlib=PATH Specify a directory containing subversion libraries. --with-chroot=PATH Specify a chroot environment for the fsvs-chrooter helper. Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to . _ACEOF ac_status=$? 
fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF fsvs configure generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. 
This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## -------------------------------------- ## ## Report this to http://fsvs.tigris.org/ ## ## -------------------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. 
ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. 
We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES # ---------------------------------------------------- # Tries to find if the field MEMBER exists in type AGGR, after including # INCLUDES, setting cache variable VAR accordingly. ac_fn_c_check_member () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 $as_echo_n "checking for $2.$3... " >&6; } if eval \${$4+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $5 int main () { static $2 ac_aggr; if (ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $5 int main () { static $2 ac_aggr; if (sizeof ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else eval "$4=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$4 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_member # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type # ac_fn_c_find_uintX_t LINENO BITS VAR # ------------------------------------ # Finds an unsigned integer type with width BITS, setting cache variable VAR # accordingly. ac_fn_c_find_uintX_t () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 $as_echo_n "checking for uint$2_t... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" # Order is important - never check a type that is potentially smaller # than half of the expected target width. for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default int main () { static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : case $ac_type in #( uint$2_t) : eval "$3=yes" ;; #( *) : eval "$3=\$ac_type" ;; esac fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if eval test \"x\$"$3"\" = x"no"; then : else break fi done fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_find_uintX_t cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by fsvs $as_me , which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
$as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. 
{ echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi as_fn_append ac_header_list " stdlib.h" as_fn_append ac_header_list " unistd.h" as_fn_append ac_header_list " sys/param.h" # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. 
ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. 
shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. 
break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. 
The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo 
"$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. 
# On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" if test "x$ac_cv_header_minix_config_h" = xyes; then : MINIX=yes else MINIX= fi if test "$MINIX" = yes; then $as_echo "#define _POSIX_SOURCE 1" >>confdefs.h $as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h $as_echo "#define _MINIX 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 $as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } if ${ac_cv_safe_to_define___extensions__+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ # define __EXTENSIONS__ 1 $ac_includes_default int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_safe_to_define___extensions__=yes else ac_cv_safe_to_define___extensions__=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 $as_echo "$ac_cv_safe_to_define___extensions__" >&6; } test $ac_cv_safe_to_define___extensions__ = yes && $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h $as_echo "#define _ALL_SOURCE 1" >>confdefs.h $as_echo "#define _GNU_SOURCE 1" >>confdefs.h $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h # if [[ "x$cache_file" == /dev/null ]] # then # cache_file=config.cache # fi # AC_CACHE_LOAD ac_config_headers="$ac_config_headers src/config.h" { $as_echo "$as_me:${as_lineno-$LINENO}: *** Now configuring FSVS ***" >&5 $as_echo "$as_me: *** Now configuring FSVS ***" >&6;} # Checks for programs. ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. 
set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. 
*/ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. 
if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. 
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ##################################### Header files INCDIRS="/usr/local/include /usr/include /openpkg/include " # The subversion headers do a #include , so the APR libraries # *have* to be directly specified. # Furthermore there's apr-1/ as directory name, depending on apr version. # Is there something like this available for subversion? # Check whether --with-aprinc was given. if test "${with_aprinc+set}" = set; then : withval=$with_aprinc; INCDIRS="$INCDIRS $withval" else if APR=`apr-1-config --includedir || apr-config --includedir` then INCDIRS="$INCDIRS $APR" fi fi # Check whether --with-svninc was given. if test "${with_svninc+set}" = set; then : withval=$with_svninc; INCDIRS="$INCDIRS $withval" fi # Check whether --with-svninc was given. 
if test "${with_svninc+set}" = set; then : withval=$with_svninc; INCDIRS="$INCDIRS $withval" fi # Check whether --with-waa_md5 was given. if test "${with_waa_md5+set}" = set; then : withval=$with_waa_md5; # The shell gives an error on numeric comparision with a non-numeric # value. # We allow from 3 characters on, although it might not make much # sense. WAA_WC_MD5_CHARS=`perl -e '$_=0+shift; print $_+0 if $_==0 || ($_>3 && $_<=16)' "$withval"` if [ "$WAA_WC_MD5_CHARS" = "" ] then as_fn_error $? "The given value for --with-waa_md5 is invalid." "$LINENO" 5 fi else WAA_WC_MD5_CHARS=0 fi cat >>confdefs.h <<_ACEOF #define WAA_WC_MD5_CHARS $WAA_WC_MD5_CHARS _ACEOF CFLAGS="$CFLAGS -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64" for dir in $INCDIRS do # using -I would result in the files being _non_ system include # directories, ie. they'd clutter the dependency files. # That's why -idirafter is used. CFLAGS="$CFLAGS -idirafter $dir" done cat >>confdefs.h <<_ACEOF #define CFLAGS $CFLAGS _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: \"CFLAGS=$CFLAGS\"" >&5 $as_echo "$as_me: \"CFLAGS=$CFLAGS\"" >&6;} ##################################### Linker LIBDIRS="/usr/local/lib /openpkg/lib" # Check whether --with-aprlib was given. if test "${with_aprlib+set}" = set; then : withval=$with_aprlib; LIBDIRS="$LIBDIRS $withval" fi # Check whether --with-svnlib was given. if test "${with_svnlib+set}" = set; then : withval=$with_svnlib; LIBDIRS="$LIBDIRS $withval" fi for dir in $LIBDIRS do LDFLAGS="$LDFLAGS -L$dir" done cat >>confdefs.h <<_ACEOF #define LDFLAGS $LDFLAGS _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: \"LDFLAGS=$LDFLAGS\"" >&5 $as_echo "$as_me: \"LDFLAGS=$LDFLAGS\"" >&6;} EXTRALIBS="-laprutil-1 -lapr-1" if [ `uname -s` = "SunOS" ] then # Solaris 10, thanks Peter. EXTRALIBS="-lsocket -lnsl $EXTRALIBS" fi if [ `uname -s` = "Darwin" ] then # OSX 10.6 - thanks, Florian. 
EXTRALIBS="-liconv $EXTRALIBS" have_fmemopen=no fi cat >>confdefs.h <<_ACEOF #define EXTRALIBS $EXTRALIBS _ACEOF ##################################### Checks # Checks for libraries. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcre_compile in -lpcre" >&5 $as_echo_n "checking for pcre_compile in -lpcre... " >&6; } if ${ac_cv_lib_pcre_pcre_compile+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpcre $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pcre_compile (); int main () { return pcre_compile (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pcre_pcre_compile=yes else ac_cv_lib_pcre_pcre_compile=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pcre_pcre_compile" >&5 $as_echo "$ac_cv_lib_pcre_pcre_compile" >&6; } if test "x$ac_cv_lib_pcre_pcre_compile" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBPCRE 1 _ACEOF LIBS="-lpcre $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find PCRE. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for apr_md5_init in -laprutil-1" >&5 $as_echo_n "checking for apr_md5_init in -laprutil-1... " >&6; } if ${ac_cv_lib_aprutil_1_apr_md5_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-laprutil-1 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char apr_md5_init (); int main () { return apr_md5_init (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_aprutil_1_apr_md5_init=yes else ac_cv_lib_aprutil_1_apr_md5_init=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_aprutil_1_apr_md5_init" >&5 $as_echo "$ac_cv_lib_aprutil_1_apr_md5_init" >&6; } if test "x$ac_cv_lib_aprutil_1_apr_md5_init" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBAPRUTIL_1 1 _ACEOF LIBS="-laprutil-1 $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find APR. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for svn_txdelta_apply in -lsvn_delta-1" >&5 $as_echo_n "checking for svn_txdelta_apply in -lsvn_delta-1... " >&6; } if ${ac_cv_lib_svn_delta_1_svn_txdelta_apply+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvn_delta-1 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char svn_txdelta_apply (); int main () { return svn_txdelta_apply (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svn_delta_1_svn_txdelta_apply=yes else ac_cv_lib_svn_delta_1_svn_txdelta_apply=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&5 $as_echo "$ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&6; } if test "x$ac_cv_lib_svn_delta_1_svn_txdelta_apply" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSVN_DELTA_1 1 _ACEOF LIBS="-lsvn_delta-1 $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find subversion. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for svn_ra_initialize in -lsvn_ra-1" >&5 $as_echo_n "checking for svn_ra_initialize in -lsvn_ra-1... " >&6; } if ${ac_cv_lib_svn_ra_1_svn_ra_initialize+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvn_ra-1 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char svn_ra_initialize (); int main () { return svn_ra_initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svn_ra_1_svn_ra_initialize=yes else ac_cv_lib_svn_ra_1_svn_ra_initialize=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svn_ra_1_svn_ra_initialize" >&5 $as_echo "$ac_cv_lib_svn_ra_1_svn_ra_initialize" >&6; } if test "x$ac_cv_lib_svn_ra_1_svn_ra_initialize" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSVN_RA_1 1 _ACEOF LIBS="-lsvn_ra-1 $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find subversion. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gdbm_firstkey in -lgdbm" >&5 $as_echo_n "checking for gdbm_firstkey in -lgdbm... " >&6; } if ${ac_cv_lib_gdbm_gdbm_firstkey+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgdbm $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gdbm_firstkey (); int main () { return gdbm_firstkey (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gdbm_gdbm_firstkey=yes else ac_cv_lib_gdbm_gdbm_firstkey=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gdbm_gdbm_firstkey" >&5 $as_echo "$ac_cv_lib_gdbm_gdbm_firstkey" >&6; } if test "x$ac_cv_lib_gdbm_gdbm_firstkey" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGDBM 1 _ACEOF LIBS="-lgdbm $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find gdbm. See \`config.log' for more details" "$LINENO" 5; } fi # Checks for header files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi for ac_header in fcntl.h stddef.h stdlib.h string.h sys/time.h unistd.h pcre.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Needed header file not found. 
See \`config.log' for more details" "$LINENO" 5; } fi done #apr_file_io.h subversion-1/svn_md5.h]) ac_header_dirent=no for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } if eval \${$as_ac_Header+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include <$ac_hdr> int main () { if ((DIR *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_ac_Header=yes" else eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_ac_Header { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 _ACEOF ac_header_dirent=$ac_hdr; break fi done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' dir; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' x; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi ac_fn_c_check_member "$LINENO" "struct stat" "st_mtim" "ac_cv_member_struct_stat_st_mtim" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_mtim" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_MTIM 1 _ACEOF fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { VALGRIND_MAKE_MEM_DEFINED(0, 2); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : have_valgrind=yes else have_valgrind=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test x$have_valgrind = xyes ; then $as_echo "#define HAVE_VALGRIND 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No compatible valgrind version." >&5 $as_echo "$as_me: No compatible valgrind version." >&6;} fi # Check whether S_IFMT is dense, ie. a single block of binary ones. # If it isn't, the bitcount wouldn't tell the needed bits to represent the # data. # If S_IFMT is dense, the increment results in a single carry bit. # Checked via changing /usr/include/bits/stat.h. 
if test "$cross_compiling" = yes; then : { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run test program while cross compiling See \`config.log' for more details" "$LINENO" 5; } else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include "src/preproc.h" int main(int argc, char **args) { if (_BITCOUNT( (S_IFMT >> MODE_T_SHIFT_BITS) + 1) == 1) return 0; else return 1; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: S_IFMT is ok." >&5 $as_echo "$as_me: S_IFMT is ok." >&6;} else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "You have a sparse S_IFMT. Please tell the dev@ mailing list. See \`config.log' for more details" "$LINENO" 5; } fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi for ac_header in linux/kdev_t.h do : ac_fn_c_check_header_mongrel "$LINENO" "linux/kdev_t.h" "ac_cv_header_linux_kdev_t_h" "$ac_includes_default" if test "x$ac_cv_header_linux_kdev_t_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LINUX_KDEV_T_H 1 _ACEOF fi done # Check whether --enable-dev-fake was given. if test "${enable_dev_fake+set}" = set; then : enableval=$enable_dev_fake; $as_echo "#define ENABLE_DEV_FAKE 1" >>confdefs.h ENABLE_DEV_FAKE=1 fi # Check whether --enable-debug was given. if test "${enable_debug+set}" = set; then : enableval=$enable_debug; $as_echo "#define ENABLE_DEBUG 1" >>confdefs.h ENABLE_DEBUG=1 fi # Check whether --enable-gcov was given. if test "${enable_gcov+set}" = set; then : enableval=$enable_gcov; $as_echo "#define ENABLE_GCOV 1" >>confdefs.h ENABLE_GCOV=1 fi $as_echo "#define ENABLE_GCOV 1" >>confdefs.h cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { int i=O_DIRECTORY; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : have_o_directory=yes else have_o_directory=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test x$have_o_directory = xyes ; then $as_echo "#define HAVE_O_DIRECTORY 1" >>confdefs.h fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { char **environ; int main(void) { return environ == NULL; } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : need_environ_extern=no else need_environ_extern=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test x$need_environ_extern = xyes ; then $as_echo "#define NEED_ENVIRON_EXTERN 1" >>confdefs.h fi if test x$have_fmemopen = x then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { int main(int argc, char *args[]) { return fmemopen(args[0], 2, args[1]) == NULL; } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : have_fmemopen=yes else have_fmemopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi if test x$have_fmemopen = xyes then $as_echo "#define HAVE_FMEMOPEN 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: fmemopen() not found. debug_buffer option not available." >&5 $as_echo "$as_me: WARNING: fmemopen() not found. debug_buffer option not available." >&2;} fi if locale -a > /dev/null 2>&1 then $as_echo "#define HAVE_LOCALES 1" >>confdefs.h fi # Check whether --with-chroot was given. if test "${with_chroot+set}" = set; then : withval=$with_chroot; if test "$withval" = "yes" ; then as_fn_error $? "--with-chroot requires an argument." "$LINENO" 5 else CHROOTER_JAIL=$withval cat >>confdefs.h <<_ACEOF #define CHROOTER_JAIL "$CHROOTER_JAIL" _ACEOF fi fi # Check whether --enable-release was given. 
if test "${enable_release+set}" = set; then : enableval=$enable_release; $as_echo "#define ENABLE_RELEASE 1" >>confdefs.h ENABLE_RELEASE=1 fi if [ "$ENABLE_RELEASE$ENABLE_DEBUG" = "11" ] then as_fn_error $? "--enable-debug and --enable-release are incompatibel. Use one or the other." "$LINENO" 5 fi for ac_func in getdents64 do : ac_fn_c_check_func "$LINENO" "getdents64" "ac_cv_func_getdents64" if test "x$ac_cv_func_getdents64" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETDENTS64 1 _ACEOF fi done for ac_header in linux/types.h do : ac_fn_c_check_header_mongrel "$LINENO" "linux/types.h" "ac_cv_header_linux_types_h" "$ac_includes_default" if test "x$ac_cv_header_linux_types_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LINUX_TYPES_H 1 _ACEOF fi done for ac_header in linux/unistd.h do : ac_fn_c_check_header_mongrel "$LINENO" "linux/unistd.h" "ac_cv_header_linux_unistd_h" "$ac_includes_default" if test "x$ac_cv_header_linux_unistd_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LINUX_UNISTD_H 1 _ACEOF fi done ac_fn_c_check_type "$LINENO" "comparison_fn_t" "ac_cv_type_comparison_fn_t" "$ac_includes_default" if test "x$ac_cv_type_comparison_fn_t" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_COMPARISON_FN_T 1 _ACEOF fi # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... " >&6; } if ${ac_cv_sys_largefile_CC+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #define _FILE_OFFSET_BITS 64 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #define _LARGE_FILES 1 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi # Checks for typedefs, structures, and compiler characteristics. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. 
*/ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "#define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 $as_echo_n "checking for inline... " >&6; } if ${ac_cv_c_inline+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifndef __cplusplus typedef int foo_t; static $ac_kw foo_t static_foo () {return 0; } $ac_kw foo_t foo () {return 0; } #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_inline=$ac_kw fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test "$ac_cv_c_inline" != no && break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 $as_echo "$ac_cv_c_inline" >&6; } case $ac_cv_c_inline in inline | yes) ;; *) case $ac_cv_c_inline in no) ac_val=;; *) ac_val=$ac_cv_c_inline;; esac cat >>confdefs.h <<_ACEOF #ifndef __cplusplus #define inline $ac_val #endif _ACEOF ;; esac ac_fn_c_check_member "$LINENO" "struct stat" "st_rdev" "ac_cv_member_struct_stat_st_rdev" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_rdev" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_RDEV 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 $as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } if ${ac_cv_header_time+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { if ((struct tm *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_time=yes else ac_cv_header_time=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5 $as_echo "$ac_cv_header_time" >&6; } if test $ac_cv_header_time = yes; then $as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } if ${ac_cv_struct_tm+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include int main () { struct tm tm; int *p = &tm.tm_sec; return !p; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_tm=time.h else ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 $as_echo "$ac_cv_struct_tm" >&6; } if test $ac_cv_struct_tm = sys/time.h; then $as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h fi $as_echo "#define HAS_FASTCALL 1" >>confdefs.h # Only i386 (32bit) has fastcall. if [ `uname -m` = i?86 ] then HAS_FASTCALL=1 fi ac_fn_c_find_uintX_t "$LINENO" "32" "ac_cv_c_uint32_t" case $ac_cv_c_uint32_t in #( no|yes) ;; #( *) $as_echo "#define _UINT32_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF #define uint32_t $ac_cv_c_uint32_t _ACEOF ;; esac # See config.h for an explanation. if [ "$ac_cv_c_uint32_t" = "yes" ] then ac_cv_c_uint32_t=uint32_t fi cat >>confdefs.h <<_ACEOF #define AC_CV_C_UINT32_T $ac_cv_c_uint32_t _ACEOF ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t" case $ac_cv_c_uint64_t in #( no|yes) ;; #( *) $as_echo "#define _UINT64_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF #define uint64_t $ac_cv_c_uint64_t _ACEOF ;; esac if [ "$ac_cv_c_uint64_t" = "yes" ] then ac_cv_c_uint64_t=uint64_t fi cat >>confdefs.h <<_ACEOF #define AC_CV_C_UINT64_T $ac_cv_c_uint64_t _ACEOF # Checks for library functions. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5 $as_echo_n "checking for uid_t in sys/types.h... " >&6; } if ${ac_cv_type_uid_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "uid_t" >/dev/null 2>&1; then : ac_cv_type_uid_t=yes else ac_cv_type_uid_t=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5 $as_echo "$ac_cv_type_uid_t" >&6; } if test $ac_cv_type_uid_t = no; then $as_echo "#define uid_t int" >>confdefs.h $as_echo "#define gid_t int" >>confdefs.h fi for ac_header in unistd.h do : ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default" if test "x$ac_cv_header_unistd_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UNISTD_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5 $as_echo_n "checking for working chown... " >&6; } if ${ac_cv_func_chown_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_chown_works=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default #include int main () { char *f = "conftest.chown"; struct stat before, after; if (creat (f, 0600) < 0) return 1; if (stat (f, &before) < 0) return 1; if (chown (f, (uid_t) -1, (gid_t) -1) == -1) return 1; if (stat (f, &after) < 0) return 1; return ! 
(before.st_uid == after.st_uid && before.st_gid == after.st_gid); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_chown_works=yes else ac_cv_func_chown_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi rm -f conftest.chown fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5 $as_echo "$ac_cv_func_chown_works" >&6; } if test $ac_cv_func_chown_works = yes; then $as_echo "#define HAVE_CHOWN 1" >>confdefs.h fi ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default" if test "x$ac_cv_type_pid_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define pid_t int _ACEOF fi for ac_header in vfork.h do : ac_fn_c_check_header_mongrel "$LINENO" "vfork.h" "ac_cv_header_vfork_h" "$ac_includes_default" if test "x$ac_cv_header_vfork_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VFORK_H 1 _ACEOF fi done for ac_func in fork vfork do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done if test "x$ac_cv_func_fork" = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working fork" >&5 $as_echo_n "checking for working fork... " >&6; } if ${ac_cv_func_fork_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_fork_works=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* By Ruediger Kuhlmann. 
*/ return fork () < 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_fork_works=yes else ac_cv_func_fork_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_fork_works" >&5 $as_echo "$ac_cv_func_fork_works" >&6; } else ac_cv_func_fork_works=$ac_cv_func_fork fi if test "x$ac_cv_func_fork_works" = xcross; then case $host in *-*-amigaos* | *-*-msdosdjgpp*) # Override, as these systems have only a dummy fork() stub ac_cv_func_fork_works=no ;; *) ac_cv_func_fork_works=yes ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&2;} fi ac_cv_func_vfork_works=$ac_cv_func_vfork if test "x$ac_cv_func_vfork" = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working vfork" >&5 $as_echo_n "checking for working vfork... " >&6; } if ${ac_cv_func_vfork_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_vfork_works=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Thanks to Paul Eggert for this test. */ $ac_includes_default #include #ifdef HAVE_VFORK_H # include #endif /* On some sparc systems, changes by the child to local and incoming argument registers are propagated back to the parent. The compiler is told about this with #include , but some compilers (e.g. gcc -O) don't grok . Test for this by using a static variable whose address is put into a register that is clobbered by the vfork. 
*/ static void #ifdef __cplusplus sparc_address_test (int arg) # else sparc_address_test (arg) int arg; #endif { static pid_t child; if (!child) { child = vfork (); if (child < 0) { perror ("vfork"); _exit(2); } if (!child) { arg = getpid(); write(-1, "", 0); _exit (arg); } } } int main () { pid_t parent = getpid (); pid_t child; sparc_address_test (0); child = vfork (); if (child == 0) { /* Here is another test for sparc vfork register problems. This test uses lots of local variables, at least as many local variables as main has allocated so far including compiler temporaries. 4 locals are enough for gcc 1.40.3 on a Solaris 4.1.3 sparc, but we use 8 to be safe. A buggy compiler should reuse the register of parent for one of the local variables, since it will think that parent can't possibly be used any more in this routine. Assigning to the local variable will thus munge parent in the parent process. */ pid_t p = getpid(), p1 = getpid(), p2 = getpid(), p3 = getpid(), p4 = getpid(), p5 = getpid(), p6 = getpid(), p7 = getpid(); /* Convince the compiler that p..p7 are live; otherwise, it might use the same hardware register for all 8 local variables. */ if (p != p1 || p != p2 || p != p3 || p != p4 || p != p5 || p != p6 || p != p7) _exit(1); /* On some systems (e.g. IRIX 3.3), vfork doesn't separate parent from child file descriptors. If the child closes a descriptor before it execs or exits, this munges the parent's descriptor as well. Test for this by closing stdout in the child. */ _exit(close(fileno(stdout)) != 0); } else { int status; struct stat st; while (wait(&status) != child) ; return ( /* Was there some problem with vforking? */ child < 0 /* Did the child fail? (This shouldn't happen.) */ || status /* Did the vfork/compiler bug occur? */ || parent != getpid() /* Did the file descriptor bug occur? 
*/ || fstat(fileno(stdout), &st) != 0 ); } } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_vfork_works=yes else ac_cv_func_vfork_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_vfork_works" >&5 $as_echo "$ac_cv_func_vfork_works" >&6; } fi; if test "x$ac_cv_func_fork_works" = xcross; then ac_cv_func_vfork_works=$ac_cv_func_vfork { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&2;} fi if test "x$ac_cv_func_vfork_works" = xyes; then $as_echo "#define HAVE_WORKING_VFORK 1" >>confdefs.h else $as_echo "#define vfork fork" >>confdefs.h fi if test "x$ac_cv_func_fork_works" = xyes; then $as_echo "#define HAVE_WORKING_FORK 1" >>confdefs.h fi for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5 $as_echo_n "checking for GNU libc compatible malloc... " >&6; } if ${ac_cv_func_malloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_malloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *malloc (); #endif int main () { return ! 
malloc (0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_malloc_0_nonnull=yes else ac_cv_func_malloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5 $as_echo "$ac_cv_func_malloc_0_nonnull" >&6; } if test $ac_cv_func_malloc_0_nonnull = yes; then : $as_echo "#define HAVE_MALLOC 1" >>confdefs.h else $as_echo "#define HAVE_MALLOC 0" >>confdefs.h case " $LIBOBJS " in *" malloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS malloc.$ac_objext" ;; esac $as_echo "#define malloc rpl_malloc" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working memcmp" >&5 $as_echo_n "checking for working memcmp... " >&6; } if ${ac_cv_func_memcmp_working+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_memcmp_working=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Some versions of memcmp are not 8-bit clean. */ char c0 = '\100', c1 = '\200', c2 = '\201'; if (memcmp(&c0, &c2, 1) >= 0 || memcmp(&c1, &c2, 1) >= 0) return 1; /* The Next x86 OpenStep bug shows up only when comparing 16 bytes or more and with at least one buffer not starting on a 4-byte boundary. William Lewis provided this test program. 
*/ { char foo[21]; char bar[21]; int i; for (i = 0; i < 4; i++) { char *a = foo + i; char *b = bar + i; strcpy (a, "--------01111111"); strcpy (b, "--------10000000"); if (memcmp (a, b, 16) >= 0) return 1; } return 0; } ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_memcmp_working=yes else ac_cv_func_memcmp_working=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_memcmp_working" >&5 $as_echo "$ac_cv_func_memcmp_working" >&6; } test $ac_cv_func_memcmp_working = no && case " $LIBOBJS " in *" memcmp.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS memcmp.$ac_objext" ;; esac for ac_header in $ac_header_list do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in getpagesize do : ac_fn_c_check_func "$LINENO" "getpagesize" "ac_cv_func_getpagesize" if test "x$ac_cv_func_getpagesize" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETPAGESIZE 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working mmap" >&5 $as_echo_n "checking for working mmap... " >&6; } if ${ac_cv_func_mmap_fixed_mapped+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_mmap_fixed_mapped=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default /* malloc might have been renamed as rpl_malloc. */ #undef malloc /* Thanks to Mike Haertel and Jim Avera for this test. 
Here is a matrix of mmap possibilities: mmap private not fixed mmap private fixed at somewhere currently unmapped mmap private fixed at somewhere already mapped mmap shared not fixed mmap shared fixed at somewhere currently unmapped mmap shared fixed at somewhere already mapped For private mappings, we should verify that changes cannot be read() back from the file, nor mmap's back from the file at a different address. (There have been systems where private was not correctly implemented like the infamous i386 svr4.0, and systems where the VM page cache was not coherent with the file system buffer cache like early versions of FreeBSD and possibly contemporary NetBSD.) For shared mappings, we should conversely verify that changes get propagated back to all the places they're supposed to be. Grep wants private fixed already mapped. The main things grep needs to know about mmap are: * does it exist and is it safe to write into the mmap'd area * how to use it (BSD variants) */ #include #include #if !defined STDC_HEADERS && !defined HAVE_STDLIB_H char *malloc (); #endif /* This mess was copied from the GNU getpagesize.h. 
*/ #ifndef HAVE_GETPAGESIZE # ifdef _SC_PAGESIZE # define getpagesize() sysconf(_SC_PAGESIZE) # else /* no _SC_PAGESIZE */ # ifdef HAVE_SYS_PARAM_H # include # ifdef EXEC_PAGESIZE # define getpagesize() EXEC_PAGESIZE # else /* no EXEC_PAGESIZE */ # ifdef NBPG # define getpagesize() NBPG * CLSIZE # ifndef CLSIZE # define CLSIZE 1 # endif /* no CLSIZE */ # else /* no NBPG */ # ifdef NBPC # define getpagesize() NBPC # else /* no NBPC */ # ifdef PAGESIZE # define getpagesize() PAGESIZE # endif /* PAGESIZE */ # endif /* no NBPC */ # endif /* no NBPG */ # endif /* no EXEC_PAGESIZE */ # else /* no HAVE_SYS_PARAM_H */ # define getpagesize() 8192 /* punt totally */ # endif /* no HAVE_SYS_PARAM_H */ # endif /* no _SC_PAGESIZE */ #endif /* no HAVE_GETPAGESIZE */ int main () { char *data, *data2, *data3; const char *cdata2; int i, pagesize; int fd, fd2; pagesize = getpagesize (); /* First, make a file with some known garbage in it. */ data = (char *) malloc (pagesize); if (!data) return 1; for (i = 0; i < pagesize; ++i) *(data + i) = rand (); umask (0); fd = creat ("conftest.mmap", 0600); if (fd < 0) return 2; if (write (fd, data, pagesize) != pagesize) return 3; close (fd); /* Next, check that the tail of a page is zero-filled. File must have non-zero length, otherwise we risk SIGBUS for entire page. */ fd2 = open ("conftest.txt", O_RDWR | O_CREAT | O_TRUNC, 0600); if (fd2 < 0) return 4; cdata2 = ""; if (write (fd2, cdata2, 1) != 1) return 5; data2 = (char *) mmap (0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0L); if (data2 == MAP_FAILED) return 6; for (i = 0; i < pagesize; ++i) if (*(data2 + i)) return 7; close (fd2); if (munmap (data2, pagesize)) return 8; /* Next, try to mmap the file at a fixed address which already has something else allocated at it. If we can, also make sure that we see the same garbage. 
*/ fd = open ("conftest.mmap", O_RDWR); if (fd < 0) return 9; if (data2 != mmap (data2, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd, 0L)) return 10; for (i = 0; i < pagesize; ++i) if (*(data + i) != *(data2 + i)) return 11; /* Finally, make sure that changes to the mapped area do not percolate back to the file as seen by read(). (This is a bug on some variants of i386 svr4.0.) */ for (i = 0; i < pagesize; ++i) *(data2 + i) = *(data2 + i) + 1; data3 = (char *) malloc (pagesize); if (!data3) return 12; if (read (fd, data3, pagesize) != pagesize) return 13; for (i = 0; i < pagesize; ++i) if (*(data + i) != *(data3 + i)) return 14; close (fd); return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_mmap_fixed_mapped=yes else ac_cv_func_mmap_fixed_mapped=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_fixed_mapped" >&5 $as_echo "$ac_cv_func_mmap_fixed_mapped" >&6; } if test $ac_cv_func_mmap_fixed_mapped = yes; then $as_echo "#define HAVE_MMAP 1" >>confdefs.h fi rm -f conftest.mmap conftest.txt for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5 $as_echo_n "checking for GNU libc compatible realloc... " >&6; } if ${ac_cv_func_realloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_realloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *realloc (); #endif int main () { return ! 
realloc (0, 0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_realloc_0_nonnull=yes else ac_cv_func_realloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5 $as_echo "$ac_cv_func_realloc_0_nonnull" >&6; } if test $ac_cv_func_realloc_0_nonnull = yes; then : $as_echo "#define HAVE_REALLOC 1" >>confdefs.h else $as_echo "#define HAVE_REALLOC 0" >>confdefs.h case " $LIBOBJS " in *" realloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS realloc.$ac_objext" ;; esac $as_echo "#define realloc rpl_realloc" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } if ${ac_cv_type_signal+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { return *(signal (0, 0)) (0) == 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_type_signal=int else ac_cv_type_signal=void fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_signal" >&5 $as_echo "$ac_cv_type_signal" >&6; } cat >>confdefs.h <<_ACEOF #define RETSIGTYPE $ac_cv_type_signal _ACEOF for ac_func in vprintf do : ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf" if test "x$ac_cv_func_vprintf" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VPRINTF 1 _ACEOF ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt" if test "x$ac_cv_func__doprnt" = xyes; then : $as_echo "#define HAVE_DOPRNT 1" >>confdefs.h fi fi done for ac_func in fchdir getcwd gettimeofday memmove memset mkdir munmap rmdir strchr strdup strerror strrchr strtoul strtoull alphasort dirfd lchown lutimes strsep do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | 
$as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done # AC_CACHE_SAVE ac_config_files="$ac_config_files src/Makefile tests/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. 
sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. 
as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. 
in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... 
# ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. 
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. 
## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by fsvs $as_me , which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Report bugs to ." 
_ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ fsvs config.status configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? 
"ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "src/config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/config.h" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers fi # Have a temporary directory for convenience. 
Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? 
"could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. 
Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. 
if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi # Cause a recompile touch src/config.h if [ "$ac_cv_header_linux_kdev_t_h" = "no" -a "x$ENABLE_DEV_FAKE" = "x" ] then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: * MAJOR(), MINOR() and MAKEDEV() definitions not found. * Fake a definition, but that could make problems for ignore patterns * and commits/updates of device nodes, so these will be disabled. * Please contact dev@fsvs.tigris.org for help, or, if you know your * systems' way, to report the correct header name. * * If you *really* need to use device compares, and have *no* other way, * you could try using the --enable-dev-fake option on ./configure." >&5 $as_echo "$as_me: WARNING: * MAJOR(), MINOR() and MAKEDEV() definitions not found. * Fake a definition, but that could make problems for ignore patterns * and commits/updates of device nodes, so these will be disabled. * Please contact dev@fsvs.tigris.org for help, or, if you know your * systems' way, to report the correct header name. * * If you *really* need to use device compares, and have *no* other way, * you could try using the --enable-dev-fake option on ./configure." 
>&2;} fi # vi: ts=3 sw=3 fsvs-1.2.6/autom4te.cache/0000755000202400020240000000000012554717236014267 5ustar marekmarekfsvs-1.2.6/autom4te.cache/output.00000644000202400020240000065111512554717236015721 0ustar marekmarek@%:@! /bin/sh @%:@ Guess values for system-dependent variables and create Makefiles. @%:@ Generated by GNU Autoconf 2.69 for fsvs . @%:@ @%:@ Report bugs to . @%:@ @%:@ @%:@ Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @%:@ @%:@ @%:@ This configure script is free software; the Free Software Foundation @%:@ gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in @%:@( *posix*) : set -o posix ;; @%:@( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in @%:@( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in @%:@(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. 
in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in @%:@ (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. 
alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in @%:@( *posix*) : set -o posix ;; @%:@( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in @%:@( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in @%:@ (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org and $0: http://fsvs.tigris.org/ about your system, including $0: any error possibly output before this message. Then $0: install a modern shell, or manually run the script $0: under such a shell if you do have one." 
fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## @%:@ as_fn_unset VAR @%:@ --------------- @%:@ Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset @%:@ as_fn_set_status STATUS @%:@ ----------------------- @%:@ Set @S|@? to STATUS, without forking. as_fn_set_status () { return $1 } @%:@ as_fn_set_status @%:@ as_fn_exit STATUS @%:@ ----------------- @%:@ Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } @%:@ as_fn_exit @%:@ as_fn_mkdir_p @%:@ ------------- @%:@ Create "@S|@as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } @%:@ as_fn_mkdir_p @%:@ as_fn_executable_p FILE @%:@ ----------------------- @%:@ Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } @%:@ as_fn_executable_p @%:@ as_fn_append VAR VALUE @%:@ ---------------------- @%:@ Append the text in VALUE to the end of the definition contained in VAR. 
Take @%:@ advantage of any shell optimizations that allow amortized linear growth over @%:@ repeated appends, instead of the typical quadratic growth present in naive @%:@ implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append @%:@ as_fn_arith ARG... @%:@ ------------------ @%:@ Perform arithmetic evaluation on the ARGs, and store the result in the @%:@ global @S|@as_val. Take advantage of shells that can avoid forks. The arguments @%:@ must be portable across @S|@(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith @%:@ as_fn_error STATUS ERROR [LINENO LOG_FD] @%:@ ---------------------------------------- @%:@ Output "`basename @S|@0`: error: ERROR" to stderr. If LINENO and LOG_FD are @%:@ provided, also output the error to LOG_FD, referencing LINENO. Then exit the @%:@ script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } @%:@ as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in @%:@((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... 
but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIB@&t@OBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='fsvs' PACKAGE_TARNAME='fsvs' PACKAGE_VERSION='' PACKAGE_STRING='fsvs ' PACKAGE_BUGREPORT='http://fsvs.tigris.org/' PACKAGE_URL='' # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_unique_file="src/actions.c" ac_header_list= ac_subst_vars='LTLIBOBJS LIB@&t@OBJS HAVE_UINT64_T HAVE_UINT32_T HAS_FASTCALL ENABLE_RELEASE CHROOTER_JAIL HAVE_LOCALES HAVE_FMEMOPEN NEED_ENVIRON_EXTERN HAVE_O_DIRECTORY ENABLE_GCOV ENABLE_DEBUG ENABLE_DEV_FAKE EXTRALIBS WAA_WC_MD5_CHARS EGREP GREP CPP OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking with_aprinc with_svninc with_waa_md5 with_aprlib with_svnlib enable_dev_fake enable_debug enable_gcov with_chroot enable_release enable_largefile ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. 
# These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. 
case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | 
--mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | 
--program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | 
--targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. 
with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. 
case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? 
"cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures fsvs to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX @<:@@S|@ac_default_prefix@:>@ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX @<:@PREFIX@:>@ By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root @<:@DATAROOTDIR/doc/fsvs@:>@ --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of fsvs :";; esac cat <<\_ACEOF Optional Features: --disable-option-checking 
ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-dev-fake Include fake definitions for MAJOR(), MINOR() and MKDEV(). Needed if none found. --enable-debug compile some extra debug checks in (valgrind, gdb) (default is no) --enable-gcov whether to compile with instrumentation for gcov (default is no) (needs --enable-debug) --enable-release whether to compile without debug messages. Makes image smaller (to about half size), but makes -d and -D inoperative. (Default is no) --disable-largefile omit support for large files Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-aprinc=PATH Specify an include directory for the APR headers. --with-svninc=PATH Specify an include directory for the subversion headers. --with-waa_md5=NUMBER Specifies how many hex characters of the MD5 of the working copy root should be used to address the data in the WAA. This may be increased if you have a lot of different working copies on a single machine. The default is 0; useful values are 0, and from 6 to 32. --with-aprlib=PATH Specify a directory containing APR libraries. --with-svnlib=PATH Specify a directory containing subversion libraries. --with-chroot=PATH Specify a chroot environment for the fsvs-chrooter helper. Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to . _ACEOF ac_status=$? 
fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF fsvs configure generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. 
This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## @%:@ ac_fn_c_try_compile LINENO @%:@ -------------------------- @%:@ Try to compile conftest.@S|@ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } @%:@ ac_fn_c_try_compile @%:@ ac_fn_c_try_cpp LINENO @%:@ ---------------------- @%:@ Try to preprocess conftest.@S|@ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? 
if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } @%:@ ac_fn_c_try_cpp @%:@ ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES @%:@ ------------------------------------------------------- @%:@ Tests whether HEADER exists, giving a warning if it cannot be compiled using @%:@ the include files in INCLUDES and setting the cache variable VAR @%:@ accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 @%:@include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @%:@include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## -------------------------------------- ## ## Report this to http://fsvs.tigris.org/ ## ## -------------------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } @%:@ ac_fn_c_check_header_mongrel @%:@ ac_fn_c_try_run LINENO @%:@ ---------------------- @%:@ Try to link conftest.@S|@ac_ext, and return whether this succeeded. Assumes @%:@ that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } @%:@ ac_fn_c_try_run @%:@ ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES @%:@ ------------------------------------------------------- @%:@ Tests whether HEADER exists and can be compiled using the include files in @%:@ INCLUDES, setting the cache variable VAR accordingly. 
ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 @%:@include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } @%:@ ac_fn_c_check_header_compile @%:@ ac_fn_c_try_link LINENO @%:@ ----------------------- @%:@ Try to link conftest.@S|@ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. 
We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } @%:@ ac_fn_c_try_link @%:@ ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES @%:@ ---------------------------------------------------- @%:@ Tries to find if the field MEMBER exists in type AGGR, after including @%:@ INCLUDES, setting cache variable VAR accordingly. ac_fn_c_check_member () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 $as_echo_n "checking for $2.$3... " >&6; } if eval \${$4+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $5 int main () { static $2 ac_aggr; if (ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $5 int main () { static $2 ac_aggr; if (sizeof ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else eval "$4=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$4 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } @%:@ ac_fn_c_check_member @%:@ ac_fn_c_check_func LINENO FUNC VAR @%:@ ---------------------------------- @%:@ Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } @%:@ ac_fn_c_check_func @%:@ ac_fn_c_check_type LINENO TYPE VAR INCLUDES @%:@ ------------------------------------------- @%:@ Tests whether TYPE exists after having included INCLUDES, setting cache @%:@ variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } @%:@ ac_fn_c_check_type @%:@ ac_fn_c_find_uintX_t LINENO BITS VAR @%:@ ------------------------------------ @%:@ Finds an unsigned integer type with width BITS, setting cache variable VAR @%:@ accordingly. ac_fn_c_find_uintX_t () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 $as_echo_n "checking for uint$2_t... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" # Order is important - never check a type that is potentially smaller # than half of the expected target width. for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default int main () { static int test_array @<:@1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)@:>@; test_array @<:@0@:>@ = 0; return test_array @<:@0@:>@; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : case $ac_type in @%:@( uint$2_t) : eval "$3=yes" ;; @%:@( *) : eval "$3=\$ac_type" ;; esac fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if eval test \"x\$"$3"\" = x"no"; then : else break fi done fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } @%:@ ac_fn_c_find_uintX_t cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by fsvs $as_me , which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
$as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. 
{ echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF @%:@define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF @%:@define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF @%:@define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF @%:@define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF @%:@define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF @%:@define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in @%:@(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi as_fn_append ac_header_list " stdlib.h" as_fn_append ac_header_list " unistd.h" as_fn_append ac_header_list " sys/param.h" # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. 
ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $@%:@ != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. 
shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. 
break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. 
The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo 
"$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@ifdef __STDC__ @%:@ include @%:@else @%:@ include @%:@endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. 
# Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@ifdef __STDC__ @%:@ include @%:@else @%:@ include @%:@endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "@%:@define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF @%:@define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" if test "x$ac_cv_header_minix_config_h" = xyes; then : MINIX=yes else MINIX= fi if test "$MINIX" = yes; then $as_echo "@%:@define _POSIX_SOURCE 1" >>confdefs.h $as_echo "@%:@define _POSIX_1_SOURCE 2" >>confdefs.h $as_echo "@%:@define _MINIX 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 $as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } if ${ac_cv_safe_to_define___extensions__+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ # define __EXTENSIONS__ 1 $ac_includes_default int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_safe_to_define___extensions__=yes else ac_cv_safe_to_define___extensions__=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 $as_echo "$ac_cv_safe_to_define___extensions__" >&6; } test $ac_cv_safe_to_define___extensions__ = yes && $as_echo "@%:@define __EXTENSIONS__ 1" >>confdefs.h $as_echo "@%:@define _ALL_SOURCE 1" >>confdefs.h $as_echo "@%:@define _GNU_SOURCE 1" >>confdefs.h $as_echo "@%:@define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h $as_echo "@%:@define _TANDEM_SOURCE 1" >>confdefs.h # if [[ "x$cache_file" == /dev/null ]] # then # cache_file=config.cache # fi # AC_CACHE_LOAD ac_config_headers="$ac_config_headers src/config.h" { $as_echo "$as_me:${as_lineno-$LINENO}: *** Now configuring FSVS ***" >&5 $as_echo "$as_me: *** Now configuring FSVS ***" >&6;} # Checks for programs. ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. 
set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $@%:@ != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. 
*/ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. 
if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@ifdef __STDC__ @%:@ include @%:@else @%:@ include @%:@endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. 
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@ifdef __STDC__ @%:@ include @%:@else @%:@ include @%:@endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ##################################### Header files INCDIRS="/usr/local/include /usr/include /openpkg/include " # The subversion headers do a #include , so the APR libraries # *have* to be directly specified. # Furthermore there's apr-1/ as directory name, depending on apr version. # Is there something like this available for subversion? @%:@ Check whether --with-aprinc was given. if test "${with_aprinc+set}" = set; then : withval=$with_aprinc; INCDIRS="$INCDIRS $withval" else if APR=`apr-1-config --includedir || apr-config --includedir` then INCDIRS="$INCDIRS $APR" fi fi @%:@ Check whether --with-svninc was given. 
if test "${with_svninc+set}" = set; then : withval=$with_svninc; INCDIRS="$INCDIRS $withval" fi @%:@ Check whether --with-svninc was given. if test "${with_svninc+set}" = set; then : withval=$with_svninc; INCDIRS="$INCDIRS $withval" fi @%:@ Check whether --with-waa_md5 was given. if test "${with_waa_md5+set}" = set; then : withval=$with_waa_md5; # The shell gives an error on numeric comparision with a non-numeric # value. # We allow from 3 characters on, although it might not make much # sense. WAA_WC_MD5_CHARS=`perl -e '$_=0+shift; print $_+0 if $_==0 || ($_>3 && $_<=16)' "$withval"` if [ "$WAA_WC_MD5_CHARS" = "" ] then as_fn_error $? "The given value for --with-waa_md5 is invalid." "$LINENO" 5 fi else WAA_WC_MD5_CHARS=0 fi cat >>confdefs.h <<_ACEOF @%:@define WAA_WC_MD5_CHARS $WAA_WC_MD5_CHARS _ACEOF CFLAGS="$CFLAGS -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64" for dir in $INCDIRS do # using -I would result in the files being _non_ system include # directories, ie. they'd clutter the dependency files. # That's why -idirafter is used. CFLAGS="$CFLAGS -idirafter $dir" done cat >>confdefs.h <<_ACEOF @%:@define CFLAGS $CFLAGS _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: \"CFLAGS=$CFLAGS\"" >&5 $as_echo "$as_me: \"CFLAGS=$CFLAGS\"" >&6;} ##################################### Linker LIBDIRS="/usr/local/lib /openpkg/lib" @%:@ Check whether --with-aprlib was given. if test "${with_aprlib+set}" = set; then : withval=$with_aprlib; LIBDIRS="$LIBDIRS $withval" fi @%:@ Check whether --with-svnlib was given. if test "${with_svnlib+set}" = set; then : withval=$with_svnlib; LIBDIRS="$LIBDIRS $withval" fi for dir in $LIBDIRS do LDFLAGS="$LDFLAGS -L$dir" done cat >>confdefs.h <<_ACEOF @%:@define LDFLAGS $LDFLAGS _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: \"LDFLAGS=$LDFLAGS\"" >&5 $as_echo "$as_me: \"LDFLAGS=$LDFLAGS\"" >&6;} EXTRALIBS="-laprutil-1 -lapr-1" if [ `uname -s` = "SunOS" ] then # Solaris 10, thanks Peter. 
EXTRALIBS="-lsocket -lnsl $EXTRALIBS" fi if [ `uname -s` = "Darwin" ] then # OSX 10.6 - thanks, Florian. EXTRALIBS="-liconv $EXTRALIBS" have_fmemopen=no fi cat >>confdefs.h <<_ACEOF @%:@define EXTRALIBS $EXTRALIBS _ACEOF ##################################### Checks # Checks for libraries. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcre_compile in -lpcre" >&5 $as_echo_n "checking for pcre_compile in -lpcre... " >&6; } if ${ac_cv_lib_pcre_pcre_compile+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpcre $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pcre_compile (); int main () { return pcre_compile (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pcre_pcre_compile=yes else ac_cv_lib_pcre_pcre_compile=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pcre_pcre_compile" >&5 $as_echo "$ac_cv_lib_pcre_pcre_compile" >&6; } if test "x$ac_cv_lib_pcre_pcre_compile" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LIBPCRE 1 _ACEOF LIBS="-lpcre $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find PCRE. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for apr_md5_init in -laprutil-1" >&5 $as_echo_n "checking for apr_md5_init in -laprutil-1... " >&6; } if ${ac_cv_lib_aprutil_1_apr_md5_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-laprutil-1 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char apr_md5_init (); int main () { return apr_md5_init (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_aprutil_1_apr_md5_init=yes else ac_cv_lib_aprutil_1_apr_md5_init=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_aprutil_1_apr_md5_init" >&5 $as_echo "$ac_cv_lib_aprutil_1_apr_md5_init" >&6; } if test "x$ac_cv_lib_aprutil_1_apr_md5_init" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LIBAPRUTIL_1 1 _ACEOF LIBS="-laprutil-1 $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find APR. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for svn_txdelta_apply in -lsvn_delta-1" >&5 $as_echo_n "checking for svn_txdelta_apply in -lsvn_delta-1... " >&6; } if ${ac_cv_lib_svn_delta_1_svn_txdelta_apply+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvn_delta-1 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char svn_txdelta_apply (); int main () { return svn_txdelta_apply (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svn_delta_1_svn_txdelta_apply=yes else ac_cv_lib_svn_delta_1_svn_txdelta_apply=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&5 $as_echo "$ac_cv_lib_svn_delta_1_svn_txdelta_apply" >&6; } if test "x$ac_cv_lib_svn_delta_1_svn_txdelta_apply" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LIBSVN_DELTA_1 1 _ACEOF LIBS="-lsvn_delta-1 $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find subversion. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for svn_ra_initialize in -lsvn_ra-1" >&5 $as_echo_n "checking for svn_ra_initialize in -lsvn_ra-1... " >&6; } if ${ac_cv_lib_svn_ra_1_svn_ra_initialize+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvn_ra-1 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char svn_ra_initialize (); int main () { return svn_ra_initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svn_ra_1_svn_ra_initialize=yes else ac_cv_lib_svn_ra_1_svn_ra_initialize=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svn_ra_1_svn_ra_initialize" >&5 $as_echo "$ac_cv_lib_svn_ra_1_svn_ra_initialize" >&6; } if test "x$ac_cv_lib_svn_ra_1_svn_ra_initialize" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LIBSVN_RA_1 1 _ACEOF LIBS="-lsvn_ra-1 $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find subversion. See \`config.log' for more details" "$LINENO" 5; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gdbm_firstkey in -lgdbm" >&5 $as_echo_n "checking for gdbm_firstkey in -lgdbm... " >&6; } if ${ac_cv_lib_gdbm_gdbm_firstkey+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgdbm $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gdbm_firstkey (); int main () { return gdbm_firstkey (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gdbm_gdbm_firstkey=yes else ac_cv_lib_gdbm_gdbm_firstkey=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gdbm_gdbm_firstkey" >&5 $as_echo "$ac_cv_lib_gdbm_gdbm_firstkey" >&6; } if test "x$ac_cv_lib_gdbm_gdbm_firstkey" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LIBGDBM 1 _ACEOF LIBS="-lgdbm $LIBS" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Sorry, can't find gdbm. See \`config.log' for more details" "$LINENO" 5; } fi # Checks for header files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "@%:@define STDC_HEADERS 1" >>confdefs.h fi for ac_header in fcntl.h stddef.h stdlib.h string.h sys/time.h unistd.h pcre.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF @%:@define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "Needed header file not found. 
See \`config.log' for more details" "$LINENO" 5; } fi done #apr_file_io.h subversion-1/svn_md5.h]) ac_header_dirent=no for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } if eval \${$as_ac_Header+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include <$ac_hdr> int main () { if ((DIR *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_ac_Header=yes" else eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_ac_Header { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF @%:@define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 _ACEOF ac_header_dirent=$ac_hdr; break fi done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' dir; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' x; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi ac_fn_c_check_member "$LINENO" "struct stat" "st_mtim" "ac_cv_member_struct_stat_st_mtim" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_mtim" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_STRUCT_STAT_ST_MTIM 1 _ACEOF fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { VALGRIND_MAKE_MEM_DEFINED(0, 2); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : have_valgrind=yes else have_valgrind=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test x$have_valgrind = xyes ; then $as_echo "@%:@define HAVE_VALGRIND 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No compatible valgrind version." >&5 $as_echo "$as_me: No compatible valgrind version." >&6;} fi # Check whether S_IFMT is dense, ie. a single block of binary ones. # If it isn't, the bitcount wouldn't tell the needed bits to represent the # data. # If S_IFMT is dense, the increment results in a single carry bit. # Checked via changing /usr/include/bits/stat.h. 
if test "$cross_compiling" = yes; then : { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run test program while cross compiling See \`config.log' for more details" "$LINENO" 5; } else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include "src/preproc.h" int main(int argc, char **args) { if (_BITCOUNT( (S_IFMT >> MODE_T_SHIFT_BITS) + 1) == 1) return 0; else return 1; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: S_IFMT is ok." >&5 $as_echo "$as_me: S_IFMT is ok." >&6;} else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "You have a sparse S_IFMT. Please tell the dev@ mailing list. See \`config.log' for more details" "$LINENO" 5; } fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi for ac_header in linux/kdev_t.h do : ac_fn_c_check_header_mongrel "$LINENO" "linux/kdev_t.h" "ac_cv_header_linux_kdev_t_h" "$ac_includes_default" if test "x$ac_cv_header_linux_kdev_t_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LINUX_KDEV_T_H 1 _ACEOF fi done @%:@ Check whether --enable-dev-fake was given. if test "${enable_dev_fake+set}" = set; then : enableval=$enable_dev_fake; $as_echo "@%:@define ENABLE_DEV_FAKE 1" >>confdefs.h ENABLE_DEV_FAKE=1 fi @%:@ Check whether --enable-debug was given. if test "${enable_debug+set}" = set; then : enableval=$enable_debug; $as_echo "@%:@define ENABLE_DEBUG 1" >>confdefs.h ENABLE_DEBUG=1 fi @%:@ Check whether --enable-gcov was given. if test "${enable_gcov+set}" = set; then : enableval=$enable_gcov; $as_echo "@%:@define ENABLE_GCOV 1" >>confdefs.h ENABLE_GCOV=1 fi $as_echo "@%:@define ENABLE_GCOV 1" >>confdefs.h cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { int i=O_DIRECTORY; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : have_o_directory=yes else have_o_directory=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test x$have_o_directory = xyes ; then $as_echo "@%:@define HAVE_O_DIRECTORY 1" >>confdefs.h fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { char **environ; int main(void) { return environ == NULL; } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : need_environ_extern=no else need_environ_extern=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test x$need_environ_extern = xyes ; then $as_echo "@%:@define NEED_ENVIRON_EXTERN 1" >>confdefs.h fi if test x$have_fmemopen = x then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { int main(int argc, char *args[]) { return fmemopen(args[0], 2, args[1]) == NULL; } ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : have_fmemopen=yes else have_fmemopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi if test x$have_fmemopen = xyes then $as_echo "@%:@define HAVE_FMEMOPEN 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: fmemopen() not found. debug_buffer option not available." >&5 $as_echo "$as_me: WARNING: fmemopen() not found. debug_buffer option not available." >&2;} fi if locale -a > /dev/null 2>&1 then $as_echo "@%:@define HAVE_LOCALES 1" >>confdefs.h fi @%:@ Check whether --with-chroot was given. if test "${with_chroot+set}" = set; then : withval=$with_chroot; if test "$withval" = "yes" ; then as_fn_error $? "--with-chroot requires an argument." "$LINENO" 5 else CHROOTER_JAIL=$withval cat >>confdefs.h <<_ACEOF @%:@define CHROOTER_JAIL "$CHROOTER_JAIL" _ACEOF fi fi @%:@ Check whether --enable-release was given. 
if test "${enable_release+set}" = set; then : enableval=$enable_release; $as_echo "@%:@define ENABLE_RELEASE 1" >>confdefs.h ENABLE_RELEASE=1 fi if [ "$ENABLE_RELEASE$ENABLE_DEBUG" = "11" ] then as_fn_error $? "--enable-debug and --enable-release are incompatibel. Use one or the other." "$LINENO" 5 fi for ac_func in getdents64 do : ac_fn_c_check_func "$LINENO" "getdents64" "ac_cv_func_getdents64" if test "x$ac_cv_func_getdents64" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_GETDENTS64 1 _ACEOF fi done for ac_header in linux/types.h do : ac_fn_c_check_header_mongrel "$LINENO" "linux/types.h" "ac_cv_header_linux_types_h" "$ac_includes_default" if test "x$ac_cv_header_linux_types_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LINUX_TYPES_H 1 _ACEOF fi done for ac_header in linux/unistd.h do : ac_fn_c_check_header_mongrel "$LINENO" "linux/unistd.h" "ac_cv_header_linux_unistd_h" "$ac_includes_default" if test "x$ac_cv_header_linux_unistd_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_LINUX_UNISTD_H 1 _ACEOF fi done ac_fn_c_check_type "$LINENO" "comparison_fn_t" "ac_cv_type_comparison_fn_t" "$ac_includes_default" if test "x$ac_cv_type_comparison_fn_t" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_COMPARISON_FN_T 1 _ACEOF fi @%:@ Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... " >&6; } if ${ac_cv_sys_largefile_CC+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @%:@include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ @%:@define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ @%:@define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @%:@define _FILE_OFFSET_BITS 64 @%:@include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ @%:@define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF @%:@define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @%:@include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ @%:@define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @%:@define _LARGE_FILES 1 @%:@include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ @%:@define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF @%:@define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi # Checks for typedefs, structures, and compiler characteristics. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. 
*/ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "@%:@define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 $as_echo_n "checking for inline... " >&6; } if ${ac_cv_c_inline+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifndef __cplusplus typedef int foo_t; static $ac_kw foo_t static_foo () {return 0; } $ac_kw foo_t foo () {return 0; } #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_inline=$ac_kw fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test "$ac_cv_c_inline" != no && break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 $as_echo "$ac_cv_c_inline" >&6; } case $ac_cv_c_inline in inline | yes) ;; *) case $ac_cv_c_inline in no) ac_val=;; *) ac_val=$ac_cv_c_inline;; esac cat >>confdefs.h <<_ACEOF #ifndef __cplusplus #define inline $ac_val #endif _ACEOF ;; esac ac_fn_c_check_member "$LINENO" "struct stat" "st_rdev" "ac_cv_member_struct_stat_st_rdev" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_rdev" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_STRUCT_STAT_ST_RDEV 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 $as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } if ${ac_cv_header_time+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include int main () { if ((struct tm *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_time=yes else ac_cv_header_time=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5 $as_echo "$ac_cv_header_time" >&6; } if test $ac_cv_header_time = yes; then $as_echo "@%:@define TIME_WITH_SYS_TIME 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } if ${ac_cv_struct_tm+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include int main () { struct tm tm; int *p = &tm.tm_sec; return !p; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_tm=time.h else ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 $as_echo "$ac_cv_struct_tm" >&6; } if test $ac_cv_struct_tm = sys/time.h; then $as_echo "@%:@define TM_IN_SYS_TIME 1" >>confdefs.h fi $as_echo "@%:@define HAS_FASTCALL 1" >>confdefs.h # Only i386 (32bit) has fastcall. if [ `uname -m` = i?86 ] then HAS_FASTCALL=1 fi ac_fn_c_find_uintX_t "$LINENO" "32" "ac_cv_c_uint32_t" case $ac_cv_c_uint32_t in #( no|yes) ;; #( *) $as_echo "@%:@define _UINT32_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF @%:@define uint32_t $ac_cv_c_uint32_t _ACEOF ;; esac # See config.h for an explanation. if [ "$ac_cv_c_uint32_t" = "yes" ] then ac_cv_c_uint32_t=uint32_t fi cat >>confdefs.h <<_ACEOF @%:@define AC_CV_C_UINT32_T $ac_cv_c_uint32_t _ACEOF ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t" case $ac_cv_c_uint64_t in #( no|yes) ;; #( *) $as_echo "@%:@define _UINT64_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF @%:@define uint64_t $ac_cv_c_uint64_t _ACEOF ;; esac if [ "$ac_cv_c_uint64_t" = "yes" ] then ac_cv_c_uint64_t=uint64_t fi cat >>confdefs.h <<_ACEOF @%:@define AC_CV_C_UINT64_T $ac_cv_c_uint64_t _ACEOF # Checks for library functions. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5 $as_echo_n "checking for uid_t in sys/types.h... " >&6; } if ${ac_cv_type_uid_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "uid_t" >/dev/null 2>&1; then : ac_cv_type_uid_t=yes else ac_cv_type_uid_t=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5 $as_echo "$ac_cv_type_uid_t" >&6; } if test $ac_cv_type_uid_t = no; then $as_echo "@%:@define uid_t int" >>confdefs.h $as_echo "@%:@define gid_t int" >>confdefs.h fi for ac_header in unistd.h do : ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default" if test "x$ac_cv_header_unistd_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_UNISTD_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5 $as_echo_n "checking for working chown... " >&6; } if ${ac_cv_func_chown_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_chown_works=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default #include int main () { char *f = "conftest.chown"; struct stat before, after; if (creat (f, 0600) < 0) return 1; if (stat (f, &before) < 0) return 1; if (chown (f, (uid_t) -1, (gid_t) -1) == -1) return 1; if (stat (f, &after) < 0) return 1; return ! 
(before.st_uid == after.st_uid && before.st_gid == after.st_gid); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_chown_works=yes else ac_cv_func_chown_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi rm -f conftest.chown fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5 $as_echo "$ac_cv_func_chown_works" >&6; } if test $ac_cv_func_chown_works = yes; then $as_echo "@%:@define HAVE_CHOWN 1" >>confdefs.h fi ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default" if test "x$ac_cv_type_pid_t" = xyes; then : else cat >>confdefs.h <<_ACEOF @%:@define pid_t int _ACEOF fi for ac_header in vfork.h do : ac_fn_c_check_header_mongrel "$LINENO" "vfork.h" "ac_cv_header_vfork_h" "$ac_includes_default" if test "x$ac_cv_header_vfork_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_VFORK_H 1 _ACEOF fi done for ac_func in fork vfork do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF @%:@define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done if test "x$ac_cv_func_fork" = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working fork" >&5 $as_echo_n "checking for working fork... " >&6; } if ${ac_cv_func_fork_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_fork_works=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* By Ruediger Kuhlmann. 
*/ return fork () < 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_fork_works=yes else ac_cv_func_fork_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_fork_works" >&5 $as_echo "$ac_cv_func_fork_works" >&6; } else ac_cv_func_fork_works=$ac_cv_func_fork fi if test "x$ac_cv_func_fork_works" = xcross; then case $host in *-*-amigaos* | *-*-msdosdjgpp*) # Override, as these systems have only a dummy fork() stub ac_cv_func_fork_works=no ;; *) ac_cv_func_fork_works=yes ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&2;} fi ac_cv_func_vfork_works=$ac_cv_func_vfork if test "x$ac_cv_func_vfork" = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working vfork" >&5 $as_echo_n "checking for working vfork... " >&6; } if ${ac_cv_func_vfork_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_vfork_works=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Thanks to Paul Eggert for this test. */ $ac_includes_default #include #ifdef HAVE_VFORK_H # include #endif /* On some sparc systems, changes by the child to local and incoming argument registers are propagated back to the parent. The compiler is told about this with #include , but some compilers (e.g. gcc -O) don't grok . Test for this by using a static variable whose address is put into a register that is clobbered by the vfork. 
*/ static void #ifdef __cplusplus sparc_address_test (int arg) # else sparc_address_test (arg) int arg; #endif { static pid_t child; if (!child) { child = vfork (); if (child < 0) { perror ("vfork"); _exit(2); } if (!child) { arg = getpid(); write(-1, "", 0); _exit (arg); } } } int main () { pid_t parent = getpid (); pid_t child; sparc_address_test (0); child = vfork (); if (child == 0) { /* Here is another test for sparc vfork register problems. This test uses lots of local variables, at least as many local variables as main has allocated so far including compiler temporaries. 4 locals are enough for gcc 1.40.3 on a Solaris 4.1.3 sparc, but we use 8 to be safe. A buggy compiler should reuse the register of parent for one of the local variables, since it will think that parent can't possibly be used any more in this routine. Assigning to the local variable will thus munge parent in the parent process. */ pid_t p = getpid(), p1 = getpid(), p2 = getpid(), p3 = getpid(), p4 = getpid(), p5 = getpid(), p6 = getpid(), p7 = getpid(); /* Convince the compiler that p..p7 are live; otherwise, it might use the same hardware register for all 8 local variables. */ if (p != p1 || p != p2 || p != p3 || p != p4 || p != p5 || p != p6 || p != p7) _exit(1); /* On some systems (e.g. IRIX 3.3), vfork doesn't separate parent from child file descriptors. If the child closes a descriptor before it execs or exits, this munges the parent's descriptor as well. Test for this by closing stdout in the child. */ _exit(close(fileno(stdout)) != 0); } else { int status; struct stat st; while (wait(&status) != child) ; return ( /* Was there some problem with vforking? */ child < 0 /* Did the child fail? (This shouldn't happen.) */ || status /* Did the vfork/compiler bug occur? */ || parent != getpid() /* Did the file descriptor bug occur? 
*/ || fstat(fileno(stdout), &st) != 0 ); } } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_vfork_works=yes else ac_cv_func_vfork_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_vfork_works" >&5 $as_echo "$ac_cv_func_vfork_works" >&6; } fi; if test "x$ac_cv_func_fork_works" = xcross; then ac_cv_func_vfork_works=$ac_cv_func_vfork { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&2;} fi if test "x$ac_cv_func_vfork_works" = xyes; then $as_echo "@%:@define HAVE_WORKING_VFORK 1" >>confdefs.h else $as_echo "@%:@define vfork fork" >>confdefs.h fi if test "x$ac_cv_func_fork_works" = xyes; then $as_echo "@%:@define HAVE_WORKING_FORK 1" >>confdefs.h fi for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5 $as_echo_n "checking for GNU libc compatible malloc... " >&6; } if ${ac_cv_func_malloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_malloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *malloc (); #endif int main () { return ! 
malloc (0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_malloc_0_nonnull=yes else ac_cv_func_malloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5 $as_echo "$ac_cv_func_malloc_0_nonnull" >&6; } if test $ac_cv_func_malloc_0_nonnull = yes; then : $as_echo "@%:@define HAVE_MALLOC 1" >>confdefs.h else $as_echo "@%:@define HAVE_MALLOC 0" >>confdefs.h case " $LIB@&t@OBJS " in *" malloc.$ac_objext "* ) ;; *) LIB@&t@OBJS="$LIB@&t@OBJS malloc.$ac_objext" ;; esac $as_echo "@%:@define malloc rpl_malloc" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working memcmp" >&5 $as_echo_n "checking for working memcmp... " >&6; } if ${ac_cv_func_memcmp_working+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_memcmp_working=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Some versions of memcmp are not 8-bit clean. */ char c0 = '\100', c1 = '\200', c2 = '\201'; if (memcmp(&c0, &c2, 1) >= 0 || memcmp(&c1, &c2, 1) >= 0) return 1; /* The Next x86 OpenStep bug shows up only when comparing 16 bytes or more and with at least one buffer not starting on a 4-byte boundary. William Lewis provided this test program. 
*/ { char foo[21]; char bar[21]; int i; for (i = 0; i < 4; i++) { char *a = foo + i; char *b = bar + i; strcpy (a, "--------01111111"); strcpy (b, "--------10000000"); if (memcmp (a, b, 16) >= 0) return 1; } return 0; } ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_memcmp_working=yes else ac_cv_func_memcmp_working=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_memcmp_working" >&5 $as_echo "$ac_cv_func_memcmp_working" >&6; } test $ac_cv_func_memcmp_working = no && case " $LIB@&t@OBJS " in *" memcmp.$ac_objext "* ) ;; *) LIB@&t@OBJS="$LIB@&t@OBJS memcmp.$ac_objext" ;; esac for ac_header in $ac_header_list do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF @%:@define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in getpagesize do : ac_fn_c_check_func "$LINENO" "getpagesize" "ac_cv_func_getpagesize" if test "x$ac_cv_func_getpagesize" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_GETPAGESIZE 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working mmap" >&5 $as_echo_n "checking for working mmap... " >&6; } if ${ac_cv_func_mmap_fixed_mapped+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_mmap_fixed_mapped=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default /* malloc might have been renamed as rpl_malloc. */ #undef malloc /* Thanks to Mike Haertel and Jim Avera for this test. 
Here is a matrix of mmap possibilities: mmap private not fixed mmap private fixed at somewhere currently unmapped mmap private fixed at somewhere already mapped mmap shared not fixed mmap shared fixed at somewhere currently unmapped mmap shared fixed at somewhere already mapped For private mappings, we should verify that changes cannot be read() back from the file, nor mmap's back from the file at a different address. (There have been systems where private was not correctly implemented like the infamous i386 svr4.0, and systems where the VM page cache was not coherent with the file system buffer cache like early versions of FreeBSD and possibly contemporary NetBSD.) For shared mappings, we should conversely verify that changes get propagated back to all the places they're supposed to be. Grep wants private fixed already mapped. The main things grep needs to know about mmap are: * does it exist and is it safe to write into the mmap'd area * how to use it (BSD variants) */ #include #include #if !defined STDC_HEADERS && !defined HAVE_STDLIB_H char *malloc (); #endif /* This mess was copied from the GNU getpagesize.h. 
*/ #ifndef HAVE_GETPAGESIZE # ifdef _SC_PAGESIZE # define getpagesize() sysconf(_SC_PAGESIZE) # else /* no _SC_PAGESIZE */ # ifdef HAVE_SYS_PARAM_H # include # ifdef EXEC_PAGESIZE # define getpagesize() EXEC_PAGESIZE # else /* no EXEC_PAGESIZE */ # ifdef NBPG # define getpagesize() NBPG * CLSIZE # ifndef CLSIZE # define CLSIZE 1 # endif /* no CLSIZE */ # else /* no NBPG */ # ifdef NBPC # define getpagesize() NBPC # else /* no NBPC */ # ifdef PAGESIZE # define getpagesize() PAGESIZE # endif /* PAGESIZE */ # endif /* no NBPC */ # endif /* no NBPG */ # endif /* no EXEC_PAGESIZE */ # else /* no HAVE_SYS_PARAM_H */ # define getpagesize() 8192 /* punt totally */ # endif /* no HAVE_SYS_PARAM_H */ # endif /* no _SC_PAGESIZE */ #endif /* no HAVE_GETPAGESIZE */ int main () { char *data, *data2, *data3; const char *cdata2; int i, pagesize; int fd, fd2; pagesize = getpagesize (); /* First, make a file with some known garbage in it. */ data = (char *) malloc (pagesize); if (!data) return 1; for (i = 0; i < pagesize; ++i) *(data + i) = rand (); umask (0); fd = creat ("conftest.mmap", 0600); if (fd < 0) return 2; if (write (fd, data, pagesize) != pagesize) return 3; close (fd); /* Next, check that the tail of a page is zero-filled. File must have non-zero length, otherwise we risk SIGBUS for entire page. */ fd2 = open ("conftest.txt", O_RDWR | O_CREAT | O_TRUNC, 0600); if (fd2 < 0) return 4; cdata2 = ""; if (write (fd2, cdata2, 1) != 1) return 5; data2 = (char *) mmap (0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0L); if (data2 == MAP_FAILED) return 6; for (i = 0; i < pagesize; ++i) if (*(data2 + i)) return 7; close (fd2); if (munmap (data2, pagesize)) return 8; /* Next, try to mmap the file at a fixed address which already has something else allocated at it. If we can, also make sure that we see the same garbage. 
*/ fd = open ("conftest.mmap", O_RDWR); if (fd < 0) return 9; if (data2 != mmap (data2, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd, 0L)) return 10; for (i = 0; i < pagesize; ++i) if (*(data + i) != *(data2 + i)) return 11; /* Finally, make sure that changes to the mapped area do not percolate back to the file as seen by read(). (This is a bug on some variants of i386 svr4.0.) */ for (i = 0; i < pagesize; ++i) *(data2 + i) = *(data2 + i) + 1; data3 = (char *) malloc (pagesize); if (!data3) return 12; if (read (fd, data3, pagesize) != pagesize) return 13; for (i = 0; i < pagesize; ++i) if (*(data + i) != *(data3 + i)) return 14; close (fd); return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_mmap_fixed_mapped=yes else ac_cv_func_mmap_fixed_mapped=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_mmap_fixed_mapped" >&5 $as_echo "$ac_cv_func_mmap_fixed_mapped" >&6; } if test $ac_cv_func_mmap_fixed_mapped = yes; then $as_echo "@%:@define HAVE_MMAP 1" >>confdefs.h fi rm -f conftest.mmap conftest.txt for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5 $as_echo_n "checking for GNU libc compatible realloc... " >&6; } if ${ac_cv_func_realloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_realloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *realloc (); #endif int main () { return ! 
realloc (0, 0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_realloc_0_nonnull=yes else ac_cv_func_realloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5 $as_echo "$ac_cv_func_realloc_0_nonnull" >&6; } if test $ac_cv_func_realloc_0_nonnull = yes; then : $as_echo "@%:@define HAVE_REALLOC 1" >>confdefs.h else $as_echo "@%:@define HAVE_REALLOC 0" >>confdefs.h case " $LIB@&t@OBJS " in *" realloc.$ac_objext "* ) ;; *) LIB@&t@OBJS="$LIB@&t@OBJS realloc.$ac_objext" ;; esac $as_echo "@%:@define realloc rpl_realloc" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } if ${ac_cv_type_signal+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include int main () { return *(signal (0, 0)) (0) == 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_type_signal=int else ac_cv_type_signal=void fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_signal" >&5 $as_echo "$ac_cv_type_signal" >&6; } cat >>confdefs.h <<_ACEOF @%:@define RETSIGTYPE $ac_cv_type_signal _ACEOF for ac_func in vprintf do : ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf" if test "x$ac_cv_func_vprintf" = xyes; then : cat >>confdefs.h <<_ACEOF @%:@define HAVE_VPRINTF 1 _ACEOF ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt" if test "x$ac_cv_func__doprnt" = xyes; then : $as_echo "@%:@define HAVE_DOPRNT 1" >>confdefs.h fi fi done for ac_func in fchdir getcwd gettimeofday memmove memset mkdir munmap rmdir strchr strdup strerror strrchr strtoul strtoull alphasort dirfd lchown lutimes strsep do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF @%:@define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done # AC_CACHE_SAVE ac_config_files="$ac_config_files src/Makefile tests/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. 
_ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! 
-f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIB@&t@OBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIB@&t@OBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. 
## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in @%:@( *posix*) : set -o posix ;; @%:@( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in @%:@( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. 
Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in @%:@(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH @%:@ as_fn_error STATUS ERROR [LINENO LOG_FD] @%:@ ---------------------------------------- @%:@ Output "`basename @S|@0`: error: ERROR" to stderr. If LINENO and LOG_FD are @%:@ provided, also output the error to LOG_FD, referencing LINENO. Then exit the @%:@ script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } @%:@ as_fn_error @%:@ as_fn_set_status STATUS @%:@ ----------------------- @%:@ Set @S|@? to STATUS, without forking. as_fn_set_status () { return $1 } @%:@ as_fn_set_status @%:@ as_fn_exit STATUS @%:@ ----------------- @%:@ Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } @%:@ as_fn_exit @%:@ as_fn_unset VAR @%:@ --------------- @%:@ Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset @%:@ as_fn_append VAR VALUE @%:@ ---------------------- @%:@ Append the text in VALUE to the end of the definition contained in VAR. Take @%:@ advantage of any shell optimizations that allow amortized linear growth over @%:@ repeated appends, instead of the typical quadratic growth present in naive @%:@ implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append @%:@ as_fn_arith ARG... @%:@ ------------------ @%:@ Perform arithmetic evaluation on the ARGs, and store the result in the @%:@ global @S|@as_val. Take advantage of shells that can avoid forks. The arguments @%:@ must be portable across @S|@(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? 
-eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in @%:@((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null @%:@ as_fn_mkdir_p @%:@ ------------- @%:@ Create "@S|@as_dir" as a directory, including parents if necessary. 
as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } @%:@ as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi @%:@ as_fn_executable_p FILE @%:@ ----------------------- @%:@ Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } @%:@ as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by fsvs $as_me , which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ fsvs config.status configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." 
ac_pwd='$ac_pwd' srcdir='$srcdir' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." 
;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../@%:@@%:@ /;s/...$/ @%:@@%:@/;p;x;p;x' <<_ASBOX @%:@@%:@ Running $as_me. @%:@@%:@ _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "src/config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/config.h" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. 
# Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. 
# No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the 
white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. 
$configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. 
ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? 
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi # Cause a recompile touch src/config.h if [ "$ac_cv_header_linux_kdev_t_h" = "no" -a "x$ENABLE_DEV_FAKE" = "x" ] then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: * MAJOR(), MINOR() and MAKEDEV() definitions not found. * Fake a definition, but that could make problems for ignore patterns * and commits/updates of device nodes, so these will be disabled. 
* Please contact dev@fsvs.tigris.org for help, or, if you know your * systems' way, to report the correct header name. * * If you *really* need to use device compares, and have *no* other way, * you could try using the --enable-dev-fake option on ./configure." >&5 $as_echo "$as_me: WARNING: * MAJOR(), MINOR() and MAKEDEV() definitions not found. * Fake a definition, but that could make problems for ignore patterns * and commits/updates of device nodes, so these will be disabled. * Please contact dev@fsvs.tigris.org for help, or, if you know your * systems' way, to report the correct header name. * * If you *really* need to use device compares, and have *no* other way, * you could try using the --enable-dev-fake option on ./configure." >&2;} fi # vi: ts=3 sw=3 fsvs-1.2.6/autom4te.cache/traces.00000644000202400020240000013357112554717236015643 0ustar marekmarekm4trace:configure.in:5: -1- AC_INIT([fsvs], [esyscmd(make --quiet --no-print-directory -f Makefile.in version-nnl 2>/dev/null)], [http://fsvs.tigris.org/]) m4trace:configure.in:5: -1- m4_pattern_forbid([^_?A[CHUM]_]) m4trace:configure.in:5: -1- m4_pattern_forbid([_AC_]) m4trace:configure.in:5: -1- m4_pattern_forbid([^LIBOBJS$], [do not use LIBOBJS directly, use AC_LIBOBJ (see section `AC_LIBOBJ vs LIBOBJS']) m4trace:configure.in:5: -1- m4_pattern_allow([^AS_FLAGS$]) m4trace:configure.in:5: -1- m4_pattern_forbid([^_?m4_]) m4trace:configure.in:5: -1- m4_pattern_forbid([^dnl$]) m4trace:configure.in:5: -1- m4_pattern_forbid([^_?AS_]) m4trace:configure.in:5: -1- AC_SUBST([SHELL]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([SHELL]) m4trace:configure.in:5: -1- m4_pattern_allow([^SHELL$]) m4trace:configure.in:5: -1- AC_SUBST([PATH_SEPARATOR]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([PATH_SEPARATOR]) m4trace:configure.in:5: -1- m4_pattern_allow([^PATH_SEPARATOR$]) m4trace:configure.in:5: -1- AC_SUBST([PACKAGE_NAME], [m4_ifdef([AC_PACKAGE_NAME], ['AC_PACKAGE_NAME'])]) m4trace:configure.in:5: -1- 
AC_SUBST_TRACE([PACKAGE_NAME]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_NAME$]) m4trace:configure.in:5: -1- AC_SUBST([PACKAGE_TARNAME], [m4_ifdef([AC_PACKAGE_TARNAME], ['AC_PACKAGE_TARNAME'])]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([PACKAGE_TARNAME]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_TARNAME$]) m4trace:configure.in:5: -1- AC_SUBST([PACKAGE_VERSION], [m4_ifdef([AC_PACKAGE_VERSION], ['AC_PACKAGE_VERSION'])]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([PACKAGE_VERSION]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_VERSION$]) m4trace:configure.in:5: -1- AC_SUBST([PACKAGE_STRING], [m4_ifdef([AC_PACKAGE_STRING], ['AC_PACKAGE_STRING'])]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([PACKAGE_STRING]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_STRING$]) m4trace:configure.in:5: -1- AC_SUBST([PACKAGE_BUGREPORT], [m4_ifdef([AC_PACKAGE_BUGREPORT], ['AC_PACKAGE_BUGREPORT'])]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([PACKAGE_BUGREPORT]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_BUGREPORT$]) m4trace:configure.in:5: -1- AC_SUBST([PACKAGE_URL], [m4_ifdef([AC_PACKAGE_URL], ['AC_PACKAGE_URL'])]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([PACKAGE_URL]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_URL$]) m4trace:configure.in:5: -1- AC_SUBST([exec_prefix], [NONE]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([exec_prefix]) m4trace:configure.in:5: -1- m4_pattern_allow([^exec_prefix$]) m4trace:configure.in:5: -1- AC_SUBST([prefix], [NONE]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([prefix]) m4trace:configure.in:5: -1- m4_pattern_allow([^prefix$]) m4trace:configure.in:5: -1- AC_SUBST([program_transform_name], [s,x,x,]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([program_transform_name]) m4trace:configure.in:5: -1- m4_pattern_allow([^program_transform_name$]) m4trace:configure.in:5: -1- AC_SUBST([bindir], ['${exec_prefix}/bin']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([bindir]) m4trace:configure.in:5: 
-1- m4_pattern_allow([^bindir$]) m4trace:configure.in:5: -1- AC_SUBST([sbindir], ['${exec_prefix}/sbin']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([sbindir]) m4trace:configure.in:5: -1- m4_pattern_allow([^sbindir$]) m4trace:configure.in:5: -1- AC_SUBST([libexecdir], ['${exec_prefix}/libexec']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([libexecdir]) m4trace:configure.in:5: -1- m4_pattern_allow([^libexecdir$]) m4trace:configure.in:5: -1- AC_SUBST([datarootdir], ['${prefix}/share']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([datarootdir]) m4trace:configure.in:5: -1- m4_pattern_allow([^datarootdir$]) m4trace:configure.in:5: -1- AC_SUBST([datadir], ['${datarootdir}']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([datadir]) m4trace:configure.in:5: -1- m4_pattern_allow([^datadir$]) m4trace:configure.in:5: -1- AC_SUBST([sysconfdir], ['${prefix}/etc']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([sysconfdir]) m4trace:configure.in:5: -1- m4_pattern_allow([^sysconfdir$]) m4trace:configure.in:5: -1- AC_SUBST([sharedstatedir], ['${prefix}/com']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([sharedstatedir]) m4trace:configure.in:5: -1- m4_pattern_allow([^sharedstatedir$]) m4trace:configure.in:5: -1- AC_SUBST([localstatedir], ['${prefix}/var']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([localstatedir]) m4trace:configure.in:5: -1- m4_pattern_allow([^localstatedir$]) m4trace:configure.in:5: -1- AC_SUBST([includedir], ['${prefix}/include']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([includedir]) m4trace:configure.in:5: -1- m4_pattern_allow([^includedir$]) m4trace:configure.in:5: -1- AC_SUBST([oldincludedir], ['/usr/include']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([oldincludedir]) m4trace:configure.in:5: -1- m4_pattern_allow([^oldincludedir$]) m4trace:configure.in:5: -1- AC_SUBST([docdir], [m4_ifset([AC_PACKAGE_TARNAME], ['${datarootdir}/doc/${PACKAGE_TARNAME}'], ['${datarootdir}/doc/${PACKAGE}'])]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([docdir]) m4trace:configure.in:5: 
-1- m4_pattern_allow([^docdir$]) m4trace:configure.in:5: -1- AC_SUBST([infodir], ['${datarootdir}/info']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([infodir]) m4trace:configure.in:5: -1- m4_pattern_allow([^infodir$]) m4trace:configure.in:5: -1- AC_SUBST([htmldir], ['${docdir}']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([htmldir]) m4trace:configure.in:5: -1- m4_pattern_allow([^htmldir$]) m4trace:configure.in:5: -1- AC_SUBST([dvidir], ['${docdir}']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([dvidir]) m4trace:configure.in:5: -1- m4_pattern_allow([^dvidir$]) m4trace:configure.in:5: -1- AC_SUBST([pdfdir], ['${docdir}']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([pdfdir]) m4trace:configure.in:5: -1- m4_pattern_allow([^pdfdir$]) m4trace:configure.in:5: -1- AC_SUBST([psdir], ['${docdir}']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([psdir]) m4trace:configure.in:5: -1- m4_pattern_allow([^psdir$]) m4trace:configure.in:5: -1- AC_SUBST([libdir], ['${exec_prefix}/lib']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([libdir]) m4trace:configure.in:5: -1- m4_pattern_allow([^libdir$]) m4trace:configure.in:5: -1- AC_SUBST([localedir], ['${datarootdir}/locale']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([localedir]) m4trace:configure.in:5: -1- m4_pattern_allow([^localedir$]) m4trace:configure.in:5: -1- AC_SUBST([mandir], ['${datarootdir}/man']) m4trace:configure.in:5: -1- AC_SUBST_TRACE([mandir]) m4trace:configure.in:5: -1- m4_pattern_allow([^mandir$]) m4trace:configure.in:5: -1- AC_DEFINE_TRACE_LITERAL([PACKAGE_NAME]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_NAME$]) m4trace:configure.in:5: -1- AH_OUTPUT([PACKAGE_NAME], [/* Define to the full name of this package. */ @%:@undef PACKAGE_NAME]) m4trace:configure.in:5: -1- AC_DEFINE_TRACE_LITERAL([PACKAGE_TARNAME]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_TARNAME$]) m4trace:configure.in:5: -1- AH_OUTPUT([PACKAGE_TARNAME], [/* Define to the one symbol short name of this package. 
*/ @%:@undef PACKAGE_TARNAME]) m4trace:configure.in:5: -1- AC_DEFINE_TRACE_LITERAL([PACKAGE_VERSION]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_VERSION$]) m4trace:configure.in:5: -1- AH_OUTPUT([PACKAGE_VERSION], [/* Define to the version of this package. */ @%:@undef PACKAGE_VERSION]) m4trace:configure.in:5: -1- AC_DEFINE_TRACE_LITERAL([PACKAGE_STRING]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_STRING$]) m4trace:configure.in:5: -1- AH_OUTPUT([PACKAGE_STRING], [/* Define to the full name and version of this package. */ @%:@undef PACKAGE_STRING]) m4trace:configure.in:5: -1- AC_DEFINE_TRACE_LITERAL([PACKAGE_BUGREPORT]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_BUGREPORT$]) m4trace:configure.in:5: -1- AH_OUTPUT([PACKAGE_BUGREPORT], [/* Define to the address where bug reports for this package should be sent. */ @%:@undef PACKAGE_BUGREPORT]) m4trace:configure.in:5: -1- AC_DEFINE_TRACE_LITERAL([PACKAGE_URL]) m4trace:configure.in:5: -1- m4_pattern_allow([^PACKAGE_URL$]) m4trace:configure.in:5: -1- AH_OUTPUT([PACKAGE_URL], [/* Define to the home page for this package. 
*/ @%:@undef PACKAGE_URL]) m4trace:configure.in:5: -1- AC_SUBST([DEFS]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([DEFS]) m4trace:configure.in:5: -1- m4_pattern_allow([^DEFS$]) m4trace:configure.in:5: -1- AC_SUBST([ECHO_C]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([ECHO_C]) m4trace:configure.in:5: -1- m4_pattern_allow([^ECHO_C$]) m4trace:configure.in:5: -1- AC_SUBST([ECHO_N]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([ECHO_N]) m4trace:configure.in:5: -1- m4_pattern_allow([^ECHO_N$]) m4trace:configure.in:5: -1- AC_SUBST([ECHO_T]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([ECHO_T]) m4trace:configure.in:5: -1- m4_pattern_allow([^ECHO_T$]) m4trace:configure.in:5: -1- AC_SUBST([LIBS]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([LIBS]) m4trace:configure.in:5: -1- m4_pattern_allow([^LIBS$]) m4trace:configure.in:5: -1- AC_SUBST([build_alias]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([build_alias]) m4trace:configure.in:5: -1- m4_pattern_allow([^build_alias$]) m4trace:configure.in:5: -1- AC_SUBST([host_alias]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([host_alias]) m4trace:configure.in:5: -1- m4_pattern_allow([^host_alias$]) m4trace:configure.in:5: -1- AC_SUBST([target_alias]) m4trace:configure.in:5: -1- AC_SUBST_TRACE([target_alias]) m4trace:configure.in:5: -1- m4_pattern_allow([^target_alias$]) m4trace:configure.in:8: -1- _m4_warn([obsolete], [The macro `AC_GNU_SOURCE' is obsolete. You should run autoupdate.], [../../lib/autoconf/specific.m4:314: AC_GNU_SOURCE is expanded from... 
configure.in:8: the top level]) m4trace:configure.in:8: -1- AC_SUBST([CC]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:8: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:8: -1- AC_SUBST([CFLAGS]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CFLAGS]) m4trace:configure.in:8: -1- m4_pattern_allow([^CFLAGS$]) m4trace:configure.in:8: -1- AC_SUBST([LDFLAGS]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([LDFLAGS]) m4trace:configure.in:8: -1- m4_pattern_allow([^LDFLAGS$]) m4trace:configure.in:8: -1- AC_SUBST([LIBS]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([LIBS]) m4trace:configure.in:8: -1- m4_pattern_allow([^LIBS$]) m4trace:configure.in:8: -1- AC_SUBST([CPPFLAGS]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CPPFLAGS]) m4trace:configure.in:8: -1- m4_pattern_allow([^CPPFLAGS$]) m4trace:configure.in:8: -1- AC_SUBST([CC]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:8: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:8: -1- AC_SUBST([CC]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:8: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:8: -1- AC_SUBST([CC]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:8: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:8: -1- AC_SUBST([CC]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:8: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:8: -1- AC_SUBST([ac_ct_CC]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([ac_ct_CC]) m4trace:configure.in:8: -1- m4_pattern_allow([^ac_ct_CC$]) m4trace:configure.in:8: -1- AC_SUBST([EXEEXT], [$ac_cv_exeext]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([EXEEXT]) m4trace:configure.in:8: -1- m4_pattern_allow([^EXEEXT$]) m4trace:configure.in:8: -1- AC_SUBST([OBJEXT], [$ac_cv_objext]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([OBJEXT]) m4trace:configure.in:8: -1- m4_pattern_allow([^OBJEXT$]) m4trace:configure.in:8: -1- AC_SUBST([CPP]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CPP]) 
m4trace:configure.in:8: -1- m4_pattern_allow([^CPP$]) m4trace:configure.in:8: -1- AC_SUBST([CPPFLAGS]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CPPFLAGS]) m4trace:configure.in:8: -1- m4_pattern_allow([^CPPFLAGS$]) m4trace:configure.in:8: -1- AC_SUBST([CPP]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([CPP]) m4trace:configure.in:8: -1- m4_pattern_allow([^CPP$]) m4trace:configure.in:8: -1- AC_SUBST([GREP]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([GREP]) m4trace:configure.in:8: -1- m4_pattern_allow([^GREP$]) m4trace:configure.in:8: -1- AC_SUBST([EGREP]) m4trace:configure.in:8: -1- AC_SUBST_TRACE([EGREP]) m4trace:configure.in:8: -1- m4_pattern_allow([^EGREP$]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([STDC_HEADERS]) m4trace:configure.in:8: -1- m4_pattern_allow([^STDC_HEADERS$]) m4trace:configure.in:8: -1- AH_OUTPUT([STDC_HEADERS], [/* Define to 1 if you have the ANSI C header files. */ @%:@undef STDC_HEADERS]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_SYS_TYPES_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_SYS_TYPES_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_SYS_STAT_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_SYS_STAT_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STDLIB_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_STRING_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STRING_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_MEMORY_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_MEMORY_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_STRINGS_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STRINGS_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_INTTYPES_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_INTTYPES_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_STDINT_H], [/* Define to 1 if you have the header file. 
*/ @%:@undef HAVE_STDINT_H]) m4trace:configure.in:8: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_UNISTD_H]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_POSIX_SOURCE]) m4trace:configure.in:8: -1- m4_pattern_allow([^_POSIX_SOURCE$]) m4trace:configure.in:8: -1- AH_OUTPUT([_POSIX_SOURCE], [/* Define to 1 if you need to in order for `stat\' and other things to work. */ @%:@undef _POSIX_SOURCE]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_POSIX_1_SOURCE]) m4trace:configure.in:8: -1- m4_pattern_allow([^_POSIX_1_SOURCE$]) m4trace:configure.in:8: -1- AH_OUTPUT([_POSIX_1_SOURCE], [/* Define to 2 if the system does not provide POSIX.1 features except with this defined. */ @%:@undef _POSIX_1_SOURCE]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_MINIX]) m4trace:configure.in:8: -1- m4_pattern_allow([^_MINIX$]) m4trace:configure.in:8: -1- AH_OUTPUT([_MINIX], [/* Define to 1 if on MINIX. */ @%:@undef _MINIX]) m4trace:configure.in:8: -1- AH_OUTPUT([USE_SYSTEM_EXTENSIONS], [/* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # undef _ALL_SOURCE #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # undef _GNU_SOURCE #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # undef _POSIX_PTHREAD_SEMANTICS #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # undef _TANDEM_SOURCE #endif /* Enable general extensions on Solaris. 
*/ #ifndef __EXTENSIONS__ # undef __EXTENSIONS__ #endif ]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([__EXTENSIONS__]) m4trace:configure.in:8: -1- m4_pattern_allow([^__EXTENSIONS__$]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_ALL_SOURCE]) m4trace:configure.in:8: -1- m4_pattern_allow([^_ALL_SOURCE$]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_GNU_SOURCE]) m4trace:configure.in:8: -1- m4_pattern_allow([^_GNU_SOURCE$]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_POSIX_PTHREAD_SEMANTICS]) m4trace:configure.in:8: -1- m4_pattern_allow([^_POSIX_PTHREAD_SEMANTICS$]) m4trace:configure.in:8: -1- AC_DEFINE_TRACE_LITERAL([_TANDEM_SOURCE]) m4trace:configure.in:8: -1- m4_pattern_allow([^_TANDEM_SOURCE$]) m4trace:configure.in:17: -1- AC_CONFIG_HEADERS([src/config.h]) m4trace:configure.in:22: -1- AC_SUBST([CC]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:22: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:22: -1- AC_SUBST([CFLAGS]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CFLAGS]) m4trace:configure.in:22: -1- m4_pattern_allow([^CFLAGS$]) m4trace:configure.in:22: -1- AC_SUBST([LDFLAGS]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([LDFLAGS]) m4trace:configure.in:22: -1- m4_pattern_allow([^LDFLAGS$]) m4trace:configure.in:22: -1- AC_SUBST([LIBS]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([LIBS]) m4trace:configure.in:22: -1- m4_pattern_allow([^LIBS$]) m4trace:configure.in:22: -1- AC_SUBST([CPPFLAGS]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CPPFLAGS]) m4trace:configure.in:22: -1- m4_pattern_allow([^CPPFLAGS$]) m4trace:configure.in:22: -1- AC_SUBST([CC]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:22: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:22: -1- AC_SUBST([CC]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:22: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:22: -1- AC_SUBST([CC]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CC]) 
m4trace:configure.in:22: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:22: -1- AC_SUBST([CC]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([CC]) m4trace:configure.in:22: -1- m4_pattern_allow([^CC$]) m4trace:configure.in:22: -1- AC_SUBST([ac_ct_CC]) m4trace:configure.in:22: -1- AC_SUBST_TRACE([ac_ct_CC]) m4trace:configure.in:22: -1- m4_pattern_allow([^ac_ct_CC$]) m4trace:configure.in:23: -1- AC_SUBST([CPP]) m4trace:configure.in:23: -1- AC_SUBST_TRACE([CPP]) m4trace:configure.in:23: -1- m4_pattern_allow([^CPP$]) m4trace:configure.in:23: -1- AC_SUBST([CPPFLAGS]) m4trace:configure.in:23: -1- AC_SUBST_TRACE([CPPFLAGS]) m4trace:configure.in:23: -1- m4_pattern_allow([^CPPFLAGS$]) m4trace:configure.in:23: -1- AC_SUBST([CPP]) m4trace:configure.in:23: -1- AC_SUBST_TRACE([CPP]) m4trace:configure.in:23: -1- m4_pattern_allow([^CPP$]) m4trace:configure.in:33: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:33: the top level]) m4trace:configure.in:45: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:45: the top level]) m4trace:configure.in:52: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:52: the top level]) m4trace:configure.in:59: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:59: the top level]) m4trace:configure.in:76: -1- AC_DEFINE_TRACE_LITERAL([WAA_WC_MD5_CHARS]) m4trace:configure.in:76: -1- m4_pattern_allow([^WAA_WC_MD5_CHARS$]) m4trace:configure.in:76: -1- AH_OUTPUT([WAA_WC_MD5_CHARS], [/* Number of bytes for WAA addressing is @S|@WAA_WC_MD5_CHARS. 
*/ @%:@undef WAA_WC_MD5_CHARS]) m4trace:configure.in:78: -1- AC_SUBST([WAA_WC_MD5_CHARS]) m4trace:configure.in:78: -1- AC_SUBST_TRACE([WAA_WC_MD5_CHARS]) m4trace:configure.in:78: -1- m4_pattern_allow([^WAA_WC_MD5_CHARS$]) m4trace:configure.in:89: -1- AC_DEFINE_TRACE_LITERAL([CFLAGS]) m4trace:configure.in:89: -1- m4_pattern_allow([^CFLAGS$]) m4trace:configure.in:90: -1- AC_SUBST([CFLAGS]) m4trace:configure.in:90: -1- AC_SUBST_TRACE([CFLAGS]) m4trace:configure.in:90: -1- m4_pattern_allow([^CFLAGS$]) m4trace:configure.in:97: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:97: the top level]) m4trace:configure.in:104: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:104: the top level]) m4trace:configure.in:114: -1- AC_DEFINE_TRACE_LITERAL([LDFLAGS]) m4trace:configure.in:114: -1- m4_pattern_allow([^LDFLAGS$]) m4trace:configure.in:115: -1- AC_SUBST([LDFLAGS]) m4trace:configure.in:115: -1- AC_SUBST_TRACE([LDFLAGS]) m4trace:configure.in:115: -1- m4_pattern_allow([^LDFLAGS$]) m4trace:configure.in:134: -1- AC_DEFINE_TRACE_LITERAL([EXTRALIBS]) m4trace:configure.in:134: -1- m4_pattern_allow([^EXTRALIBS$]) m4trace:configure.in:135: -1- AC_SUBST([EXTRALIBS]) m4trace:configure.in:135: -1- AC_SUBST_TRACE([EXTRALIBS]) m4trace:configure.in:135: -1- m4_pattern_allow([^EXTRALIBS$]) m4trace:configure.in:140: -1- AH_OUTPUT([HAVE_LIBPCRE], [/* Define to 1 if you have the `pcre\' library (-lpcre). */ @%:@undef HAVE_LIBPCRE]) m4trace:configure.in:140: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBPCRE]) m4trace:configure.in:140: -1- m4_pattern_allow([^HAVE_LIBPCRE$]) m4trace:configure.in:143: -1- AH_OUTPUT([HAVE_LIBAPRUTIL_1], [/* Define to 1 if you have the `aprutil-1\' library (-laprutil-1). 
*/ @%:@undef HAVE_LIBAPRUTIL_1]) m4trace:configure.in:143: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBAPRUTIL_1]) m4trace:configure.in:143: -1- m4_pattern_allow([^HAVE_LIBAPRUTIL_1$]) m4trace:configure.in:145: -1- AH_OUTPUT([HAVE_LIBSVN_DELTA_1], [/* Define to 1 if you have the `svn_delta-1\' library (-lsvn_delta-1). */ @%:@undef HAVE_LIBSVN_DELTA_1]) m4trace:configure.in:145: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBSVN_DELTA_1]) m4trace:configure.in:145: -1- m4_pattern_allow([^HAVE_LIBSVN_DELTA_1$]) m4trace:configure.in:147: -1- AH_OUTPUT([HAVE_LIBSVN_RA_1], [/* Define to 1 if you have the `svn_ra-1\' library (-lsvn_ra-1). */ @%:@undef HAVE_LIBSVN_RA_1]) m4trace:configure.in:147: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBSVN_RA_1]) m4trace:configure.in:147: -1- m4_pattern_allow([^HAVE_LIBSVN_RA_1$]) m4trace:configure.in:149: -1- AH_OUTPUT([HAVE_LIBGDBM], [/* Define to 1 if you have the `gdbm\' library (-lgdbm). */ @%:@undef HAVE_LIBGDBM]) m4trace:configure.in:149: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LIBGDBM]) m4trace:configure.in:149: -1- m4_pattern_allow([^HAVE_LIBGDBM$]) m4trace:configure.in:153: -1- AC_DEFINE_TRACE_LITERAL([STDC_HEADERS]) m4trace:configure.in:153: -1- m4_pattern_allow([^STDC_HEADERS$]) m4trace:configure.in:153: -1- AH_OUTPUT([STDC_HEADERS], [/* Define to 1 if you have the ANSI C header files. */ @%:@undef STDC_HEADERS]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_FCNTL_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_FCNTL_H]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_STDDEF_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STDDEF_H]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STDLIB_H]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_STRING_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STRING_H]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_SYS_TIME_H], [/* Define to 1 if you have the header file. 
*/ @%:@undef HAVE_SYS_TIME_H]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_UNISTD_H]) m4trace:configure.in:154: -1- AH_OUTPUT([HAVE_PCRE_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_PCRE_H]) m4trace:configure.in:159: -1- AH_OUTPUT([HAVE_DIRENT_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ @%:@undef HAVE_DIRENT_H]) m4trace:configure.in:159: -1- AH_OUTPUT([HAVE_SYS_NDIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ @%:@undef HAVE_SYS_NDIR_H]) m4trace:configure.in:159: -1- AH_OUTPUT([HAVE_SYS_DIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ @%:@undef HAVE_SYS_DIR_H]) m4trace:configure.in:159: -1- AH_OUTPUT([HAVE_NDIR_H], [/* Define to 1 if you have the header file, and it defines `DIR\'. */ @%:@undef HAVE_NDIR_H]) m4trace:configure.in:161: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STRUCT_STAT_ST_MTIM]) m4trace:configure.in:161: -1- m4_pattern_allow([^HAVE_STRUCT_STAT_ST_MTIM$]) m4trace:configure.in:161: -1- AH_OUTPUT([HAVE_STRUCT_STAT_ST_MTIM], [/* Define to 1 if `st_mtim\' is a member of `struct stat\'. */ @%:@undef HAVE_STRUCT_STAT_ST_MTIM]) m4trace:configure.in:169: -1- AC_DEFINE_TRACE_LITERAL([HAVE_VALGRIND]) m4trace:configure.in:169: -1- m4_pattern_allow([^HAVE_VALGRIND$]) m4trace:configure.in:169: -1- AH_OUTPUT([HAVE_VALGRIND], [/* compatible valgrind version found */ @%:@undef HAVE_VALGRIND]) m4trace:configure.in:180: -1- _m4_warn([cross], [AC_RUN_IFELSE called without default to allow cross compiling], [../../lib/autoconf/general.m4:2748: AC_RUN_IFELSE is expanded from... configure.in:180: the top level]) m4trace:configure.in:194: -1- AH_OUTPUT([HAVE_LINUX_KDEV_T_H], [/* Define to 1 if you have the header file. 
*/ @%:@undef HAVE_LINUX_KDEV_T_H]) m4trace:configure.in:194: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LINUX_KDEV_T_H]) m4trace:configure.in:194: -1- m4_pattern_allow([^HAVE_LINUX_KDEV_T_H$]) m4trace:configure.in:197: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:197: the top level]) m4trace:configure.in:196: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_DEV_FAKE]) m4trace:configure.in:196: -1- m4_pattern_allow([^ENABLE_DEV_FAKE$]) m4trace:configure.in:202: -1- AC_SUBST([ENABLE_DEV_FAKE]) m4trace:configure.in:202: -1- AC_SUBST_TRACE([ENABLE_DEV_FAKE]) m4trace:configure.in:202: -1- m4_pattern_allow([^ENABLE_DEV_FAKE$]) m4trace:configure.in:206: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:206: the top level]) m4trace:configure.in:205: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_DEBUG]) m4trace:configure.in:205: -1- m4_pattern_allow([^ENABLE_DEBUG$]) m4trace:configure.in:211: -1- AC_SUBST([ENABLE_DEBUG]) m4trace:configure.in:211: -1- AC_SUBST_TRACE([ENABLE_DEBUG]) m4trace:configure.in:211: -1- m4_pattern_allow([^ENABLE_DEBUG$]) m4trace:configure.in:215: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... 
configure.in:215: the top level]) m4trace:configure.in:214: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_GCOV]) m4trace:configure.in:214: -1- m4_pattern_allow([^ENABLE_GCOV$]) m4trace:configure.in:220: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_GCOV]) m4trace:configure.in:220: -1- m4_pattern_allow([^ENABLE_GCOV$]) m4trace:configure.in:221: -1- AC_SUBST([ENABLE_GCOV]) m4trace:configure.in:221: -1- AC_SUBST_TRACE([ENABLE_GCOV]) m4trace:configure.in:221: -1- m4_pattern_allow([^ENABLE_GCOV$]) m4trace:configure.in:229: -1- AC_DEFINE_TRACE_LITERAL([HAVE_O_DIRECTORY]) m4trace:configure.in:229: -1- m4_pattern_allow([^HAVE_O_DIRECTORY$]) m4trace:configure.in:229: -1- AH_OUTPUT([HAVE_O_DIRECTORY], [/* O_DIRECTORY found */ @%:@undef HAVE_O_DIRECTORY]) m4trace:configure.in:231: -1- AC_SUBST([HAVE_O_DIRECTORY]) m4trace:configure.in:231: -1- AC_SUBST_TRACE([HAVE_O_DIRECTORY]) m4trace:configure.in:231: -1- m4_pattern_allow([^HAVE_O_DIRECTORY$]) m4trace:configure.in:240: -1- AC_DEFINE_TRACE_LITERAL([NEED_ENVIRON_EXTERN]) m4trace:configure.in:240: -1- m4_pattern_allow([^NEED_ENVIRON_EXTERN$]) m4trace:configure.in:240: -1- AH_OUTPUT([NEED_ENVIRON_EXTERN], [/* "char **environ" needs "extern" */ @%:@undef NEED_ENVIRON_EXTERN]) m4trace:configure.in:242: -1- AC_SUBST([NEED_ENVIRON_EXTERN]) m4trace:configure.in:242: -1- AC_SUBST_TRACE([NEED_ENVIRON_EXTERN]) m4trace:configure.in:242: -1- m4_pattern_allow([^NEED_ENVIRON_EXTERN$]) m4trace:configure.in:255: -1- AC_DEFINE_TRACE_LITERAL([HAVE_FMEMOPEN]) m4trace:configure.in:255: -1- m4_pattern_allow([^HAVE_FMEMOPEN$]) m4trace:configure.in:255: -1- AH_OUTPUT([HAVE_FMEMOPEN], [/* fmemopen() found */ @%:@undef HAVE_FMEMOPEN]) m4trace:configure.in:259: -1- AC_SUBST([HAVE_FMEMOPEN]) m4trace:configure.in:259: -1- AC_SUBST_TRACE([HAVE_FMEMOPEN]) m4trace:configure.in:259: -1- m4_pattern_allow([^HAVE_FMEMOPEN$]) m4trace:configure.in:264: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LOCALES]) m4trace:configure.in:264: -1- m4_pattern_allow([^HAVE_LOCALES$]) 
m4trace:configure.in:266: -1- AC_SUBST([HAVE_LOCALES]) m4trace:configure.in:266: -1- AC_SUBST_TRACE([HAVE_LOCALES]) m4trace:configure.in:266: -1- m4_pattern_allow([^HAVE_LOCALES$]) m4trace:configure.in:270: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:270: the top level]) m4trace:configure.in:269: -1- AC_DEFINE_TRACE_LITERAL([CHROOTER_JAIL]) m4trace:configure.in:269: -1- m4_pattern_allow([^CHROOTER_JAIL$]) m4trace:configure.in:269: -1- AH_OUTPUT([CHROOTER_JAIL], [/* The path of a chroot jail. */ @%:@undef CHROOTER_JAIL]) m4trace:configure.in:282: -1- AC_SUBST([CHROOTER_JAIL]) m4trace:configure.in:282: -1- AC_SUBST_TRACE([CHROOTER_JAIL]) m4trace:configure.in:282: -1- m4_pattern_allow([^CHROOTER_JAIL$]) m4trace:configure.in:286: -2- _m4_warn([obsolete], [The macro `AC_HELP_STRING' is obsolete. You should run autoupdate.], [../../lib/autoconf/general.m4:207: AC_HELP_STRING is expanded from... configure.in:286: the top level]) m4trace:configure.in:285: -1- AC_DEFINE_TRACE_LITERAL([ENABLE_RELEASE]) m4trace:configure.in:285: -1- m4_pattern_allow([^ENABLE_RELEASE$]) m4trace:configure.in:291: -1- AC_SUBST([ENABLE_RELEASE]) m4trace:configure.in:291: -1- AC_SUBST_TRACE([ENABLE_RELEASE]) m4trace:configure.in:291: -1- m4_pattern_allow([^ENABLE_RELEASE$]) m4trace:configure.in:299: -1- AH_OUTPUT([HAVE_GETDENTS64], [/* Define to 1 if you have the `getdents64\' function. */ @%:@undef HAVE_GETDENTS64]) m4trace:configure.in:299: -1- AC_DEFINE_TRACE_LITERAL([HAVE_GETDENTS64]) m4trace:configure.in:299: -1- m4_pattern_allow([^HAVE_GETDENTS64$]) m4trace:configure.in:300: -1- AH_OUTPUT([HAVE_LINUX_TYPES_H], [/* Define to 1 if you have the header file. 
*/ @%:@undef HAVE_LINUX_TYPES_H]) m4trace:configure.in:300: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LINUX_TYPES_H]) m4trace:configure.in:300: -1- m4_pattern_allow([^HAVE_LINUX_TYPES_H$]) m4trace:configure.in:301: -1- AH_OUTPUT([HAVE_LINUX_UNISTD_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_LINUX_UNISTD_H]) m4trace:configure.in:301: -1- AC_DEFINE_TRACE_LITERAL([HAVE_LINUX_UNISTD_H]) m4trace:configure.in:301: -1- m4_pattern_allow([^HAVE_LINUX_UNISTD_H$]) m4trace:configure.in:302: -1- AC_DEFINE_TRACE_LITERAL([HAVE_COMPARISON_FN_T]) m4trace:configure.in:302: -1- m4_pattern_allow([^HAVE_COMPARISON_FN_T$]) m4trace:configure.in:302: -1- AH_OUTPUT([HAVE_COMPARISON_FN_T], [/* Define to 1 if the system has the type `comparison_fn_t\'. */ @%:@undef HAVE_COMPARISON_FN_T]) m4trace:configure.in:304: -1- AC_DEFINE_TRACE_LITERAL([_FILE_OFFSET_BITS]) m4trace:configure.in:304: -1- m4_pattern_allow([^_FILE_OFFSET_BITS$]) m4trace:configure.in:304: -1- AH_OUTPUT([_FILE_OFFSET_BITS], [/* Number of bits in a file offset, on hosts where this is settable. */ @%:@undef _FILE_OFFSET_BITS]) m4trace:configure.in:304: -1- AC_DEFINE_TRACE_LITERAL([_LARGE_FILES]) m4trace:configure.in:304: -1- m4_pattern_allow([^_LARGE_FILES$]) m4trace:configure.in:304: -1- AH_OUTPUT([_LARGE_FILES], [/* Define for large files, on AIX-style hosts. */ @%:@undef _LARGE_FILES]) m4trace:configure.in:304: -1- AH_OUTPUT([_DARWIN_USE_64_BIT_INODE], [/* Enable large inode numbers on Mac OS X 10.5. */ #ifndef _DARWIN_USE_64_BIT_INODE # define _DARWIN_USE_64_BIT_INODE 1 #endif]) m4trace:configure.in:307: -1- AC_DEFINE_TRACE_LITERAL([const]) m4trace:configure.in:307: -1- m4_pattern_allow([^const$]) m4trace:configure.in:307: -1- AH_OUTPUT([const], [/* Define to empty if `const\' does not conform to ANSI C. 
*/ @%:@undef const]) m4trace:configure.in:308: -1- AH_OUTPUT([inline], [/* Define to `__inline__\' or `__inline\' if that\'s what the C compiler calls it, or to nothing if \'inline\' is not supported under any name. */ #ifndef __cplusplus #undef inline #endif]) m4trace:configure.in:309: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STRUCT_STAT_ST_RDEV]) m4trace:configure.in:309: -1- m4_pattern_allow([^HAVE_STRUCT_STAT_ST_RDEV$]) m4trace:configure.in:309: -1- AH_OUTPUT([HAVE_STRUCT_STAT_ST_RDEV], [/* Define to 1 if `st_rdev\' is a member of `struct stat\'. */ @%:@undef HAVE_STRUCT_STAT_ST_RDEV]) m4trace:configure.in:310: -1- AC_DEFINE_TRACE_LITERAL([TIME_WITH_SYS_TIME]) m4trace:configure.in:310: -1- m4_pattern_allow([^TIME_WITH_SYS_TIME$]) m4trace:configure.in:310: -1- AH_OUTPUT([TIME_WITH_SYS_TIME], [/* Define to 1 if you can safely include both and . */ @%:@undef TIME_WITH_SYS_TIME]) m4trace:configure.in:311: -1- AC_DEFINE_TRACE_LITERAL([TM_IN_SYS_TIME]) m4trace:configure.in:311: -1- m4_pattern_allow([^TM_IN_SYS_TIME$]) m4trace:configure.in:311: -1- AH_OUTPUT([TM_IN_SYS_TIME], [/* Define to 1 if your declares `struct tm\'. */ @%:@undef TM_IN_SYS_TIME]) m4trace:configure.in:313: -1- AC_DEFINE_TRACE_LITERAL([HAS_FASTCALL]) m4trace:configure.in:313: -1- m4_pattern_allow([^HAS_FASTCALL$]) m4trace:configure.in:314: -1- AC_SUBST([HAS_FASTCALL]) m4trace:configure.in:314: -1- AC_SUBST_TRACE([HAS_FASTCALL]) m4trace:configure.in:314: -1- m4_pattern_allow([^HAS_FASTCALL$]) m4trace:configure.in:323: -1- AC_DEFINE_TRACE_LITERAL([_UINT32_T]) m4trace:configure.in:323: -1- m4_pattern_allow([^_UINT32_T$]) m4trace:configure.in:323: -1- AH_OUTPUT([_UINT32_T], [/* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef were allowed, the @%:@define below would cause a syntax error. 
*/ @%:@undef _UINT32_T]) m4trace:configure.in:323: -1- AC_DEFINE_TRACE_LITERAL([uint32_t]) m4trace:configure.in:323: -1- m4_pattern_allow([^uint32_t$]) m4trace:configure.in:323: -1- AH_OUTPUT([uint32_t], [/* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ @%:@undef uint32_t]) m4trace:configure.in:324: -1- AC_SUBST([HAVE_UINT32_T]) m4trace:configure.in:324: -1- AC_SUBST_TRACE([HAVE_UINT32_T]) m4trace:configure.in:324: -1- m4_pattern_allow([^HAVE_UINT32_T$]) m4trace:configure.in:330: -1- AC_DEFINE_TRACE_LITERAL([AC_CV_C_UINT32_T]) m4trace:configure.in:330: -1- m4_pattern_allow([^AC_CV_C_UINT32_T$]) m4trace:configure.in:332: -1- AC_DEFINE_TRACE_LITERAL([_UINT64_T]) m4trace:configure.in:332: -1- m4_pattern_allow([^_UINT64_T$]) m4trace:configure.in:332: -1- AH_OUTPUT([_UINT64_T], [/* Define for Solaris 2.5.1 so the uint64_t typedef from , , or is not used. If the typedef were allowed, the @%:@define below would cause a syntax error. */ @%:@undef _UINT64_T]) m4trace:configure.in:332: -1- AC_DEFINE_TRACE_LITERAL([uint64_t]) m4trace:configure.in:332: -1- m4_pattern_allow([^uint64_t$]) m4trace:configure.in:332: -1- AH_OUTPUT([uint64_t], [/* Define to the type of an unsigned integer type of width exactly 64 bits if such a type exists and the standard includes do not define it. */ @%:@undef uint64_t]) m4trace:configure.in:333: -1- AC_SUBST([HAVE_UINT64_T]) m4trace:configure.in:333: -1- AC_SUBST_TRACE([HAVE_UINT64_T]) m4trace:configure.in:333: -1- m4_pattern_allow([^HAVE_UINT64_T$]) m4trace:configure.in:338: -1- AC_DEFINE_TRACE_LITERAL([AC_CV_C_UINT64_T]) m4trace:configure.in:338: -1- m4_pattern_allow([^AC_CV_C_UINT64_T$]) m4trace:configure.in:342: -1- AC_DEFINE_TRACE_LITERAL([uid_t]) m4trace:configure.in:342: -1- m4_pattern_allow([^uid_t$]) m4trace:configure.in:342: -1- AH_OUTPUT([uid_t], [/* Define to `int\' if doesn\'t define. 
*/ @%:@undef uid_t]) m4trace:configure.in:342: -1- AC_DEFINE_TRACE_LITERAL([gid_t]) m4trace:configure.in:342: -1- m4_pattern_allow([^gid_t$]) m4trace:configure.in:342: -1- AH_OUTPUT([gid_t], [/* Define to `int\' if doesn\'t define. */ @%:@undef gid_t]) m4trace:configure.in:342: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_UNISTD_H]) m4trace:configure.in:342: -1- AC_DEFINE_TRACE_LITERAL([HAVE_UNISTD_H]) m4trace:configure.in:342: -1- m4_pattern_allow([^HAVE_UNISTD_H$]) m4trace:configure.in:342: -1- AC_DEFINE_TRACE_LITERAL([HAVE_CHOWN]) m4trace:configure.in:342: -1- m4_pattern_allow([^HAVE_CHOWN$]) m4trace:configure.in:342: -1- AH_OUTPUT([HAVE_CHOWN], [/* Define to 1 if your system has a working `chown\' function. */ @%:@undef HAVE_CHOWN]) m4trace:configure.in:343: -1- AC_DEFINE_TRACE_LITERAL([pid_t]) m4trace:configure.in:343: -1- m4_pattern_allow([^pid_t$]) m4trace:configure.in:343: -1- AH_OUTPUT([pid_t], [/* Define to `int\' if does not define. */ @%:@undef pid_t]) m4trace:configure.in:343: -1- AH_OUTPUT([HAVE_VFORK_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_VFORK_H]) m4trace:configure.in:343: -1- AC_DEFINE_TRACE_LITERAL([HAVE_VFORK_H]) m4trace:configure.in:343: -1- m4_pattern_allow([^HAVE_VFORK_H$]) m4trace:configure.in:343: -1- AH_OUTPUT([HAVE_FORK], [/* Define to 1 if you have the `fork\' function. */ @%:@undef HAVE_FORK]) m4trace:configure.in:343: -1- AH_OUTPUT([HAVE_VFORK], [/* Define to 1 if you have the `vfork\' function. */ @%:@undef HAVE_VFORK]) m4trace:configure.in:343: -1- AC_DEFINE_TRACE_LITERAL([HAVE_WORKING_VFORK]) m4trace:configure.in:343: -1- m4_pattern_allow([^HAVE_WORKING_VFORK$]) m4trace:configure.in:343: -1- AH_OUTPUT([HAVE_WORKING_VFORK], [/* Define to 1 if `vfork\' works. 
*/ @%:@undef HAVE_WORKING_VFORK]) m4trace:configure.in:343: -1- AC_DEFINE_TRACE_LITERAL([vfork]) m4trace:configure.in:343: -1- m4_pattern_allow([^vfork$]) m4trace:configure.in:343: -1- AH_OUTPUT([vfork], [/* Define as `fork\' if `vfork\' does not work. */ @%:@undef vfork]) m4trace:configure.in:343: -1- AC_DEFINE_TRACE_LITERAL([HAVE_WORKING_FORK]) m4trace:configure.in:343: -1- m4_pattern_allow([^HAVE_WORKING_FORK$]) m4trace:configure.in:343: -1- AH_OUTPUT([HAVE_WORKING_FORK], [/* Define to 1 if `fork\' works. */ @%:@undef HAVE_WORKING_FORK]) m4trace:configure.in:344: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STDLIB_H]) m4trace:configure.in:344: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STDLIB_H]) m4trace:configure.in:344: -1- m4_pattern_allow([^HAVE_STDLIB_H$]) m4trace:configure.in:344: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MALLOC]) m4trace:configure.in:344: -1- m4_pattern_allow([^HAVE_MALLOC$]) m4trace:configure.in:344: -1- AH_OUTPUT([HAVE_MALLOC], [/* Define to 1 if your system has a GNU libc compatible `malloc\' function, and to 0 otherwise. */ @%:@undef HAVE_MALLOC]) m4trace:configure.in:344: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MALLOC]) m4trace:configure.in:344: -1- m4_pattern_allow([^HAVE_MALLOC$]) m4trace:configure.in:344: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS malloc.$ac_objext"]) m4trace:configure.in:344: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) m4trace:configure.in:344: -1- m4_pattern_allow([^LIB@&t@OBJS$]) m4trace:configure.in:344: -1- AC_LIBSOURCE([malloc.c]) m4trace:configure.in:344: -1- AC_DEFINE_TRACE_LITERAL([malloc]) m4trace:configure.in:344: -1- m4_pattern_allow([^malloc$]) m4trace:configure.in:344: -1- AH_OUTPUT([malloc], [/* Define to rpl_malloc if the replacement function should be used. 
*/ @%:@undef malloc]) m4trace:configure.in:345: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS memcmp.$ac_objext"]) m4trace:configure.in:345: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) m4trace:configure.in:345: -1- m4_pattern_allow([^LIB@&t@OBJS$]) m4trace:configure.in:345: -1- AC_LIBSOURCE([memcmp.c]) m4trace:configure.in:346: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STDLIB_H]) m4trace:configure.in:346: -1- AH_OUTPUT([HAVE_UNISTD_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_UNISTD_H]) m4trace:configure.in:346: -1- AH_OUTPUT([HAVE_SYS_PARAM_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_SYS_PARAM_H]) m4trace:configure.in:346: -1- AH_OUTPUT([HAVE_GETPAGESIZE], [/* Define to 1 if you have the `getpagesize\' function. */ @%:@undef HAVE_GETPAGESIZE]) m4trace:configure.in:346: -1- AC_DEFINE_TRACE_LITERAL([HAVE_GETPAGESIZE]) m4trace:configure.in:346: -1- m4_pattern_allow([^HAVE_GETPAGESIZE$]) m4trace:configure.in:346: -1- AC_DEFINE_TRACE_LITERAL([HAVE_MMAP]) m4trace:configure.in:346: -1- m4_pattern_allow([^HAVE_MMAP$]) m4trace:configure.in:346: -1- AH_OUTPUT([HAVE_MMAP], [/* Define to 1 if you have a working `mmap\' system call. */ @%:@undef HAVE_MMAP]) m4trace:configure.in:347: -1- AH_OUTPUT([HAVE_STDLIB_H], [/* Define to 1 if you have the header file. */ @%:@undef HAVE_STDLIB_H]) m4trace:configure.in:347: -1- AC_DEFINE_TRACE_LITERAL([HAVE_STDLIB_H]) m4trace:configure.in:347: -1- m4_pattern_allow([^HAVE_STDLIB_H$]) m4trace:configure.in:347: -1- AC_DEFINE_TRACE_LITERAL([HAVE_REALLOC]) m4trace:configure.in:347: -1- m4_pattern_allow([^HAVE_REALLOC$]) m4trace:configure.in:347: -1- AH_OUTPUT([HAVE_REALLOC], [/* Define to 1 if your system has a GNU libc compatible `realloc\' function, and to 0 otherwise. 
*/ @%:@undef HAVE_REALLOC]) m4trace:configure.in:347: -1- AC_DEFINE_TRACE_LITERAL([HAVE_REALLOC]) m4trace:configure.in:347: -1- m4_pattern_allow([^HAVE_REALLOC$]) m4trace:configure.in:347: -1- AC_SUBST([LIB@&t@OBJS], ["$LIB@&t@OBJS realloc.$ac_objext"]) m4trace:configure.in:347: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) m4trace:configure.in:347: -1- m4_pattern_allow([^LIB@&t@OBJS$]) m4trace:configure.in:347: -1- AC_LIBSOURCE([realloc.c]) m4trace:configure.in:347: -1- AC_DEFINE_TRACE_LITERAL([realloc]) m4trace:configure.in:347: -1- m4_pattern_allow([^realloc$]) m4trace:configure.in:347: -1- AH_OUTPUT([realloc], [/* Define to rpl_realloc if the replacement function should be used. */ @%:@undef realloc]) m4trace:configure.in:348: -1- _m4_warn([obsolete], [The macro `AC_TYPE_SIGNAL' is obsolete. You should run autoupdate.], [../../lib/autoconf/types.m4:746: AC_TYPE_SIGNAL is expanded from... configure.in:348: the top level]) m4trace:configure.in:348: -1- AC_DEFINE_TRACE_LITERAL([RETSIGTYPE]) m4trace:configure.in:348: -1- m4_pattern_allow([^RETSIGTYPE$]) m4trace:configure.in:348: -1- AH_OUTPUT([RETSIGTYPE], [/* Define as the return type of signal handlers (`int\' or `void\'). */ @%:@undef RETSIGTYPE]) m4trace:configure.in:349: -1- AH_OUTPUT([HAVE_VPRINTF], [/* Define to 1 if you have the `vprintf\' function. */ @%:@undef HAVE_VPRINTF]) m4trace:configure.in:349: -1- AC_DEFINE_TRACE_LITERAL([HAVE_VPRINTF]) m4trace:configure.in:349: -1- m4_pattern_allow([^HAVE_VPRINTF$]) m4trace:configure.in:349: -1- AC_DEFINE_TRACE_LITERAL([HAVE_DOPRNT]) m4trace:configure.in:349: -1- m4_pattern_allow([^HAVE_DOPRNT$]) m4trace:configure.in:349: -1- AH_OUTPUT([HAVE_DOPRNT], [/* Define to 1 if you don\'t have `vprintf\' but do have `_doprnt.\' */ @%:@undef HAVE_DOPRNT]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_FCHDIR], [/* Define to 1 if you have the `fchdir\' function. 
*/ @%:@undef HAVE_FCHDIR]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_GETCWD], [/* Define to 1 if you have the `getcwd\' function. */ @%:@undef HAVE_GETCWD]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_GETTIMEOFDAY], [/* Define to 1 if you have the `gettimeofday\' function. */ @%:@undef HAVE_GETTIMEOFDAY]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_MEMMOVE], [/* Define to 1 if you have the `memmove\' function. */ @%:@undef HAVE_MEMMOVE]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_MEMSET], [/* Define to 1 if you have the `memset\' function. */ @%:@undef HAVE_MEMSET]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_MKDIR], [/* Define to 1 if you have the `mkdir\' function. */ @%:@undef HAVE_MKDIR]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_MUNMAP], [/* Define to 1 if you have the `munmap\' function. */ @%:@undef HAVE_MUNMAP]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_RMDIR], [/* Define to 1 if you have the `rmdir\' function. */ @%:@undef HAVE_RMDIR]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRCHR], [/* Define to 1 if you have the `strchr\' function. */ @%:@undef HAVE_STRCHR]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRDUP], [/* Define to 1 if you have the `strdup\' function. */ @%:@undef HAVE_STRDUP]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRERROR], [/* Define to 1 if you have the `strerror\' function. */ @%:@undef HAVE_STRERROR]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRRCHR], [/* Define to 1 if you have the `strrchr\' function. */ @%:@undef HAVE_STRRCHR]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRTOUL], [/* Define to 1 if you have the `strtoul\' function. */ @%:@undef HAVE_STRTOUL]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRTOULL], [/* Define to 1 if you have the `strtoull\' function. */ @%:@undef HAVE_STRTOULL]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_ALPHASORT], [/* Define to 1 if you have the `alphasort\' function. 
*/ @%:@undef HAVE_ALPHASORT]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_DIRFD], [/* Define to 1 if you have the `dirfd\' function. */ @%:@undef HAVE_DIRFD]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_LCHOWN], [/* Define to 1 if you have the `lchown\' function. */ @%:@undef HAVE_LCHOWN]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_LUTIMES], [/* Define to 1 if you have the `lutimes\' function. */ @%:@undef HAVE_LUTIMES]) m4trace:configure.in:350: -1- AH_OUTPUT([HAVE_STRSEP], [/* Define to 1 if you have the `strsep\' function. */ @%:@undef HAVE_STRSEP]) m4trace:configure.in:354: -1- AC_CONFIG_FILES([src/Makefile tests/Makefile]) m4trace:configure.in:355: -1- AC_SUBST([LIB@&t@OBJS], [$ac_libobjs]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([LIB@&t@OBJS]) m4trace:configure.in:355: -1- m4_pattern_allow([^LIB@&t@OBJS$]) m4trace:configure.in:355: -1- AC_SUBST([LTLIBOBJS], [$ac_ltlibobjs]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([LTLIBOBJS]) m4trace:configure.in:355: -1- m4_pattern_allow([^LTLIBOBJS$]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([top_builddir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([top_build_prefix]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([srcdir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([abs_srcdir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([top_srcdir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([abs_top_srcdir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([builddir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([abs_builddir]) m4trace:configure.in:355: -1- AC_SUBST_TRACE([abs_top_builddir]) fsvs-1.2.6/autom4te.cache/requests0000644000202400020240000000644312554717236016074 0ustar marekmarek# This file was generated by Autom4te Sun Aug 31 17:43:43 UTC 2014. # It contains the lists of macros which have been traced. # It can be safely removed. 
@request = ( bless( [ '0', 1, [ '/usr/share/autoconf' ], [ '/usr/share/autoconf/autoconf/autoconf.m4f', 'configure.in' ], { '_LT_AC_TAGCONFIG' => 1, 'AM_PROG_FC_C_O' => 1, 'LT_CONFIG_LTDL_DIR' => 1, '_AM_SUBST_NOTMAKE' => 1, 'm4_pattern_allow' => 1, 'AC_SUBST_TRACE' => 1, 'LT_INIT' => 1, '_AM_MAKEFILE_INCLUDE' => 1, 'AC_CANONICAL_HOST' => 1, 'AC_CONFIG_FILES' => 1, 'AM_INIT_AUTOMAKE' => 1, 'AM_PROG_F77_C_O' => 1, 'AM_CONDITIONAL' => 1, 'AC_FC_PP_DEFINE' => 1, 'AC_CONFIG_AUX_DIR' => 1, 'AM_MAINTAINER_MODE' => 1, 'AC_CANONICAL_BUILD' => 1, 'AC_FC_FREEFORM' => 1, 'AM_MAKEFILE_INCLUDE' => 1, 'AM_GNU_GETTEXT_INTL_SUBDIR' => 1, 'AC_SUBST' => 1, 'AC_LIBSOURCE' => 1, '_AM_COND_ELSE' => 1, 'sinclude' => 1, 'AC_FC_SRCEXT' => 1, 'AC_CANONICAL_SYSTEM' => 1, 'AC_FC_PP_SRCEXT' => 1, 'AC_REQUIRE_AUX_FILE' => 1, 'AC_CANONICAL_TARGET' => 1, 'AM_PROG_AR' => 1, 'AM_POT_TOOLS' => 1, 'm4_pattern_forbid' => 1, 'AM_SILENT_RULES' => 1, 'LT_SUPPORTED_TAG' => 1, 'AH_OUTPUT' => 1, 'AM_AUTOMAKE_VERSION' => 1, 'AM_PROG_LIBTOOL' => 1, 'm4_sinclude' => 1, 'AC_PROG_LIBTOOL' => 1, 'AC_CONFIG_HEADERS' => 1, '_AM_COND_ENDIF' => 1, 'AM_ENABLE_MULTILIB' => 1, 'AM_PATH_GUILE' => 1, 'AC_CONFIG_LINKS' => 1, '_m4_warn' => 1, 'AM_PROG_MOC' => 1, 'AC_DEFINE_TRACE_LITERAL' => 1, 'AM_NLS' => 1, 'AM_XGETTEXT_OPTION' => 1, 'AM_PROG_CC_C_O' => 1, 'include' => 1, 'm4_include' => 1, 'AC_CONFIG_SUBDIRS' => 1, 'AM_GNU_GETTEXT' => 1, 'AM_PROG_CXX_C_O' => 1, 'AC_CONFIG_LIBOBJ_DIR' => 1, '_AM_COND_IF' => 1, 'AC_INIT' => 1 } ], 'Autom4te::Request' ) ); fsvs-1.2.6/README0000644000202400020240000000713411100577645012342 0ustar marekmarek FSVS - a fast system versioning tool. http://fsvs.tigris.org Developer mailing list at dev@fsvs.tigris.org Users mailing list at users@fsvs.tigris.org (C)opyrights by philipp@marek.priv.at 2005-2008 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 3 as published by the Free Software Foundation. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA What does it do? ---------------- FSVS is a backup/archival/versioning tool, which uses subversion backends for storage. This means that previous versions of all files are available in case of hardware problems, data loss, virus infections, user problems etc. FSVS is used to take snapshots of the current machine and restore them; all advanced operations (taking diffs, merging, etc.) should be done via some repository browser. FSVS runs currently on Linux, OpenBSD and OS X, and I think it works with Solaris, too - in short, UNIX should be fine. Why was it written? ------------------- Well, mostly to scratch an itch :-) Backup methods using rsync have no or very limited history, svn saves no metadata and needs double local storage, svk doesn't understand all file types and is (IMO) too slow for full system versioning. How is it used? --------------- Please take a look at subversion [1]'s documentation; subversion's libraries (and by implication apr [2]) are needed for operation. See also the subversion book [3]. First install subversion (and, by implication, apr). Next compile fsvs. cd src make And install the binary: (the man-pages are not automatically installed yet.) make install Make a repository somewhere, preferably on another machine. svnadmin create /path/to/repos Create a local directory for the "working copy administrative area". If you'd like to use another path, just set the environment variable WAA to it. 
mkdir -p /var/spool/fsvs /etc/fsvs Go to the base path for versioning: cd / Tell fsvs which URL it should use: fsvs url svn+ssh://username@machine/path/to/repos Define ignore patterns - all virtual filesystems (/proc, /sys, etc.), and (assuming that you're in / currently) the temporary files in /tmp: fsvs ignore DEVICE:0 ./tmp/* And you're ready to play! Check your data in: fsvs commit -m "First import" See the files in doc for more details; here, as (ordered) list: fsvs.1 - Manual page; describes FSVS' commands USAGE - Manual page in ASCII IGNORING - Why/how to ignore entries fsvs-url-format.5 - Detailed description of FSVS' URLs definitions fsvs-options.5 - Options for FSVS (command line, config file) fsvs-howto-backup.5 - A short HOWTO. These documents can be browsed in HTML on http://doc.fsvs-software.org/, too. (And they're a bit more readable there.) If it bails out with an error, I'd appreciate if you'd run the failing command with the option "-v" (verbose) and send the last lines to the developers mailing list; sometimes it may be necessary to see the complete debug log file, which you can get by using "-v -d". Notes/Links ----------- 1: http://subversion.tigris.org/ 2: http://apr.apache.org/ 3: http://svnbook.red-bean.com/ fsvs-1.2.6/Makefile0000644000202400020240000000120111014524362013077 0ustar marekmarekdefault-target: src/config.h @$(MAKE) --no-print-directory -C src %: @$(MAKE) --no-print-directory -C src $@ src/config.h: configure @echo '' @echo 'You have to run "./configure" before compiling, which might need' @echo 'some options depending on your system.' @echo '' @echo 'See "./configure --help" for a listing of the parameters.' @echo '' @false configure: configure.in @echo Generating configure. 
autoconf distclean: rm -f config.cache config.log config.status 2> /dev/null || true rm -f src/Makefile src/tags tests/Makefile 2> /dev/null || true rm -f src/config.h src/*.[os] src/.*.d src/fsvs 2> /dev/null || true fsvs-1.2.6/pre-1.2-migration/0000755000202400020240000000000012554717236014536 5ustar marekmarekfsvs-1.2.6/pre-1.2-migration/convert-ignores.pl0000644000202400020240000000141211436765403020212 0ustar marekmarek#!/usr/bin/perl -ni.bak use strict; our $counter; if ($. == 1) { # Header line print; $counter=$_+0; } else { # Normal line my $orig=$_; # Remove optional separators 1 while s{^([^/.]+),+}{$1}; my %fields; my $group="ignore"; $fields{"mode$2"}++ while s/^(\w*?)m(:\d+:\d+)/$1/; $group='take' while s/^(\w*?)t+/$1/; $fields{"dironly"}++ while s/^(\w*?)d+/$1/; $fields{"insens"}++ while s/^(\w*?)i+/$1/; # "./" or "/" at the start die "Ignore pattern '$orig' in $ARGV not converted, got '$_'" unless m{^\.?/}; print join(",", sort("group:$group", keys %fields), $_); die "Line numbering wrong" if !defined($counter) || $counter<1; $counter--; } if (eof) { close ARGV; die "Counter '$counter' wrong?" unless $counter==0; } # vim: set sw=2 ts=2 et fsvs-1.2.6/pre-1.2-migration/Makefile0000644000202400020240000000114511216125742016164 0ustar marekmarek BASEDIR := $(PWD) SCRIPT := $(BASEDIR)/convert-ignores.pl TESTDIR := $(BASEDIR)/in CMPDIR := $(BASEDIR)/exp TMPDIR := /tmp/fsvs-convert-test.d MAKEFLAGS += --no-print-directory all: single combined @echo All done. 
prep: @mkdir -p $(TMPDIR) @rm -f $(TMPDIR)/* || true @cp -a $(TESTDIR)/* $(TMPDIR)/ cmp: @rm -f $(TMPDIR)/*.bak @diff -au $(TMPDIR)/ $(CMPDIR) single: @$(MAKE) prep @echo Single @for file in $(wildcard $(TMPDIR)/*) ; do $(SCRIPT) $$file ; done @$(MAKE) cmp combined: @$(MAKE) prep @echo Combined @$(SCRIPT) $(wildcard $(TMPDIR)/*) @$(MAKE) cmp .PHONY: prep cmp single combined fsvs-1.2.6/pre-1.2-migration/exp/0000755000202400020240000000000012554717236015332 5ustar marekmarekfsvs-1.2.6/pre-1.2-migration/exp/simple0000644000202400020240000000017511216125742016536 0ustar marekmarek5 group:ignore,./etc/** group:ignore,/sys/** group:ignore,insens,/**.bak dironly,group:ignore,/**.tmp group:take,/**.pl fsvs-1.2.6/pre-1.2-migration/exp/mode0000644000202400020240000000017211216125742016166 0ustar marekmarek4 group:ignore,mode:0:0,/a group:ignore,mode:0:1,./b group:ignore,mode:2:99212,/c group:ignore,mode:1231312:99212,./d fsvs-1.2.6/pre-1.2-migration/exp/zz0000644000202400020240000000040511216125742015704 0ustar marekmarek7 group:take,mode:0:0,/a group:ignore,insens,mode:0:1,./b dironly,group:ignore,mode:2:99212,/c dironly,group:take,insens,mode:1231312:99212,./d dironly,group:ignore,insens,mode:0:1,./e group:take,insens,mode:0:0,/f,igdm:1:2 group:take,insens,/f,igdm:1:2 fsvs-1.2.6/pre-1.2-migration/exp/more0000644000202400020240000000014511216125742016204 0ustar marekmarek3 dironly,group:take,insens,/etc1 dironly,group:take,insens,/etc2 dironly,group:take,insens,/etc3 fsvs-1.2.6/pre-1.2-migration/exp/00000644000202400020240000000000211216125742015371 0ustar marekmarek0 fsvs-1.2.6/pre-1.2-migration/in/0000755000202400020240000000000012554717236015144 5ustar marekmarekfsvs-1.2.6/pre-1.2-migration/in/simple0000644000202400020240000000006211216125742016343 0ustar marekmarek5 ./etc/** /sys/** i/**.bak d/**.tmp t/**.pl fsvs-1.2.6/pre-1.2-migration/in/mode0000644000202400020240000000007011216125742015775 0ustar marekmarek4 m:0:0/a m:0:1./b m:2:99212,/c m:1231312:99212,./d 
fsvs-1.2.6/pre-1.2-migration/in/zz0000644000202400020240000000020611216125742015515 0ustar marekmarek7 t,m:0:0/a im:0:1./b d,m:2:99212,/c di,tm:1231312:99212,./d dd,i,m:0:1,,,,./e ttti,tt,t,m:0:0/f,igdm:1:2 ttti,tt,t/f,igdm:1:2 fsvs-1.2.6/pre-1.2-migration/in/more0000644000202400020240000000004011216125742016010 0ustar marekmarek3 tid/etc1 itd/etc2 dti/etc3 fsvs-1.2.6/pre-1.2-migration/in/00000644000202400020240000000000211216125742015203 0ustar marekmarek0 fsvs-1.2.6/example/0000755000202400020240000000000012554717236013116 5ustar marekmarekfsvs-1.2.6/example/README0000644000202400020240000000073510757622156014002 0ustar marekmarek This is an example setup for versioning /etc in debian-based systems. --- NO WARRANTY OF ANY KIND --- BEWARE! You might end up putting configuration files, that should be kept secret, into the repository! I defined filtering for the few I know - but you'd better check yourself! If you want to see the fruits of your versioning efforts, put yourself into the sysver group, and use subcommander or any other GUI you like. TODO: Write some better documentation. 
fsvs-1.2.6/example/etc/0000755000202400020240000000000012554717236013671 5ustar marekmarekfsvs-1.2.6/example/etc/apt/0000755000202400020240000000000012554717236014455 5ustar marekmarekfsvs-1.2.6/example/etc/apt/apt.conf.d/0000755000202400020240000000000012554717236016407 5ustar marekmarekfsvs-1.2.6/example/etc/apt/apt.conf.d/50fsvs-system-versioning0000644000202400020240000000013011002037671023136 0ustar marekmarekDPkg::Post-Invoke ""; DPkg::Post-Invoke:: "/var/lib/fsvs-versioning/scripts/commit.sh"; fsvs-1.2.6/example/etc/cron.daily/0000755000202400020240000000000012554717236015733 5ustar marekmarekfsvs-1.2.6/example/etc/cron.daily/fsvs-versioning0000644000202400020240000000011610757622156021015 0ustar marekmarek#!/bin/sh /var/lib/fsvs-versioning/scripts/commit.sh "Commit per cron.daily" fsvs-1.2.6/example/etc/fsvs/0000755000202400020240000000000012554717236014652 5ustar marekmarekfsvs-1.2.6/example/etc/fsvs/groups/0000755000202400020240000000000012554717236016171 5ustar marekmarekfsvs-1.2.6/example/etc/fsvs/groups/unreadable0000644000202400020240000000101011110214072020157 0ustar marekmarek# This is an example for a FSVS group definition file. # See fsvs(1) for more details. # # This file is used for unreadable files, ie. files without the others-read # bit set. # There are two main choices for them: # - ignore them # - or keep them versioned, but encrypted. # As long as the "ignore" line is present, the entries will be ignored. ignore # If you want to encrypt the data, you have to change the example key-ID to # the one you want to use. auto-prop fsvs:commit-pipe gpg -er root 0x12345678 fsvs-1.2.6/example/setup.sh0000755000202400020240000000444711110214072014577 0ustar marekmarek#!/bin/sh location=/var/lib/fsvs-versioning/repository scripts=/var/lib/fsvs-versioning/scripts group=sysver set -e set -x cd /etc # Ignore if group already exists addgroup $group || true if fsvs info > /dev/null 2>&1 then echo Already configured for /etc. else if ! 
svnlook info -r0 $location >/dev/null 2>&1 then # Keep the data secret mkdir -m 2770 -p $location # BDB is faster than FSFS, especially for many small files in # many revisions. svnadmin create --fs-type bdb $location # branches might not be needed, but tags could make sense. svn mkdir file://$location/trunk file://$location/tags -m "create trunk and tags" # We keep the directory structure 1:1, so it could easily be # moved to include the full system. # # Note: If we'd do the versioning at the root, we'd have either # to exclude everything except /etc (tricky, and error-prone), or # have some take pattern - but then new ignore patterns (by other # packages) couldn't simply be appended. svn mkdir file://$location/trunk/etc -m "create etc" chown 0.$group $location -R fi # Create local filelist, to make "fsvs ps" work. fsvs checkout file://$location/trunk/etc conf_path=`fsvs info . | grep Conf-Path | cut -f2 -d:` fsvs ignore '/etc/**.dpkg-old' '/etc/**.dpkg-new' '/etc/**.dpkg-dist' '/etc/**.dpkg-bak' fsvs ignore '/etc/**.bak' '/etc/**.old' '/etc/**~' '/**.swp' # easy to remake, no big deal (?) fsvs ignore '/etc/ssh/ssh_host_*key' # Not used? fsvs ignore /etc/apt/secring.gpg fsvs ignore /etc/mtab fsvs ignore /etc/ld.so.cache /etc/adjtime # Just compiled data? fsvs ignore '/etc/selinux/*.pp' # unknown whether that should be backuped. fsvs ignore '/etc/identd.key' fsvs ignore '/etc/ppp/*-secrets' fsvs ps fsvs:commit-pipe /var/lib/fsvs-versioning/scripts/remove-password-line.pl ddclient.conf || true # Are there non-shadow systems? # fsvs ignore './shadow' './gshadow' fsvs ps fsvs:commit-pipe /var/lib/fsvs-versioning/scripts/shadow-clean.pl shadow gshadow # Match entries that are not world-readable. fsvs group 'group:unreadable,m:4:0' # Lock-files are not needed, are they? fsvs ignore './**.lock' './**.LOCK' # Should we commit the current ignore list? # fsvs commit -m "Initial import" # Should we ignore the "Urls" file changing? 
# Having it in shows which revision /etc was at. fi fsvs-1.2.6/example/var/0000755000202400020240000000000012554717236013706 5ustar marekmarekfsvs-1.2.6/example/var/lib/0000755000202400020240000000000012554717236014454 5ustar marekmarekfsvs-1.2.6/example/var/lib/fsvs-versioning/0000755000202400020240000000000012554717236017616 5ustar marekmarekfsvs-1.2.6/example/var/lib/fsvs-versioning/scripts/0000755000202400020240000000000012554717236021305 5ustar marekmarekfsvs-1.2.6/example/var/lib/fsvs-versioning/scripts/remove-password-line.pl0000755000202400020240000000016511016533454025717 0ustar marekmarek#!/usr/bin/perl while (<>) { # No substitution value, could be used wrongly s#^(\s*password\s*=).*#\1#; print; } fsvs-1.2.6/example/var/lib/fsvs-versioning/scripts/commit.sh0000755000202400020240000000072211051471142023115 0ustar marekmarek#!/bin/sh # So that the defined group can access the data umask 007 # In case the process calling apt-get had some paths defined, they might # not be what FSVS expects. # Re-set the defaults. export FSVS_CONF=/etc/fsvs export FSVS_WAA=/var/spool/fsvs/ # Possibly run this script or FSVS via env(1)? # Would clean *all* FSVS_* variables. # Tell the author as "apt", because we're called by apt-get. fsvs ci -o author=apt /etc -m "${1:-Auto-commit after dpkg}" -q fsvs-1.2.6/example/var/lib/fsvs-versioning/scripts/shadow-clean.pl0000755000202400020240000000031511002027000024153 0ustar marekmarek#!/usr/bin/perl # Replaces the password in shadow-like files # Keeps single-character values (for deactivated etc.) while (<>) { @f=split(/(:)/); $f[2]='-' if length($f[2]) > 1; print join("", @f); } fsvs-1.2.6/example/debian/0000755000202400020240000000000012554717236014340 5ustar marekmarekfsvs-1.2.6/example/debian/README0000644000202400020240000000051611345711452015211 0ustar marekmarekThe described files are part of a monitoring-etc-setup on a Debian/Ubuntu linuxhost. 
- ./apt.conf.d contains configuration for the apt-hook - ./cron.d contains a trigger for the fsvs cron-job - ./etc contains fsvs config files - ./etc/ssl, fsvs config file for access of secured repositories - ./ignore, a sample ignore ruleset fsvs-1.2.6/example/debian/etc/0000755000202400020240000000000012554717236015113 5ustar marekmarekfsvs-1.2.6/example/debian/etc/config0000644000202400020240000000002211332532272016261 0ustar marekmarekauthor=$SUDO_USER fsvs-1.2.6/example/debian/etc/ssl/0000755000202400020240000000000012554717236015714 5ustar marekmarekfsvs-1.2.6/example/debian/etc/ssl/servers0000644000202400020240000000033511332532272017315 0ustar marekmarek[groups] fsvs = fsvs.repository.host [fsvs] ssl-client-cert-file = /etc/ssl/private/newcert.p12 ssl-client-cert-password = 1k3kl0aU [global] ssl-authority-files = /etc/ssl/default/cacert.pem store-plaintext-passwords=yes fsvs-1.2.6/example/debian/apt.conf.d/0000755000202400020240000000000012554717235016271 5ustar marekmarekfsvs-1.2.6/example/debian/apt.conf.d/75fsvs0000644000202400020240000000007611332532272017341 0ustar marekmarekDpkg::Post-Invoke { "/usr/share/fsvs/scripts/apt-hook.py"; }; fsvs-1.2.6/example/debian/scripts/0000755000202400020240000000000012554717235016026 5ustar marekmarekfsvs-1.2.6/example/debian/scripts/apt-hook.py0000755000202400020240000000505011332532272020112 0ustar marekmarek#!/usr/bin/env python import sys, commands, subprocess from os import stat from os import path import string msg_prfx = 'fsvs-apt-hook_' def getLine(list): result = [] for i in list: if ('Removing' in i) or ('Setting up' in i) or ('Purging' in i) or ('Configuring' in i): line = string.replace(i, '\r', '') result.append(line) return result def getLastAptAction(): logfn = '/var/log/apt/term.log' try: FILE = open(logfn, 'r') except: print 'could not open file' lineList = FILE.readlines() length = len(lineList) FILE.close() result = [] curline = lineList[-1] if 'Log ended:' in curline: cond = False i = 1 while cond 
== False and (length-i)>0: i+=1 curline = lineList[length-i] if not 'Log started:' in curline: result.insert(1,curline) else: cond = True msg = getLine(result) msg.insert(0, msg_prfx + 'last-apt-action:\n') return(msg) def getDpkgFiles(): cmd = 'dpkg-deb --contents %s' % pkg_file print cmd try: out = commands.getoutput(cmd) except: print 'exception running %s' % cmd exit() list = string.split(out, '\n') print list[1] """ gets "fsvs st" state for working copy / """ def getFsvsStatus(): cmd = 'fsvs st /' out = commands.getoutput(cmd) list = string.split(out, '\n') return list def getConfigChanges(): list = getFsvsStatus() if len(list) > 0: print('The following is a list of files that are changed on dpkg-tasks:') for i in list: print i res = raw_input('Do you want to commit these files? (y/N)') if res.lower() == 'y': return True else: return False def ciConfigChanges(msg): ci_file = '/tmp/fsvs_cm' try: FILE = open(ci_file, 'w') except: print 'could not open file %s' % ci_file for line in msg: FILE.write(line) FILE.close() args =['fsvs', 'ci', '/', '-F', ci_file] res = subprocess.Popen(args) def checkFsvsEnviron(): """ check fsvs bin availability """ if not path.exists('/usr/bin/fsvs'): print msg_prfx + 'error: no instance of fsvs found' quit() """ check fsvs configuration """ cmd = 'fsvs / urls dump' if not len(commands.getoutput(cmd)) > 0: print msg_prfx + 'error: no urls defined for /' quit() """ check fsvs connectivitiy to repo """ cmd = 'fsvs / remote-status' if commands.getstatusoutput(cmd) == '1': print msg_prfx + 'error: no repo available' quit() if __name__ == '__main__': checkFsvsEnviron() commitmsg = getLastAptAction() if getConfigChanges(): ciConfigChanges(commitmsg) fsvs-1.2.6/example/debian/scripts/fsvs-cron0000644000202400020240000000113011436765113017660 0ustar marekmarek#!/bin/sh set -e FSVS_BIN=$(which fsvs) FSVS_OPTS="-ostop_change=true -odir_exclude_mtime=true -ofilter=mtime,text,owner,mode,group,new,deleted" if ! 
$FSVS_BIN st / $FSVS_OPTS;then echo "fsvs has detected changes in monitored directories." echo "" echo "changed files:" echo "---------------------------------------------------" echo "" $FSVS_BIN st / echo "" echo "user last logged in:" echo "---------------------------------------------------" echo "" last -n 3 echo "" echo "diff of the files changed:" echo "----------------------------------------------------" echo "" $FSVS_BIN diff / fi fsvs-1.2.6/example/debian/ignore0000644000202400020240000000026011436765113015537 0ustar marekmarekignore,m:004:000 ignore,/**.gz ignore,/**.bz2 ignore,/**.zip ignore,/**.rar ignore,/etc/fsvs ignore,/etc/resolv.conf ignore,/etc/mtab ignore,/etc/adjtime take,/etc/ ignore,/** fsvs-1.2.6/example/debian/cron.d/0000755000202400020240000000000012554717235015522 5ustar marekmarekfsvs-1.2.6/example/debian/cron.d/fsvs0000644000202400020240000000037511345711452016423 0ustar marekmarek# # Cron Job for fsvs # SHELL=/bin/sh MAILTO="" MAILFROM="From: FSVS Monitoring " MAILSUBJECT="fsvs file monitoring on localhost" # */1 * * * * root /usr/share/fsvs/scripts/fsvs-cron 2>&1 | mail -a "$MAILFROM" -s "$MAILSUBJECT" $MAILTO fsvs-1.2.6/CHANGES0000644000202400020240000000532412467104255012454 0ustar marekmarekChanges since 1.2.6 - Updates for GCC 5 Changes since 1.2.5 - Fix for segfault on deleted properties, eg. "svn:owner". - configure.in fix for OS X Lion with clang; thanks, Ryan! http://fsvs.tigris.org/issues/show_bug.cgi?id=16 - Removed nested functions, to make the stack non-executable (gcc needs trampoline code). See http://fsvs.tigris.org/issues/show_bug.cgi?id=17. Changes since 1.2.4 - Bugfix: auto-props not applied for explicitly specified entries. Thanks to Peter for the detailed bug report! Please note that the auto-props _only_ get applied if there are _no_ properties on an entry set (yet); so, after fsvs prop-set file property... the auto-props will _not_ be applied (as they might overwrite the manually set properties). 
Changes since 1.2.3 - Compilation fixes for MacOS 10.6; thanks, Thomas! - Added "password" option, as sent by Mark. Thank you! - Workarounds for gcc-4.5 and gcc-4.6 regressions. Thank you, Brian! - Compatibility with autoconf 2.68. Changes since 1.2.2 - Tried to get configuration/compilation to work with OSX 10.6. Thanks, Florian. - Fix for a stray "fstat64", which compilation for MacOSX10.4. Thank you, Mike. - Fix length calculation bug, found by Mark via a (bad?) compilation warning. Thank you! Changes since 1.2.1 - Documentation fixes. Thank you, Gunnar. - Fixed config_dir, so that using other authentication paths work. Previously $CONF/auth was fixed; better default. - Fix "unversion" on the wc root. - Fix "." as only parameter when started from the root. - Two compile fixes; thank you, Stan! - Solaris 10 compatibility fixes. Thank you, Stan! - Fix SIGPIPE handling. - Don't do the "_base" symlink; it breaks eg. "grep -r /etc". Write an readme instead. - Fix ENOMEM because of still mapped file data; thank you, Mark! - New option "dir_exclude_mtime". Thank you, Gunnar! Changes since 1.2.0 - Documentation updates. - Fixed some small bugs - The secondary URL/revision file doesn't have to exist. Thank you, Mark! - Fix recursive behaviour of "_build-new-list". - Now supports arbitrary "svn+" tunnels, like subversion does. Thank you, Jake. - "fsvs log -v" for now filters the changed entries list, and shows the paths relative to the parameter. - Fixed "-o verbose=all" output; would be interpreted as "totally silent" because of signed compares. - Better out-of-date messages. - Make 'ext-tests' work with debian /bin/sh => dash, too. - Compatibility fixes for subversion 1.6.4. - Fix tempfile being left after FSVS run. - Bugfix: on commit empty property hashes got created. Thank you, Bogdan. - Bugfix for selection of entries (filter bit) - Bugfixes for non-UTF8 locales and update/sync. Thank you, Gunnar. - Additional configure check for Solaris. Thank you, Mark. 
fsvs-1.2.6/src/0000755000202400020240000000000012554717235012251 5ustar marekmarekfsvs-1.2.6/src/options.c0000644000202400020240000004406111556526402014110 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include "global.h" #include "log.h" #include "interface.h" #include "options.h" #include "helper.h" #include "warnings.h" /** \file * Functions dealing with user settings. */ #define ENV_PREFIX "FSVS_" /* \addtogroup cmds * * \section options * * \code * fsvs options dump * fsvs options help * \endcode * * This command * * There's no \c load, as FSVS wouldn't know where to store the settings. * * Please see \ref options for the list of options, their values and * meanings. * * */ /** A structure to associate a string with an integer. */ struct opt___val_str_t { const char *string; int val; }; /** We don't use the value INT_MIN directly, because then we couldn't use * -1 for "everything." */ #define BITMAP_CLEAR (( (unsigned)INT_MIN) >> 1) #define BITMAP_CLEAR_MASK (BITMAP_CLEAR ^ (BITMAP_CLEAR >> 2)) /** Associate the path options with the enumerated value. * See also \ref o_opt_path */ const struct opt___val_str_t opt___path_strings[]= { { .val=PATH_PARMRELATIVE, .string="parameter"}, { .val=PATH_ABSOLUTE, .string="absolute"}, { .val=PATH_WCRELATIVE, .string="wcroot"}, { .val=PATH_CACHEDENVIRON, .string="environment"}, { .val=PATH_FULLENVIRON, .string="full-environment"}, { .string=NULL, } }; /** Log output strings and bits. 
*/ const struct opt___val_str_t opt___log_output_strings[]= { { .val=LOG__OPT_COLOR, .string="color" }, { .val=LOG__OPT_INDENT, .string="indent" }, { .val=0, .string="normal" }, { .string=NULL, } }; /** Strings for auto/yes/no settings. * * Don't change the order without changing all users! */ const struct opt___val_str_t opt___yes_no[]= { { .val=OPT__YES, .string="yes" }, { .val=OPT__YES, .string="true" }, { .val=OPT__YES, .string="on" }, { .val=OPT__NO, .string="no" }, { .val=OPT__NO, .string="off" }, { .val=OPT__NO, .string="false" }, { .string=NULL, } }; /* Why doesn't this work?? */ const struct opt___val_str_t *opt___no_words = opt___yes_no+3; /** Filter strings and bits. * \ref glob_opt_filter. */ const struct opt___val_str_t opt___filter_strings[]= { { .val=FILTER__ALL, .string="any" }, { .val=FS_CHANGED | FS_NEW | FS_REMOVED, .string="text" }, { .val=FS_META_CHANGED, .string="meta" }, { .val=FS_META_MTIME, .string="mtime" }, { .val=FS_META_OWNER, .string="owner" }, { .val=FS_META_UMODE, .string="mode" }, { .val=FS_META_GROUP, .string="group" }, { .val=FS_NEW, .string="new" }, { .val=FS_CHANGED, .string="changed" }, { .val=FS_REMOVED, .string="deleted" }, { .val=FS_REMOVED, .string="removed" }, { .val=FS__CHANGE_MASK, .string="default" }, { .val=FS__CHANGE_MASK, .string="def" }, { .val=0, .string="none" }, { .string=NULL, } }; /** Change detection strings. * \ref o_chcheck. */ const struct opt___val_str_t opt___chcheck_strings[]= { { .val=0, .string="none" }, { .val=CHCHECK_FILE, .string="file_mtime" }, { .val=CHCHECK_DIRS, .string="dir" }, { .val=CHCHECK_ALLFILES, .string="allfiles" }, { .val=-1, .string="full" }, }; /** Verbosity strings * \ref o_verbose. 
*/ const struct opt___val_str_t opt___verbosity_strings[]= { { .val=VERBOSITY_VERYQUIET | BITMAP_CLEAR, .string="none" }, { .val=VERBOSITY_VERYQUIET | BITMAP_CLEAR, .string="veryquiet" }, { .val=VERBOSITY_QUIET | BITMAP_CLEAR, .string="quiet" }, { .val=VERBOSITY_SHOWCHG, .string="changes" }, { .val=VERBOSITY_SHOWCHG, .string="status" }, { .val=VERBOSITY_SHOWSIZE, .string="size" }, { .val=VERBOSITY_SHOWNAME, .string="path" }, { .val=VERBOSITY_SHOWNAME, .string="name" }, { .val=VERBOSITY_SHOWTIME, .string="time" }, { .val=VERBOSITY_DEFAULT, .string="default" }, { .val=VERBOSITY_TOP_URL, .string="url" }, { .val=VERBOSITY_ALL_URLS | VERBOSITY_TOP_URL, .string="urls" }, { .val=VERBOSITY_COPYFROM, .string="copyfrom" }, { .val=VERBOSITY_GROUP, .string="group" }, { .val=VERBOSITY_STACKTRACE, .string="stack" }, { .val=VERBOSITY_STACKTRACE, .string="backtrace" }, { .val=VERBOSITY_STACKTRACE, .string="stacktrace" }, { .val=-1, .string="all" }, }; /** Delay action names. * See \ref o_delay. */ const struct opt___val_str_t opt___delay_strings[]= { { .val=DELAY_COMMIT, .string="commit" }, { .val=DELAY_UPDATE, .string="update" }, { .val=DELAY_REVERT, .string="revert" }, { .val=DELAY_CHECKOUT, .string="checkout" }, { .val=-1, .string="yes" }, { .val=0, .string="no" }, { .string=NULL, } }; /** Conflict resolution options. * See \ref o_conflict. */ const struct opt___val_str_t opt___conflict_strings[]= { { .val=CONFLICT_STOP, .string="stop" }, { .val=CONFLICT_LOCAL, .string="local" }, { .val=CONFLICT_REMOTE, .string="remote" }, { .val=CONFLICT_BOTH, .string="both" }, { .val=CONFLICT_MERGE, .string="merge" }, { .string=NULL, } }; /** \name Predeclare some functions. 
* @{ */ opt___parse_t opt___string2val; opt___parse_t opt___strings2bitmap; opt___parse_t opt___strings2empty_bm; opt___parse_t opt___store_string; opt___parse_t opt___store_env_noempty; opt___parse_t opt___normalized_path; opt___parse_t opt___parse_warnings; opt___parse_t opt___atoi; opt___parse_t opt___debug_buffer; /** @} */ /** * Must be visible, so that the inline function have direct accecss. * * As delimiter should \c '_' be used; as the comparision is done via * hlp__strncmp_uline_eq_dash(), the user can also use '-'. */ struct opt__list_t opt__list[OPT__COUNT]= { [OPT__PATH] = { .name="path", .i_val=PATH_PARMRELATIVE, .parse=opt___string2val, .parm=opt___path_strings, }, [OPT__LOG_MAXREV] = { .name="limit", .i_val=0, .parse=opt___atoi, }, [OPT__LOG_OUTPUT] = { .name="log_output", .i_val=LOG__OPT_DEFAULT, .parse=opt___strings2empty_bm, .parm=opt___log_output_strings, }, [OPT__COLORDIFF] = { .name="colordiff", .cp_val=NULL, .parse=opt___store_string, }, [OPT__DIR_SORT] = { .name="dir_sort", .i_val=OPT__NO, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__STATUS_COLOR] = { .name="stat_color", .i_val=OPT__NO, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__STOP_ON_CHANGE] = { .name="stop_change", .i_val=OPT__NO, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__DIR_EXCLUDE_MTIME] = { .name="dir_exclude_mtime", .i_val=OPT__NO, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__FILTER] = { .name="filter", .i_val=0, .parse=opt___strings2bitmap, .parm=opt___filter_strings, }, [OPT__CHANGECHECK] = { .name="change_check", .i_val=CHCHECK_FILE, .parse=opt___strings2bitmap, .parm=opt___chcheck_strings, }, [OPT__ALL_REMOVED] = { .name="all_removed", .i_val=OPT__YES, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__VERBOSE] = { .name="verbose", .i_val=VERBOSITY_DEFAULT, .parse=opt___strings2bitmap, .parm=opt___verbosity_strings, }, [OPT__DEBUG_OUTPUT] = { .name="debug_output", .cp_val=NULL, .parse=opt___store_string, }, [OPT__DEBUG_BUFFER] = { 
.name="debug_buffer", .i_val=0, .parse=opt___debug_buffer, }, [OPT__GROUP_STATS] = { .name="group_stats", .i_val=OPT__NO, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__CONFLICT] = { .name="conflict", .i_val=CONFLICT_MERGE, .parse=opt___string2val, .parm=opt___conflict_strings, }, [OPT__MERGE_PRG] = { .name="merge_prg", .cp_val="diff3", .parse=opt___store_string, }, [OPT__MERGE_OPT] = { .name="merge_opt", .cp_val="-m", .parse=opt___store_string, }, [OPT__DIFF_PRG] = { .name="diff_prg", .cp_val="diff", .parse=opt___store_string, }, [OPT__DIFF_OPT] = { .name="diff_opt", .cp_val="-pu", .parse=opt___store_string, }, [OPT__DIFF_EXTRA] = { .name="diff_extra", .cp_val=NULL, .parse=opt___store_string, }, [OPT__WARNINGS] = { .name="warning", .parse=opt___parse_warnings, }, [OPT__SOFTROOT] = { .name="softroot", .cp_val=NULL, .parse=opt___normalized_path, }, [OPT__MKDIR_BASE] = { .name="mkdir_base", .i_val=OPT__NO, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__COMMIT_TO] = { .name="commit_to", .cp_val=NULL, .parse=opt___store_string, }, [OPT__AUTHOR] = { .name="author", .cp_val="", .parse=opt___store_env_noempty, }, [OPT__PASSWD] = { .name="password", .cp_val="", .parse=opt___store_string, }, /* I thought about using opt___normalized_path() for these two; but that * would be a change in behaviour. */ [OPT__WAA_PATH] = { .name="waa", .parse=opt___store_string, /* Doing that here gives a warning "initializer not constant". .cp_val=DEFAULT_WAA_PATH, .i_val=strlen(DEFAULT_WAA_PATH), */ .cp_val=NULL, .i_val=0, }, [OPT__CONF_PATH] = { .name="conf", .parse=opt___store_string, /* Doing that here gives a warning "initializer not constant". 
.cp_val=DEFAULT_CONF_PATH, .i_val=strlen(DEFAULT_CONF_PATH), */ .cp_val=NULL, .i_val=0, }, [OPT__CONFIG_DIR] = { .name="config_dir", .parse=opt___store_string, .cp_val=NULL, .i_val=0, }, [OPT__EMPTY_COMMIT] = { .name="empty_commit", .i_val=OPT__YES, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__EMPTY_MESSAGE] = { .name="empty_message", .i_val=OPT__YES, .parse=opt___string2val, .parm=opt___yes_no, }, [OPT__DELAY] = { .name="delay", .i_val=OPT__NO, .parse=opt___strings2empty_bm, .parm=opt___delay_strings, }, [OPT__COPYFROM_EXP] = { .name="copyfrom_exp", .i_val=OPT__YES, .parse=opt___string2val, .parm=opt___yes_no, }, }; /** Get the debugbuffer size, round and test for minimum size. * The value is in KB; we round up to a 4kB size, and make it at least 8k. * A value of \c 0 means \b off. * */ int opt___debug_buffer(struct opt__list_t *ent, char *string, enum opt__prio_e prio UNUSED) { #ifndef ENABLE_DEBUGBUFFER int status; STOPIF(EINVAL, "!The debugbuffer option is not available, because\n" "fmemopen() was not found during compilation."); ex: return status; #else char *l; int i; i=strtol(string, &l, 0); if (*l) return EINVAL; if (i) { if (i<4) i=4; else /* Round up. */ i = (i+3) & ~3; i *= 1024; } ent->i_val=i; return 0; #endif } /** Get an integer value directly. */ int opt___atoi(struct opt__list_t *ent, char *string, enum opt__prio_e prio UNUSED) { char *l; ent->i_val=strtol(string, &l, 0); if (*l) return EINVAL; return 0; } /** Find an integer value by comparing with predefined strings. */ inline int opt___find_string(const struct opt___val_str_t *list, const char *string, int *result) { for(; list->string; list++) { if (strcmp(string, list->string) == 0) { *result = list->val; return 0; } } return EINVAL; } /** Set an integer value by comparing with some strings. 
*/ int opt___string2val(struct opt__list_t *ent, char *string, enum opt__prio_e prio UNUSED) { int i; int status; STOPIF( opt___find_string(ent->parm, string, &i), NULL); ent->i_val=i; ex: return status; } /** Convert a string into a list of words, and \c OR their associated * values together. * With an association of \c 0, or if BITMAP_CLEAR is set, the value is * resetted. */ int opt___strings2bitmap(struct opt__list_t *ent, char *string, enum opt__prio_e prio UNUSED) { static const char delim[]=";,:/"; int status; int val, i; char buffer[strlen(string)+1]; char *cp; status=0; /* We make a local copy, so we can use strsep(). */ strcpy(buffer, string); string=buffer; val=ent->i_val; DEBUGP("Bitmap starting with 0x%X, from %s", val, string); while ( (cp=strsep(&string, delim)) ) { /* Return errors quietly. */ status=opt___find_string(ent->parm, cp, &i); if (status) goto ex; if (i == 0 || ((i & BITMAP_CLEAR_MASK) == BITMAP_CLEAR)) val=0; else val |= i; } DEBUGP("New bitmap is 0x%X", val); ent->i_val=val; ex: return status; } /** The same as opt___strings2bitmap(), but starting with a zero value on * each parsed value. * */ int opt___strings2empty_bm(struct opt__list_t *ent, char *string, enum opt__prio_e prio) { ent->i_val=0; return opt___strings2bitmap(ent, string, prio); } /** Simple store a copy of the string. */ int opt___store_string(struct opt__list_t *ent, char *string, enum opt__prio_e prio UNUSED) { int status; ent->i_val=strlen(string); /* strdup() would work, but count again. */ /* This initial write has to be done, so cast the const away. */ STOPIF( hlp__strnalloc(ent->i_val, (char**)&ent->cp_val, string), NULL); ex: return status; } /** Store a string, or expand a (non-empty) environment variable. */ int opt___store_env_noempty(struct opt__list_t *ent, char *string, enum opt__prio_e prio) { /* Not ideal - makes a copy of an environment variable, that * wouldn't be needed. 
*/ if (string[0] == '$') string=getenv(string+1); if (!string || !*string) return 0; return opt___store_string(ent, string, prio); } /** Parse warning settings. */ int opt___parse_warnings(struct opt__list_t *ent, char *string, enum opt__prio_e prio) { int status; STOPIF( wa__split_process(string, prio), NULL); ex: return status; } /** -. * If the given priority is at least equal to the current value, parse the * strng and set the value. */ int opt__parse_option(enum opt__settings_e which, enum opt__prio_e prio, char *string) { int status; struct opt__list_t *ent; status=0; string=hlp__skip_ws(string); ent=opt__list+which; if (ent->prio <= prio) { STOPIF( ent->parse(ent, string, prio), "!Parsing value '%s' for option '%s' failed.", string, ent->name); ent->prio=prio; } ex: return status; } /** -. * If the \a value is \c NULL, try to split the \a key on a \c =. * Then find the matching option, and set its value (depending on the given * priority). */ int opt__parse(char *key, char *value, enum opt__prio_e prio, int quiet_errors) { int status; int klen; int i; status=0; /* Normalize. */ key=hlp__skip_ws(key); /* If no value given ... */ if (!value) { value=strchr(key, '='); STOPIF_CODE_ERR(!value, EINVAL, "!Cannot find value in string '%s'.", key); klen=value-key; value++; } else klen=strlen(key); while (klen && isspace(key[klen])) klen--; value=hlp__skip_ws(value); // DEBUGP("Got %*s=%s", klen, key, value); /* Find option. */ for(i=0; i0 && path[p] == PATH_SEPARATOR) path[p--]=0; if (p > 0) return opt___store_string(ent, path, prio); else return EINVAL; } /** -. * */ int opt__help(struct estat *root, int argc, char *argv[]) { return EBUSY; } /** -. * Invalid values are handled by returning \c 1, ie. they don't say \c off. * */ int opt__doesnt_say_off(const char *string) { int i; i=OPT__YES; if (opt___find_string(opt___yes_no+3, string, &i)) return 1; return i; } /** -. 
* * \todo Maybe the variable reading should be changed to use this code, and * loop via \c getenv() over all options? */ char *opt__variable_from_option(enum opt__settings_e which) { static char buffer[ sizeof(ENV_PREFIX) + sizeof(opt__list[0].name) + 1] = ENV_PREFIX; char * const target=buffer+strlen(ENV_PREFIX); int i; i=0; while ( (target[i] = toupper(opt__list[which].name[i])) ) i++; return buffer; } fsvs-1.2.6/src/add_unvers.c0000644000202400020240000001670011320470254014536 0ustar marekmarek/*********************************************************************** * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include "global.h" #include "add_unvers.h" #include "status.h" #include "ignore.h" #include "warnings.h" #include "est_ops.h" #include "url.h" #include "helper.h" #include "waa.h" /** \file * \ref add and \ref unversion action. * */ /** * \addtogroup cmds * * \section add * * \code * fsvs add [-u URLNAME] PATH [PATH...] * \endcode * * With this command you can explicitly define entries to be versioned, * even if they have a matching ignore pattern. * They will be sent to the repository on the next commit, just like * other new entries, and will therefore be reported as \e New . * * The \c -u option can be used if you're have more than one URL defined * for this working copy and want to have the entries pinned to the this * URL. * * \subsection add_ex Example * Say, you're versioning your home directory, and gave an ignore pattern * of ./.* to ignore all .* entries in your home-directory. * Now you want .bashrc, .ssh/config, and your complete * .kde3-tree saved, just like other data. 
* * So you tell fsvs to not ignore these entries: * \code * fsvs add .bashrc .ssh/config .kde3 * \endcode * Now the entries below .kde3 would match your earlier * ./.* pattern (as a match at the beginning is sufficient), * so you have to insert a negative ignore pattern (a \e take pattern): * \code * fsvs ignore prepend t./.kde3 * \endcode * Now a fsvs st would show your entries as * \e New , and the next commit will send them to the repository. * * */ /** * \addtogroup cmds * * \section unversion * * \code * fsvs unversion PATH [PATH...] * \endcode * * This command flags the given paths locally as removed. * On the next commit they will be deleted in the repository, and the local * information of them will be removed, but not the entries themselves. * So they will show up as \e New again, and you get another chance * at ignoring them. * * \subsection unvers_ex Example * * Say, you're versioning your home directory, and found that you no longer * want .bash_history and .sh_history versioned. * So you do * \code * fsvs unversion .bash_history .sh_history * \endcode * and these files will be reported as \c d (will be deleted, but only in the * repository). * * Then you do a * \code * fsvs commit * \endcode * * Now fsvs would report these files as \c New , as it does no longer know * anything about them; but that can be cured by * \code * fsvs ignore "./.*sh_history" * \endcode * Now these two files won't be shown as \e New , either. * * The example also shows why the given paths are not just entered as * separate ignore patterns - they are just single cases of a * (probably) much broader pattern. * * \note If you didn't use some kind of escaping for the pattern, the shell * would expand it to the actual filenames, which is (normally) not what you * want. * * */ /** \defgroup howto_add_unv Semantics for an added/unversioned entry * \ingroup userdoc * * Here's a small overview for the \ref add and \ref unversion actions. 
* * - Unversion: * The entry to-be-unversioned will be deleted from the repository and the * local waa cache, but not from disk. It should match an ignore pattern, * so that it doesn't get committed the next time. * - Add: * An added entry is required on commit - else the user told to store * something which does not exist, and that's an error. * * \section add_unvers_status Status display * * *
Exists in fs -> YES NO *
not seen before \c N \c - *
committed \c C, \c R \c D *
unversioned \c d \c d (or D?, or with !?) *
added \c n \c n (with !) *
* * * If an entry is added, then unversioned, we remove it completely * from our list. We detect that by the RF_NOT_COMMITTED flag. * Likewise for an unversioned, then added, entry. * * Please see also the \ref add command and the \ref unversion command. * */ /** General function for \ref add and \ref unversion actions. * This one really handles the entries. */ int au__action(struct estat *sts) { int status; int old; int mask=RF_ADD | RF_UNVERSION; STOPIF_CODE_ERR(!sts->parent, EINVAL, "!Using %s on the working copy root doesn't make sense.", action->name[0]); status=0; /* This should only be done once ... but as it could be called by others, * without having action->i_val the correct value, we check on every * call. * After all it's just two compares, and only for debugging ... */ BUG_ON( action->i_val != RF_UNVERSION && action->i_val != RF_ADD ); old=sts->flags & mask; /* We set the new value for output, and possibly remove the entry * afterwards. */ sts->flags = (sts->flags & ~mask) | action->i_val; DEBUGP("changing flags: has now %X", sts->flags); STOPIF( st__status(sts), NULL); /* If we have an entry which was added *and* unversioned (or vice versa), * but * 1) has never been committed, we remove it from the list; * 2) is a normal, used entry, we delete the flags. * * Should we print "....." in case 2? Currently we show that it's being * added/unversioned again. */ if (((sts->flags ^ old) & mask) == mask) { if (!sts->repos_rev) STOPIF( ops__delete_entry(sts->parent, sts, UNKNOWN_INDEX, UNKNOWN_INDEX), NULL); else sts->flags &= ~mask; } if (sts->flags & RF_ADD) { /* Get the group. */ STOPIF( ign__is_ignore(sts, &old), NULL); /* We don't want to know whether it's ignored, so we just discard the * ignore flag. */ STOPIF( ops__apply_group(sts, NULL, NULL), NULL); /* And we don't want to ignore it, even if ops__apply_group() only * found an ignore pattern, thank you so much. 
*/ sts->to_be_ignored=0; } if ((sts->flags & mask) == RF_ADD) sts->url=current_url; ex: return status; } /** -. * */ int au__prepare_for_added(void) { int status; STOPIF( url__load_list(NULL, 0), NULL); STOPIF( url__mark_todo(), NULL); STOPIF_CODE_ERR( url__parm_list_used>1, EINVAL, "!At most a single destination URL may be given."); if (url__parm_list_used) { STOPIF(url__find_by_name(url__parm_list[0], ¤t_url), "!No URL with name \"%s\" defined.", url__parm_list[0]); DEBUGP("URL to add to: %s", current_url->url); } else current_url=NULL; /* We need the groups, to assign the auto-props. */ STOPIF( ign__load_list(NULL), NULL); ex: return status; } /** -. * */ int au__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; /* *Only* do the selected elements. */ opt_recursive=-1; /* Would it make sense to do "-=2" instead, so that the user could override * that and really add/unversion more than single elements? */ STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); STOPIF( au__prepare_for_added(), NULL); STOPIF( waa__read_or_build_tree(root, argc, normalized, argv, NULL, 0), NULL); STOPIF( waa__output_tree(root), NULL); ex: return status; } fsvs-1.2.6/src/update.h0000644000202400020240000000520211024410554013664 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __UPDATE_H__ #define __UPDATE_H__ #include "actions.h" /** \file * \ref update action header file. */ /** Main \ref update worker function. */ work_t up__work; /** Parse subversion properties for the given entry. 
*/ int up__parse_prop(struct estat *sts, const char *utf8_name, const svn_string_t *utf8_value, int *not_handled, apr_pool_t *pool); /** Set the meta-data for this entry. */ int up__set_meta_data(struct estat *sts, const char *filename); /** \name The delta-editor functions. * These are being used for remote-status. */ /** @{ */ svn_error_t *up__set_target_revision(void *edit_baton, svn_revnum_t rev, apr_pool_t *pool); svn_error_t *up__open_root(void *edit_baton, svn_revnum_t base_revision, apr_pool_t *dir_pool UNUSED, void **root_baton); svn_error_t *up__add_directory(const char *path, void *parent_baton, const char *copy_path, svn_revnum_t copy_rev, apr_pool_t *dir_pool UNUSED, void **child_baton); svn_error_t *up__change_dir_prop(void *dir_baton, const char *name, const svn_string_t *value, apr_pool_t *pool UNUSED); svn_error_t *up__close_directory( void *dir_baton, apr_pool_t *pool); svn_error_t *up__absent_directory(const char *path, void *parent_baton, apr_pool_t *pool); svn_error_t *up__add_file(const char *path, void *parent_baton, const char *copy_path, svn_revnum_t copy_rev, apr_pool_t *file_pool, void **file_baton); svn_error_t *up__apply_textdelta(void *file_baton, const char *base_checksum, apr_pool_t *pool, svn_txdelta_window_handler_t *handler, void **handler_baton); svn_error_t *up__change_file_prop(void *file_baton, const char *name, const svn_string_t *value, apr_pool_t *pool); svn_error_t *up__close_file(void *file_baton, const char *text_checksum, apr_pool_t *pool UNUSED); svn_error_t *up__absent_file(const char *path, void *parent_baton, apr_pool_t *pool); svn_error_t *up__close_edit(void *edit_baton, apr_pool_t *pool); svn_error_t *up__abort_edit(void *edit_baton, apr_pool_t *pool); /** @} */ int up__handle_special(struct estat *sts, char *path, char *data, apr_pool_t *pool); int up__unlink(struct estat *sts, char *filename); int up__rmdir(struct estat *sts, struct url_t *url); int up__fetch_decoder(struct estat *sts); #endif 
fsvs-1.2.6/src/log.h0000644000202400020240000000136510756467655013222 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __LOG_H__ #define __LOG_H__ #include "actions.h" /** \file * \ref log action header file. */ /** Prints the given log messages. */ work_t log__work; /** \name Log option bits. * The colorized cg-log output is nice! * @{ */ #define LOG__OPT_COLOR (1) #define LOG__OPT_INDENT (2) /** @} */ #define LOG__OPT_DEFAULT (LOG__OPT_COLOR | LOG__OPT_INDENT) #endif fsvs-1.2.6/src/helper.h0000644000202400020240000001701211335522035013666 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __HELPER_H__ #define __HELPER_H__ #include #include "global.h" #include "options.h" /** \file * Helper functions header file. */ static const char hex_chars[] = "0123456789abcdef"; #define _bin2hex(b, h) do { \ *(h++) = hex_chars[*b >> 4]; \ *(h++) = hex_chars[*b & 0x0f]; \ b++; \ } while(0) #define Mbin2hex(bin, hex, len) do { \ switch(len) \ { \ case 4: _bin2hex(bin, hex); \ case 3: _bin2hex(bin, hex); \ case 2: _bin2hex(bin, hex); \ case 1: _bin2hex(bin, hex); \ case 0: break; \ default: \ { int i=len; \ while (i--) \ _bin2hex(bin, hex); \ } \ } \ \ *hex='\0'; \ } while(0) /** Converts a string with local encoding to \a UTF8, suitable for storage * in the repository. 
*/ int hlp__local2utf8(const char* local_string, char** utf8_string, int len); /** Converts an \a UTF8 string to local encoding, to make it printable on * the * current console. */ int hlp__utf82local(const char* utf8_string, char** local_string, int len); void hlp__copy_stats(struct stat *src, struct sstat_t *dest); int hlp__lstat(const char *fn, struct sstat_t *st); int hlp__fstat(int fd, struct sstat_t *st); /** A function like \a strcpy, but cleaning up paths. */ char *hlp__pathcopy (char *dst, int *len, ...) __attribute__((sentinel)) ; /** Parses a string to a revision number. */ int hlp__parse_rev(char *stg, char **eos, svn_revnum_t *rev); /** Reads a string from the given \c input into the (self-managed) buffer * \a string, removes \\r and/or \\n at the end, and depending on \a flags * strips whitespace or comments. */ int hlp__string_from_filep(FILE *input, char **string, char **eos, int flags); /** If this bit is set, whitespace at the start and the end is removed. */ #define SFF_WHITESPACE (1) /** With this flag comment lines (with \c '#' as first non-whitespace * character) are ignored. */ #define SFF_COMMENT (2) /** Can/should be used after opening the file. */ #define SFF_RESET_LINENUM (0x4000) /** Get the line number of the last (current) \c FILE*, as return value. */ #define SFF_GET_LINENUM (0x8000) /** Returns the name of the given user. */ const char *hlp__get_uname(uid_t uid, char *not_found); /** Returns the name of the given group. */ const char *hlp__get_grname(gid_t gid, char *not_found); /** Print the given data to \a output, safely converting special characters * to codes like \c \\x1e. */ int hlp__safe_print(FILE *output, char *string, int maxlen); /** \name f_encoder Encoder and decoder * @{ */ /** Blocksize for encoding pipes; we use a not too small value. */ #define ENCODE_BLOCKSIZE (32*1024) /** Structure for an encoding process, with \c svn_stream_t input. * * When we shall give data, we have to feed data. 
* If not all data can be taken, we have to buffer the rest. * (We have to read some data, but don't know how much we can send * further down the chain - so we have to buffer).*/ struct encoder_t { /** Our datasource/sink. */ svn_stream_t *orig; /** Where to put the final md5. */ md5_digest_t *output_md5; /** The un-encoded data digest (context). */ apr_md5_ctx_t md5_ctx; /** How many bytes are left to send in this buffer. */ apr_size_t bytes_left; /** PID of child, for \c waitpid(). */ pid_t child; /** Whether we're writing or reading. */ int is_writer; /** STDIN filehandle for child. */ int pipe_in; /** STDOUT filehandle for child. */ int pipe_out; /** Whether we can get more data. */ int eof; /** Where unsent data starts. */ int data_pos; /** The buffer. */ char buffer[ENCODE_BLOCKSIZE]; }; /** Encode \c svn_stream_t filter. */ int hlp__encode_filter(svn_stream_t *s_stream, const char *command, int is_writer, char *path, svn_stream_t **output, struct encoder_t **encoder_out, apr_pool_t *pool); /** @} */ /** Chroot helper function. */ int hlp__chrooter(void); /** Distribute the environment variables on the loaded entries. */ int hlp__match_path_envs(struct estat *root); /** Return a path that gets displayed for the user. */ int hlp__format_path(struct estat *sts, char *wc_relative_path, char **output); /** Find a \c GID by group name, cached. */ int hlp__get_gid(char *group, gid_t *gid, apr_pool_t *pool); /** Find a \c UID by user name, cached. */ int hlp__get_uid(char *user, uid_t *uid, apr_pool_t *pool); /** Returns a string describing the revision number. */ char *hlp__rev_to_string(svn_revnum_t rev); /** Function to compare two strings for \a max bytes, but treating \c '-' * and \c '_' as equal. */ int hlp__strncmp_uline_eq_dash(char *always_ul, char *other, int max); /** \a name is a subversion internal property. */ int hlp__is_special_property_name(const char *name); /** Reads all data from \a stream and drops it. 
*/ int hlp__stream_md5(svn_stream_t *stream, unsigned char md5[APR_MD5_DIGESTSIZE]); /** Delay until time wraps. */ int hlp__delay(time_t start, enum opt__delay_e which); /** Renames a local file to something like .mine. */ int hlp__rename_to_unique(char *fn, char *extension, const char **unique_name, apr_pool_t *pool); /* Some of the following functions use a (void*), but I'd rather use a * (void**) ... Sadly that isn't convertible from eg. some (struct **) - * and so I'd have to use casts everywhere, which wouldn't help the * type-safety anyway. */ /** Allocates a buffer in \a *dest, and copies \a source. */ int hlp__strnalloc(int len, char **dest, const char const *source); /** Like \c hlp__strnalloc, but concatenates strings until a \c NULL is * found. */ int hlp__strmnalloc(int len, char **dest, const char const *source, ...) __attribute__((sentinel)); /** Own implementation of \c strdup(), possibly returning \c ENOMEM. */ inline static int hlp__strdup(char **dest, const char const *src) { return hlp__strnalloc(strlen(src), dest, src); } /** Error returning \c calloc(); uses \c (void**) \a output. */ int hlp__calloc(void *output, size_t nmemb, size_t count); /** Reallocate the \c (void**) \a output. */ int hlp__realloc(void *output, size_t size); /** Allocates a buffer of \a len bytes in \c (void**) \a *dest; can return * \c ENOMEM. */ inline static int hlp__alloc(void *dest, size_t len) { *(void**)dest=NULL; return hlp__realloc(dest, len); } /** Stores the first non-whitespace character position from \a input in \a * word_start, and returns the next whitespace position in \a word_end. */ char* hlp__get_word(char *input, char **word_start); /** Skips all whitespace, returns first non-whitespace character. */ inline static char *hlp__skip_ws(char *input) { while (isspace(*input)) input++; return input; } /** Reads the subversion config file(s), found by \ref o_configdir. 
*/ int hlp__get_svn_config(apr_hash_t **config); /** Algorithm for finding the rightmost 0 bit. * orig i: ... x 0 1 1 1 * i+1: ... x 1 0 0 0 * XOR gives: ... x 1 1 1 1 * AND i+1: ... 0 1 0 0 0 * * Maybe there's an easier way ... don't have "Numerical Recipes" * here with me. */ static inline int hlp__rightmost_0_bit(int i) { return (i ^ (i+1)) & (i+1); } int hlp__compare_string_pointers(const void *a, const void *b); int hlp__only_dir_mtime_changed(struct estat *sts); #endif fsvs-1.2.6/src/update.c0000644000202400020240000010140612152033674013671 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * \ref update action. * * When we've loaded the "old" version from disk, * we fetch the new values from the repository. * * \todo Could be made a bit faster. Either with multiple threads, or * changing the subversion API to get all text-base changes in full-text. * For a small change fsvs could query whole new trees with an "old" update. * */ /** \addtogroup cmds * * \section update * * \code * fsvs update [-r rev] [working copy base] * fsvs update [-u url@rev ...] [working copy base] * \endcode * * This command does an update on the current working copy; per default for * all defined URLs, but you can restrict that via \ref glob_opt_urls "-u". * * It first reads all filelist changes from the repositories, overlays them * (so that only the highest-priority entries are used), and then fetches * all necessary changes. 
* * * \subsection update_to_0 Updating to zero * * If you start an update with a target revision of zero, the entries * belonging to that URL will be removed from your working copy, and the * URL deleted from your URL list. \n * This is a convenient way to replace an URL with another. \n * * \note As FSVS has no full mixed revision support yet, it doesn't know * whether under the removed entry is a lower-priority one with the same * path, which should get visible now. \n * Directories get changed to the highest priority URL that has an entry * below (which might be hidden!). * * Because of this you're advised to either use that only for completely * distinct working copies, or do a \ref sync-repos (and possibly one or * more \ref revert calls) after the update. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "global.h" #include "helper.h" #include "url.h" #include "status.h" #include "racallback.h" #include "props.h" #include "checksum.h" #include "revert.h" #include "warnings.h" #include "est_ops.h" #include "waa.h" #include "commit.h" #include "racallback.h" static char *filename, *filename_tmp=NULL; static unsigned tmp_len=0; /** Prefetch update-pipe property. * In case we're updating an existing file, we won't get \b all properties * sent - only changed. So we have to look for existing properties if we * need them. * */ int up__fetch_decoder(struct estat *sts) { int status, st2; hash_t db; datum value; db=NULL; status=0; /* Need it, but don't have it? */ if (!action->needs_decoder || sts->decoder) goto ex; status=prp__open_byestat(sts, GDBM_READER, &db); if (status == ENOENT) status=0; else { STOPIF(status, NULL); /* Currently we don't need all properties - we just read the ones we * know we'll need. 
*/ if (prp__get(db, propval_updatepipe, &value) == 0) STOPIF( hlp__strdup( &sts->decoder, value.dptr), NULL); } ex: if (db) { st2=hsh__close(db, status); if (!status) STOPIF(st2, NULL); } return status; } /** -. * * If \a not_handled is not \c NULL, it gets set to \c 1 if this property * is \b not handled; so the caller knows that he has to to write the * property into some other storage if he wants to keep it. * * \note \a not_handled does \b not get set to \c 0; pre-populating is left * to the caller. * */ int up__parse_prop(struct estat *sts, const char *utf8_name, const svn_string_t *utf8_value, int *not_handled, apr_pool_t *pool) { char *cp, *loc_name, *loc_value; int i,status; apr_uid_t uid; apr_gid_t gid; apr_time_t at; svn_error_t *status_svn; /* We get the name and value in UTF8. * For the currently used properties it makes no difference; * but see doc/develop/UTF8. */ /* We need the localized name only for debug and error messages; * we still compare the utf8-name, and work with the utf8-data. */ STOPIF( hlp__utf82local(utf8_name, &loc_name, -1), NULL); status=0; if (!utf8_value) { /* A NULL here means that the property got deleted. * That mostly means that we cannot say whether the user * or group changed - the best we can do is not to crash. * Well, one case is special - if it's the propname_special * flag... */ DEBUGP("got NULL property for %s: %s", sts->name, loc_name); //goto ex; loc_value=NULL; } else { STOPIF( hlp__utf82local(utf8_value->data, &loc_value, -1), NULL); DEBUGP("got property for %s: %s=%s", sts->name, loc_name, loc_value); } /* if an invalid utf8_value is detected, we'd better ignore it. * who knows which Pandora's box we'd open ... 
*/ if (0 == strcmp(utf8_name, propname_special) && 0 == strcmp(utf8_value->data, propval_special)) { if (!utf8_value) { sts->st.mode = (sts->st.mode & ~S_IFMT) | S_IFREG; DEBUGP("no longer special"); } else if (TEST_PACKED(S_ISANYSPECIAL, sts->new_rev_mode_packed)) { DEBUGP("already marked as special"); } else { /* Remove any S_IFDIR and similar bits, if it is not already marked * as a special entry. */ if (!(S_ISLNK(PACKED_to_MODE_T(sts->new_rev_mode_packed)) || S_ISCHR(PACKED_to_MODE_T(sts->new_rev_mode_packed)) || S_ISBLK(PACKED_to_MODE_T(sts->new_rev_mode_packed))) ) { sts->st.mode = (sts->st.mode & ~S_IFMT) | S_IFANYSPECIAL; sts->new_rev_mode_packed = MODE_T_to_PACKED(sts->st.mode); } DEBUGP("this is a special node"); } goto ex; } if (!utf8_value) goto ex; if (0 == strcmp(utf8_name, propname_owner)) { /* for user and group we try to find the user name, and fall back * to the uid. */ i=strtoul(utf8_value->data, &cp, 0); if (cp == utf8_value->data) STOPIF( wa__warn(WRN__META_USER_INVALID, EINVAL, "cannot read uid in %s", loc_value), NULL); else { cp=hlp__skip_ws(cp); if (*cp) { status=hlp__get_uid(cp, &uid, pool); if (status == APR_SUCCESS) i=uid; /* If not found, return no error to upper levels */ status=0; } if (sts->st.uid != i) { sts->remote_status |= FS_META_OWNER; if (!action->is_compare) { sts->st.uid = i; DEBUGP("marking owner %s to %d", loc_value, sts->st.uid); } } } } else if (0 == strcmp(utf8_name, propname_group)) { i=strtoul(utf8_value->data, &cp, 0); if (cp == utf8_value->data) STOPIF( wa__warn(WRN__META_USER_INVALID, EINVAL, "cannot read gid in %s", loc_value), NULL); else { cp=hlp__skip_ws(cp); if (*cp) { status=hlp__get_gid(cp, &gid, pool); if (status == APR_SUCCESS) i=gid; status=0; } if (sts->st.gid != i) { sts->remote_status |= FS_META_GROUP; if (!action->is_compare) { sts->st.gid = i; DEBUGP("marking group %s to %d", loc_value, sts->st.gid); } } } } else if (0 == strcmp(utf8_name, propname_mtime)) { status_svn=svn_time_from_cstring(&at, 
utf8_value->data, pool); if (status_svn) STOPIF( wa__warn(WRN__META_MTIME_INVALID, EINVAL, "modification time string invalid: %s", loc_value), NULL); else { if (sts->st.mtim.tv_sec != apr_time_sec(at) || sts->st.mtim.tv_nsec != apr_time_usec(at) * 1000) { sts->remote_status |= FS_META_MTIME; if (!action->is_compare) { /* Currently deactivated. Seems to make more problems than the * reverse behaviour. * -- Take the newer of the two timestamps. */ // if (apr_time_sec(at) >= sts->st.mtim.tv_sec) { sts->st.mtim.tv_sec=apr_time_sec(at); // if (apr_time_usec(at)*1000 > sts->st.mtim.tv_nsec) sts->st.mtim.tv_nsec=apr_time_usec(at) * 1000; } DEBUGP("marking mtime \"%s\" to %24.24s", loc_value, ctime(& (sts->st.mtim.tv_sec) )); } } } } else if (0 == strcmp(utf8_name, propname_umode)) { i=strtoul(utf8_value->data, &cp, 0); if (*cp || i>07777) STOPIF( wa__warn(WRN__META_UMASK_INVALID, EINVAL, "no valid permissions found in %s", loc_value), NULL); else { if ((sts->st.mode & 07777) != i) { sts->remote_status |= FS_META_UMODE; if (!action->is_compare) { sts->st.mode = (sts->st.mode & ~07777) | i; DEBUGP("marking mode \"%s\" to 0%o", loc_value, sts->st.mode & 07777); } } } } else if (0 == strcmp(utf8_name, propname_origmd5)) { /* Depending on the order of the properties we might not know whether * this is a special node or a regular file; so we only disallow that * for directories. */ BUG_ON(S_ISDIR(sts->st.mode)); STOPIF( cs__char2md5( utf8_value->data, NULL, sts->md5), NULL); DEBUGP("got a orig-md5: %s", cs__md5tohex_buffered(sts->md5)); sts->has_orig_md5=1; } else { if (strcmp(utf8_name, propval_updatepipe) == 0) { if (action->needs_decoder) { /* Currently we assume that programs (update- and commit-pipe) are * valid regardless of codeset; that wouldn't work as soon as the * programs' names includes UTF-8. * * \todo utf8->local?? 
*/ STOPIF( hlp__strdup( &sts->decoder, utf8_value->data), NULL); sts->decoder_is_correct=1; DEBUGP("got a decoder: %s", sts->decoder); } } /* Ignore svn:entry:* properties, but store the updatepipe, too. */ if (!hlp__is_special_property_name(utf8_name)) { sts->remote_status |= FS_PROPERTIES; DEBUGP("property %s: %s=%s", sts->name, loc_name, loc_value); if (not_handled) *not_handled=1; } } ex: return status; #if 0 inval: /* possibly have a switch to just warn, but ignore invalid * values, so that an emergency can be 99% handled */ /* print path in case of errors? */ status=ops__build_path(&cp, sts); STOPIF(EINVAL, "incorrect utf8_value for property %s ignored: " "file %s: \"%s\"", name, status == 0 ? cp : sts->name, utf8_value->data); #endif } /** -. * Remove a (non-dir) file. * Must return errors silently. */ int up__unlink(struct estat *sts, char *filename) { int status; status=0; if (!filename) STOPIF( ops__build_path(&filename, sts), NULL ); /* If file has changed, we bail out */ if (sts->entry_status & FS_CHANGED) STOPIF_CODE_ERR(1, EBUSY, "File %s has been changed - won't remove", filename); if (unlink(filename) == -1) { status=errno; /* If it does not exist any more - should we warn?? */ if (status == ENOENT) status=0; } else { STOPIF( waa__delete_byext(filename, WAA__FILE_MD5s_EXT, 1), NULL); STOPIF( waa__delete_byext(filename, WAA__PROP_EXT, 1), NULL); } DEBUGP("unlink(%s)", filename); ex: return status; } /** Recursively delete a directory structure. * Only non-changed, known entries, so we don't * remove changed data. * * If an entry does not exist (ENOENT), it is ignored. * * Only entries that are registered from URLs given on the command line * (with \c -u) are removed. * * If children that belong to other URLs are found we don't remove the * directory. 
* * \todo conflict */ int up__rmdir(struct estat *sts, struct url_t *url) { int status, i, has_others; struct estat *cur; char *path; status=0; has_others=0; /* Remove children */ for(i=0; ientry_count; i++) { cur=sts->by_inode[i]; if (url && cur->url != url) has_others++; else { /* Checking the contents of sts here is allowed, because this should * be the estat::old pointer - which should have the previous entries * in it. */ /* Just trying the unlink is a single system call, like getting the * type of the entry with \c lstat(). */ status=up__unlink(cur, NULL); if (status == EISDIR) status=up__rmdir(cur, url); STOPIF( status, "unlink of %s failed", cur->name); } } if (!has_others) { STOPIF( ops__build_path(&path, sts), NULL ); status = rmdir(path) == -1 ? errno : 0; DEBUGP("removing %s: %d", path, status); if (status == ENOENT) status=0; STOPIF( status, "Cannot remove directory %s", path); } ex: return status; } /** -. * * The file has current properties, which we'd like to replace with the * saved. But all not-set properties should not be modified. * * And all settings should be saved in the waa-area _with the current * values_, so that this entry won't be seen as modified. * * The easy way: * We set what we can, and do a stat() afterwards to capture the current * setting.\n * This has a small race condition: If another process changes the * meta-data \e after setting and \e before querying, we don't see that it * was changed. * * How it was done previously: * We stored a copy of the wanted things, and copy what we set.\n * So there's no race-condition, except that we change meta-data a process * has just changed. * * Since svn 1.3.0 we no longer get all properties on an update, * only these that are different to the reported version. * That means that most times we'll get only the mtime as changed. 
* * Now, if the file has a unix-mode other than \c 0600 or an owner which is * not equal to the current user, we wouldn't set that because the change * mask didn't tell to. \n * So the file would retain the values of the temporary file, which are * 0600 and the current user and group. * * The new strategy is: write all values. If there are no properties set * for a file, we'll just write the values it currently has - so no * problem. \n * With one exception: the ctime will change, and so we'll believe that * it has changed next time. So we fetch the \c real, current values * afterwards. * * Meta-data-only changes happen too often, see this case: * - We're at rev \c N. * - We commit an entry and get rev \c M for this entry. The * directory still has \c N, because there might be other new entries * in-between. * - We want to update to \c T. * - We're sending to subversion directory is at \c N, file is at * \c M, * - and we get back file has changed, properties ... * [svn:entry:committed-rev = \c M]. * Basically we're saying we have file at \c M, and get back changed, * last change happened at \c M. (Will file a bug report.) * * So we get a meta-data change, update the meta-data (currently - will * change that soon), and have another ctime (but don't update the entries' * meta-data), so find the entry as changed ... * * Current solution: read back the entries' meta-data after changing it. * * Another thought - if we have different meta-data locally, that's * possibly something worth preserving. If the owner has changed in the * repository \b and locally, we'd have to flag a conflict! * Furthermore the root entry gets no properties, so it gets set to owner * \c 0.0, mode \c 0600 ... which is not right either. 
*/ int up__set_meta_data(struct estat *sts, char *filename) { struct timeval tv[2]; int status; mode_t current_mode; status=0; current_mode= PACKED_to_MODE_T(sts->new_rev_mode_packed); if (!filename) STOPIF( ops__build_path(&filename, sts), NULL ); DEBUGP_dump_estat(sts); /* We have a small problem here, in that we cannot change *only* the * user or group. It doesn't matter much; the problem case is that the * owner has changed locally, the repository gives us another group, * and we overwrite the owner. But still: TODO */ if (CHOWN_BOOL || !S_ISLNK(current_mode)) { if (sts->remote_status & (FS_META_OWNER | FS_META_GROUP)) { DEBUGP("setting %s to %d.%d", filename, sts->st.uid, sts->st.gid); status=CHOWN_FUNC(filename, sts->st.uid, sts->st.gid); if (status == -1) { STOPIF( wa__warn( errno==EPERM ? WRN__CHOWN_EPERM : WRN__CHOWN_OTHER, errno, "Cannot chown \"%s\" to %d:%d", filename, sts->st.uid, sts->st.gid), NULL ); } } } else { DEBUGP("a symlink, but no lchown: %s", filename); } /* A chmod or utimes on a symlink changes the *target*, not * the symlink itself. Don't do that. */ if (!S_ISLNK(current_mode)) { if (sts->remote_status & FS_META_UMODE) { /* The mode must be set after user/group. * If the entry has 07000 bits set (SGID, SUID, sticky), * they'd disappear after chown(). */ DEBUGP("setting %s's mode to 0%o", filename, sts->st.mode & 07777); status=chmod(filename, sts->st.mode & 07777); if (status == -1) { STOPIF( wa__warn( errno == EPERM ? WRN__CHMOD_EPERM : WRN__CHMOD_OTHER, errno, "Cannot chmod \"%s\" to 0%3o", filename, sts->st.mode & 07777 ), NULL ); } } } if (UTIMES_BOOL || !S_ISLNK(current_mode)) { if (sts->remote_status & FS_META_MTIME) { /* index 1 is mtime */ tv[1].tv_sec =sts->st.mtim.tv_sec; tv[1].tv_usec=sts->st.mtim.tv_nsec/1000; /* index 0 is atime. * It's not entirely correct that we set atime to mtime here, * but the atime is a volatile thing anyway ... 
*/ tv[0].tv_sec =sts->st.mtim.tv_sec; tv[0].tv_usec=sts->st.mtim.tv_nsec/1000; DEBUGP("setting %s's mtime %24.24s", filename, ctime(& (sts->st.mtim.tv_sec) )); STOPIF_CODE_ERR( UTIMES_FUNC(filename, tv) == -1, errno, "utimes(%s)", filename); } } else { DEBUGP("a symlink, but no lutimes: %s", filename); } STOPIF( hlp__lstat(filename, & sts->st), NULL); ex: return status; } /** Handling non-file non-directory entries. * We know it's a special file, but not more; we have to take the filedata * and retrieve the type. * * After this call \c sts->st.mode and \c sts->new_rev_mode_packed are set * to the current value. */ int up__handle_special(struct estat *sts, char *path, char *data, apr_pool_t *pool) { int status; char *cp; STOPIF( ops__string_to_dev(sts, data, &cp), NULL); STOPIF( hlp__utf82local(cp, &cp, -1), NULL); /* As we got that from the repository ... */ sts->new_rev_mode_packed=sts->local_mode_packed; sts->stringbuf_tgt=NULL; DEBUGP("special %s has mode 0%o", path, sts->st.mode); /* Process the entry. */ /* The (& S_IFMT) is a no-op, because of the packed storage. */ switch (PACKED_to_MODE_T(sts->new_rev_mode_packed) & S_IFMT) { case S_IFBLK: case S_IFCHR: STOPIF_CODE_ERR( mknod(path, sts->st.mode, sts->st.rdev) == -1, errno, "mknod(%s)", path) ; break; case S_IFLNK: STOPIF_CODE_ERR( symlink(cp, path) == -1, errno, "symlink(%s, %s)", cp, path); break; default: STOPIF_CODE_ERR(1, EINVAL, "what kind of node is this??? (mode=0%o)", sts->st.mode); } ex: return status; } /* ---CUT--- here are the delta-editor functions */ svn_error_t *up__set_target_revision(void *edit_baton, svn_revnum_t rev, apr_pool_t *pool) { struct estat *sts=edit_baton; int status; status=0; /* It makes no sense to set all members to the new revision - * we may get new ones, and they wouldn't be set. * So do the whole tree at the end. 
*/ target_revision=rev; sts->repos_rev=rev; RETURN_SVNERR(status); } svn_error_t *up__open_root(void *edit_baton, svn_revnum_t base_revision, apr_pool_t *dir_pool UNUSED, void **root_baton) { struct estat *sts=edit_baton; sts->repos_rev=base_revision; *root_baton=sts; return SVN_NO_ERROR; } svn_error_t *up__add_directory(const char *utf8_path, void *parent_baton, const char *utf8_copy_path, svn_revnum_t copy_rev, apr_pool_t *dir_pool UNUSED, void **child_baton) { struct estat *dir=parent_baton; struct estat *sts; int status; char* path; STOPIF( cb__add_entry(dir, utf8_path, &path, utf8_copy_path, copy_rev, S_IFDIR, NULL, 1, child_baton), NULL ); sts=(struct estat*)*child_baton; if (!action->is_compare) { /* this must be done immediately, because subsequent accesses may * try to add sub-entries. */ /* 0700 until overridden by property */ STOPIF_CODE_ERR( mkdir(path, 0700) == -1, errno, "mkdir(%s)", path); /* pre-fill data */ STOPIF( hlp__lstat(path, &(sts->st)), "lstat(%s)", path); } status=0; ex: RETURN_SVNERR(status); } svn_error_t *up__change_dir_prop(void *dir_baton, const char *utf8_name, const svn_string_t *value, apr_pool_t *pool) { struct estat *sts=dir_baton; int status; status=0; if (!sts->url || url__current_has_precedence(sts->url)) STOPIF( up__parse_prop(sts, utf8_name, value, NULL, pool), NULL); ex: RETURN_SVNERR(status); } svn_error_t *up__close_directory( void *dir_baton, apr_pool_t *pool) { struct estat *sts=dir_baton; int status; STOPIF( ops__build_path(&filename, sts), NULL); /* set meta-data */ STOPIF( up__set_meta_data(sts, filename), NULL); /* set correct values */ STOPIF( hlp__lstat( filename, &(sts->st)), "Cannot lstat('%s')", filename); /* finished, report to user */ STOPIF( st__status(sts), NULL); /* Mark this directory for being checked next time. 
*/ sts->flags |= RF_CHECK; ex: RETURN_SVNERR(status); } /// FSVS GCOV MARK: up__absent_directory should not be executed svn_error_t *up__absent_directory(const char *utf8_path, void *parent_baton, apr_pool_t *pool) { struct estat *dir UNUSED =parent_baton; DEBUGP("in %s", __PRETTY_FUNCTION__); return SVN_NO_ERROR; } svn_error_t *up__add_file(const char *utf8_path, void *parent_baton, const char *utf8_copy_path, svn_revnum_t copy_rev, apr_pool_t *file_pool, void **file_baton) { struct estat *dir=parent_baton; int status; STOPIF( cb__add_entry(dir, utf8_path, NULL, utf8_copy_path, copy_rev, S_IFREG, NULL, 1, file_baton), NULL); ex: RETURN_SVNERR(status); } svn_error_t *up__apply_textdelta(void *file_baton, const char *base_checksum, apr_pool_t *pool, svn_txdelta_window_handler_t *handler, void **handler_baton) { struct estat *sts=file_baton; svn_stream_t *svn_s_src, *svn_s_tgt; int status; char *cp; char* fn_utf8; apr_file_t *source, *target; struct encoder_t *encoder; svn_stringbuf_t *stringbuf_src; stringbuf_src=NULL; encoder=NULL; STOPIF( ops__build_path(&filename, sts), NULL); if (action->is_compare) { /* svn_stringbuf_create from a NULL pointer doesn't work - * we have to initialize it. */ cp=""; goto into_stringbufs; } STOPIF_CODE_ERR( sts->entry_status & FS_CHANGED, EBUSY, "file '%s' was changed locally and cannot be updated", filename); #if 0 /* If an entry was removed, and an update is issued - * should we restore the entry, or should we not? * We do that, because the repository says it should be here. 
*/ STOPIF_CODE_ERR( sts->entry_status & FS_REMOVED, ENOENT, "file '%s' was deleted locally", filename); #endif status=strlen(filename)+10; if (status > tmp_len) { /* round to next kB */ status= (status+1024) & ~(1024-1); STOPIF( hlp__realloc( &filename_tmp, status), NULL); tmp_len=status; } strcpy(filename_tmp, filename); strcat(filename_tmp, ".up.tmp"); DEBUGP("target is %s (0%o),", filename, sts->st.mode); DEBUGP(" temp is %s", filename_tmp); if (!S_ISREG(sts->st.mode)) { /* special entries are taken into a svn_stringbuf_t */ if (S_ISLNK(sts->st.mode)) { STOPIF( ops__link_to_string(sts, filename, &cp), NULL); STOPIF( hlp__local2utf8(cp, &cp, -1), NULL); } else cp=ops__dev_to_filedata(sts); into_stringbufs: stringbuf_src=svn_stringbuf_create(cp, pool); sts->stringbuf_tgt=svn_stringbuf_create("", pool); svn_s_src=svn_stream_from_stringbuf(stringbuf_src, pool); svn_s_tgt=svn_stream_from_stringbuf(sts->stringbuf_tgt, pool); status=0; } else { /** \anchor FHP File handle pools. * * This is a bit complicated. * * With the file:/// protocol, the source and destination * filehandles are not closed by the subversion libraries; * with svn+ssh:/// they are. * * If we just do a apr_file_close(), we get the error EBADF * (bad filehandle), and would accordingly die. * * If we don't do it (and let apr_pool_cleanup close it), the * close may just fall into the next second, and our * (in up__close_file) cached ctime is wrong - so we'd mark this * entry as changed. * * One solution would be to do a apr_file_close(), and ignore EBADF; * this is a bit unclean. * * So we go the other route: we simply define a subpool, where we * allocate the handles in, and clear that later. * That has the additional advantage that the struct estat could * possibly be shrinked in the future. */ /* Please note that for svn+ssh the pool given to this function cannot * be used, as this is already destroyed by the time we get to * up__close_file, and an apr_pool_clear() then results in a segfault. 
* So we have to take the directories' pool. */ /* We take a subpool of the global pool; that takes (tested) nearly * resources, as it's destroyed in close_file(). */ STOPIF( apr_pool_create(&(sts->filehandle_pool), global_pool), "Creating the filehandle pool"); /* If the file is new, has changed or is removed, * we should get full-text, ie. a delta against the empty file. */ STOPIF( apr_file_open(&source, (sts->remote_status & (FS_NEW|FS_CHANGED|FS_REMOVED)) ? "/dev/null" : filename, APR_READ, 0, sts->filehandle_pool), NULL); /* Mode, owner etc. will be done at file_close. * We read if it's something special. */ STOPIF( apr_file_open(&target, filename_tmp, APR_WRITE | APR_CREATE | APR_TRUNCATE, APR_UREAD | APR_UWRITE, sts->filehandle_pool), NULL); svn_s_src=svn_stream_from_aprfile(source, sts->filehandle_pool); svn_s_tgt=svn_stream_from_aprfile(target, sts->filehandle_pool); /* How do we get the filesize here? */ if (!action->is_import_export) STOPIF( cs__new_manber_filter(sts, svn_s_tgt, &svn_s_tgt, sts->filehandle_pool), NULL); if (sts->decoder) { STOPIF( hlp__encode_filter(svn_s_tgt, sts->decoder, 1, filename, &svn_s_tgt, &encoder, sts->filehandle_pool), NULL); /* If the file gets decoded, use the original MD5 for comparision. */ encoder->output_md5= &(sts->md5); } } STOPIF( hlp__local2utf8(filename, &fn_utf8, -1), NULL ); svn_txdelta_apply(svn_s_src, svn_s_tgt, action->is_compare ? NULL : sts->md5, fn_utf8, pool, handler, handler_baton); sts->remote_status |= FS_CHANGED; ex: RETURN_SVNERR(status); } svn_error_t *up__change_file_prop(void *file_baton, const char *utf8_name, const svn_string_t *value, apr_pool_t *pool) { struct estat *sts=file_baton; int status; status=0; if (!sts->url || url__current_has_precedence(sts->url)) STOPIF( up__parse_prop(sts, utf8_name, value, NULL, pool), NULL); /* Ah yes, the famous "late property" sketch ... 
*/ BUG_ON(sts->remote_status & FS_CHANGED, "Entry has already been fetched, properties too late!"); ex: RETURN_SVNERR(status); } svn_error_t *up__close_file(void *file_baton, const char *text_checksum, apr_pool_t *pool) { struct estat *sts=file_baton; int status; if (action->is_compare && text_checksum) { if (memcmp(text_checksum, sts->md5, sizeof(sts->md5)) != 0) sts->remote_status |= FS_CHANGED; } else { /* now we have a new md5 */ DEBUGP("close file (0%o): md5=%s", sts->st.mode, cs__md5tohex_buffered(sts->md5)); BUG_ON(!sts->st.mode); if (S_ISREG(sts->st.mode)) { status=0; /* See the comment mark FHP. */ /* This may be NULL if we got only property-changes, no file * data changes. */ if (sts->filehandle_pool) apr_pool_destroy(sts->filehandle_pool); sts->filehandle_pool=NULL; /* Now the filehandles should be closed. */ /* This close() before rename() is necessary to find out * if all data has been written (out of disk-space, etc). * Sadly we can't check for errors. */ } else { DEBUGP("closing special file"); sts->stringbuf_tgt->data[ sts->stringbuf_tgt->len ]=0; STOPIF( up__handle_special(sts, filename_tmp, sts->stringbuf_tgt->data, pool), NULL); } /* set meta-data */ STOPIF( up__set_meta_data(sts, filename_tmp), NULL); /* rename to correct filename */ STOPIF_CODE_ERR( rename(filename_tmp, filename)==-1, errno, "Cannot rename '%s' to '%s'", filename_tmp, filename); /* The rename changes the ctime. 
*/ STOPIF( hlp__lstat( filename, &(sts->st)), "Cannot lstat('%s')", filename); } /* finished, report to user */ STOPIF( st__status(sts), NULL); ex: RETURN_SVNERR(status); } /// FSVS GCOV MARK: up__absent_file should not be executed svn_error_t *up__absent_file(const char *utf8_path, void *parent_baton, apr_pool_t *pool) { struct estat *dir UNUSED=parent_baton; DEBUGP("in %s", __PRETTY_FUNCTION__); return SVN_NO_ERROR; } svn_error_t *up__close_edit(void *edit_baton, apr_pool_t *pool) { struct estat *sts UNUSED=edit_baton; return SVN_NO_ERROR; } /// FSVS GCOV MARK: up__abort_edit should not be executed svn_error_t *up__abort_edit(void *edit_baton, apr_pool_t *pool) { struct estat *sts UNUSED=edit_baton; return SVN_NO_ERROR; } /* ---CUT--- end of delta-editor */ /* For locally changed files we have to tell the RA layer * that we don't have the original text, so that we get the full * text instead of a delta. */ int ac___up_set_paths(struct estat *dir, const svn_ra_reporter2_t *reporter, void *report_baton, apr_pool_t *pool) { int status, i; struct estat *sts; svn_error_t *status_svn; char *fn; status=0; for(i=0; ientry_count; i++) { sts=dir->by_inode[i]; if (S_ISDIR(sts->st.mode)) STOPIF( ac___up_set_paths(sts, reporter, report_baton, pool), NULL); else if (sts->entry_status & (FS_CHANGED | FS_REMOVED)) { STOPIF( ops__build_path(&fn, sts), NULL ); DEBUGP(" changed: %s", fn); /* Again, we have to cut the "./" in front ... */ STOPIF_SVNERR( reporter->delete_path, (report_baton, fn+2, pool)); } } ex: return status; } /** Main update action. * * We do most of the setup before checking the whole tree. * * Please note that this is not atomic - use unionfs. 
*/ int up__work(struct estat *root, int argc, char *argv[]) { int status; svn_error_t *status_svn; svn_revnum_t rev; time_t delay_start; status=0; status_svn=NULL; STOPIF( waa__find_base(root, &argc, &argv), NULL); STOPIF( url__load_nonempty_list(NULL, 0), NULL); STOPIF_CODE_ERR(!urllist_count, EINVAL, "There's no URL defined"); STOPIF( url__mark_todo(), NULL); STOPIF_CODE_ERR( argc != 0, EINVAL, "Cannot do partial updates!"); opt__set_int(OPT__CHANGECHECK, PRIO_MUSTHAVE, opt__get_int(OPT__CHANGECHECK) | CHCHECK_FILE); /* Do that here - if some other checks fail, it won't take so long * to notice the user */ STOPIF( waa__read_or_build_tree(root, argc, argv, argv, NULL, 0), NULL); while ( ! ( status=url__iterator(&rev) ) ) { if (rev == 0) STOPIF( cb__remove_url(root, current_url), NULL); else STOPIF( cb__record_changes(root, rev, current_url->pool), NULL); if (action->is_compare) { /* This is for remote-status. Just nothing to be done. */ if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("Remote-status against revision\t%ld.\n", rev); } else { /* set new revision */ DEBUGP("setting revision to %llu", (t_ull)rev); STOPIF( ci__set_revision(root, rev), NULL); if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("Updating %s to revision\t%ld.\n", current_url->url, rev); } } STOPIF_CODE_ERR( status != EOF, status, NULL); status=0; if (action->is_compare) { } else { DEBUGP("fetching from repository"); STOPIF( rev__do_changed(root, global_pool), NULL); /* See the comment at the end of commit.c - atomicity for writing * these files. */ delay_start=time(NULL); STOPIF( waa__output_tree(root), NULL); STOPIF( url__output_list(), NULL); STOPIF( hlp__delay(delay_start, DELAY_UPDATE), NULL); } ex: STOP_HANDLE_SVNERR(status_svn); ex2: return status; } /* * The problem with update is this. * - We need to check the working copy for changes. 
* We have to do that to tell the svn layer which files to give us in full, * as we won't do anything with a delta stream (we don't have the common * ancestor). * - We don't need to know about new local entries; if we stored them, * we'd need to filter them out on waa__output_tree(). * (If we didn't filter them, they'd show up as already committed - so * we'd loose them for the next commit.) * And whether we do a getdents() while reading the directories or an * lstat() before writing doesn't matter that much. * - If we just did the tree update without new local files and write that * as current version in the WAA, we wouldn't find new entries that were * done *before* the update - the parent directories' time stamp would * be stored as the update time, and so we'd believe it to be unchanged. * * So what we do is * - we read the tree, but * - don't accept new local entries; * - directories that showed up as changed *before* the update get the * RF_CHECK flag set on up__open_directory(), so that they get read * on the next operations, too. * */ fsvs-1.2.6/src/sync.h0000644000202400020240000000127410756467655013414 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __SYNC_H__ #define __SYNC_H__ #include "actions.h" /** \file * \ref sync-repos action header file. */ /** Loads the directory structure from the repository. */ work_t sync__work; /** Prints the synchronization status and stats the (maybe existing) * local entries. 
*/ action_t sync__progress; #endif fsvs-1.2.6/src/log.c0000644000202400020240000002665311264677022013206 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * Fetch log information from repository - \ref log command. * * */ /** \addtogroup cmds * * \section log * * \code * fsvs log [-v] [-r rev1[:rev2]] [-u name] [path] * \endcode * * This command views the revision log information associated with the * given \e path at its topmost URL, or, if none is given, the highest * priority URL. * * The optional \e rev1 and \e rev2 can be used to restrict the * revisions that are shown; if no values are given, the logs are given * starting from \c HEAD downwards, and then a limit on the number of * revisions is applied (but see the \ref o_logmax "limit" option). * * If you use the \ref glob_opt_verb "-v" -option, you get the files * changed in each revision printed, too. * * There is an option controlling the output format; see the \ref * o_logoutput "log_output option". * * Optionally the name of an URL can be given after \c -u; then the log of * this URL, instead of the topmost one, is shown. * * TODOs: * - \c --stop-on-copy * - Show revision for \b all URLs associated with a working copy? * In which order? 
* */ #include #include #include #include #include #include #include #include #include #include #include "global.h" #include "est_ops.h" #include "waa.h" #include "url.h" #include "options.h" #include "log.h" #include "update.h" #include "racallback.h" #include "helper.h" #define MAX_LOG_OUTPUT_LINE (1024) //svn_log_message_receiver_t log__receiver; static int log___path_prefix_len, log___path_skip, log___path_parm_len; static char *log___path_prefix, *log___path_parm; int log___divider(FILE *output, char *color_after) { return -1 == fprintf(output, "%s" "------------------------------------" "------------------------------------\n" "%s", (opt__get_int(OPT__LOG_OUTPUT) & LOG__OPT_COLOR) ? ANSI__BLUE : "", (opt__get_int(OPT__LOG_OUTPUT) & LOG__OPT_COLOR) ? color_after : "") ? errno : 0; } /** The callback function for log messages. * The header and message body are printed in normal subversion format, * possibly with indenting and/or colorizing. * The filehandle for output must be given as \a baton. * * The various strings are (?) in UTF-8, so we have to convert them. */ svn_error_t *log__receiver(void *baton, apr_hash_t *changed_paths, svn_revnum_t revision, const char *author, const char *date, const char *message, apr_pool_t *pool) { static const char indent[]=" "; int status; int lines, len, cur, sol, i; const char *ccp; char *auth, *dat, *mess; FILE *output=stdout; apr_hash_index_t *hi; void const *name; apr_ssize_t namelen; char *local_name; int fn_count; char **filenames, *path_to_store; static const char ps[]={PATH_SEPARATOR, 0}; DEBUGP("got log for %llu", (t_ull)revision); /* It seems possible that message=NULL. */ if (!message) message="(No message.)"; /* count lines. */ ccp=message; lines=1; while ( (ccp=strchr(ccp, '\n')) ) lines++, ccp++; len = ccp+strlen(message) - message; DEBUGP("got %d lines", lines); /* Are these always in UTF-8 ? 
*/ STOPIF( hlp__utf82local(author, &auth, -1), NULL); STOPIF( hlp__utf82local(date, &dat, -1), NULL); /* We don't do the message in a single piece, because that might be large. */ STOPIF( log___divider(output, ANSI__GREEN), NULL); /* Taken from a svn commit message. */ STOPIF_CODE_EPIPE( fprintf(output, "r%llu | %s | %s | %d line%s\n" "%s", (t_ull)revision, auth, dat, lines, lines == 1 ? "" : "s", (opt__get_int(OPT__LOG_OUTPUT) & LOG__OPT_COLOR) ? ANSI__NORMAL : ""), NULL); /* Print optionally the filenames */ if (changed_paths) { STOPIF_CODE_EPIPE( fputs("Changed paths:\n", output), NULL); /* Prepare for sorting. */ fn_count=apr_hash_count(changed_paths); STOPIF( hlp__alloc( &filenames, sizeof(*filenames)*fn_count), NULL); i=0; hi=apr_hash_first(pool, changed_paths); while (hi) { apr_hash_this(hi, &name, &namelen, NULL); STOPIF( hlp__utf82local( name, &local_name, namelen), NULL); BUG_ON(i>=fn_count, "too many filenames in hash - count was %d", fn_count); DEBUGP("got path %s", local_name); if (strncmp(local_name, log___path_prefix, log___path_prefix_len) == 0) { path_to_store=local_name + log___path_prefix_len; switch (*path_to_store) { case 0: /* Hack to make the ++ right. */ path_to_store="x"; case PATH_SEPARATOR: path_to_store++; STOPIF( hlp__strmnalloc(1 + log___path_parm_len + strlen(path_to_store) + 1 + 3, filenames+i, log___path_parm, (log___path_parm_len>1 && *path_to_store && log___path_parm[ log___path_parm_len-1 ] != PATH_SEPARATOR) ? ps : "", path_to_store, NULL), NULL); i++; } } hi = apr_hash_next(hi); } BUG_ON(i>fn_count, "Wrong number of filenames in hash - count was %d", fn_count); fn_count=i; qsort(filenames, fn_count, sizeof(*filenames), hlp__compare_string_pointers); for(i=0; i0) { DEBUGP("todo %d bytes, \\x%02X; sol=%d", len, *message & 0xff, sol); if (sol && (opt__get_int(OPT__LOG_OUTPUT) & LOG__OPT_INDENT)) STOPIF_CODE_ERR( fputs(indent, output)==EOF, errno, NULL); cur= len <= MAX_LOG_OUTPUT_LINE ? 
len : MAX_LOG_OUTPUT_LINE; ccp=memchr(message, '\n', cur); if (ccp) cur=ccp-message+1; else if (cur == MAX_LOG_OUTPUT_LINE) { /* No newline, we need to split. */ /* Find a position where we can split the stream into valid * characters. * UTF-8 has defined that at most 4 bytes can be in a single * character, although up to 7 bytes could be used. We keep it * simple, and only look for a start character. */ /* We limit the loop to find invalid sequences earlier. */ for(i=8; i>=0 && cur > 0; i--) { cur--; /* No UTF-8 character (ie. 7bit ASCII) */ if ((message[cur] & 0x80) == 0 || /* or first character */ (message[cur] & 0xc0) == 0xc0) break; } STOPIF_CODE_ERR(i < 0, EILSEQ, "Invalid UTF8-sequence in log message for revision %llu found", (t_ull) revision); /* cur is now the index of the start character, and so equals the * number of bytes to process. */ } DEBUGP("log output: %d bytes", cur); STOPIF( hlp__utf82local(message, &mess, cur), NULL); STOPIF_CODE_EPIPE( fputs(mess, output), NULL); message+=cur; len-=cur; /* If we found a newline, we need to indent. * Is sol=!!ccp better? sol=ccp gives a warning, and sol=ccp!=NULL is * not nice, too. sol=(int)ccp gives warnings ... */ sol= ccp!=NULL; } STOPIF_CODE_EPIPE( putc('\n', output), NULL); ex: RETURN_SVNERR(status); } /** -. * * */ int log__work(struct estat *root, int argc, char *argv[]) { struct estat *sts; int status; svn_error_t *status_svn; char *path; apr_array_header_t *paths; int limit; char **normalized; const char *base_url; status_svn=NULL; STOPIF_CODE_ERR(argc>1, EINVAL, "!This command takes (currently) at most a single path."); /* Check for redirected STDOUT. 
*/ if (!isatty(STDOUT_FILENO)) opt__set_int( OPT__LOG_OUTPUT, PRIO_PRE_CMDLINE, opt__get_int( OPT__LOG_OUTPUT) & ~LOG__OPT_COLOR); DEBUGP("options bits are %d", opt__get_int(OPT__LOG_OUTPUT)); STOPIF( waa__find_common_base( argc, argv, &normalized), NULL); STOPIF( url__load_nonempty_list(NULL, 0), NULL); STOPIF( waa__input_tree(root, NULL, NULL), NULL); if (argc) { STOPIF_CODE_ERR( argc>1, EINVAL, "!The \"log\" command currently handles only a single path."); STOPIF( ops__traverse(root, normalized[0], 0, 0, &sts), "!The entry \"%s\" cannot be found.", normalized[0]); log___path_parm_len=strlen(argv[0]); STOPIF( hlp__strnalloc(log___path_parm_len+2, &log___path_parm, argv[0]), NULL); } else { log___path_parm_len=0; log___path_parm=""; sts=root; } current_url=NULL; if (url__parm_list_used) { STOPIF_CODE_ERR(url__parm_list_used>1, EINVAL, "!Only a single URL can be given."); STOPIF( url__find_by_name(url__parm_list[0], ¤t_url), "!No URL with name \"%s\" found", url__parm_list[0]); } else { if (sts->url) current_url=sts->url; else { STOPIF_CODE_ERR(urllist_count>1, EINVAL, "!The given entry has no URL associated yet."); } } if (!current_url) current_url=urllist[0]; DEBUGP("doing URL %s", current_url->url); STOPIF( url__open_session(NULL, NULL), NULL); if (argc) { paths=apr_array_make(global_pool, argc, sizeof(char*)); STOPIF( ops__build_path(&path, sts), NULL); *(char **)apr_array_push(paths) = path+2; } else { paths=NULL; path="."; } /* Calculate the comparision string. */ STOPIF_SVNERR( svn_ra_get_repos_root2, (current_url->session, &base_url, global_pool)); /* |- current_url->url -| * |- repos root-| * http://base/url /trunk /relative/path/ cwd/entry... * |-- log_path_skip ---| * * sts->path_len would be wrong for the WC root. * */ log___path_prefix_len=current_url->urllen - strlen(base_url) + strlen(path)-1; STOPIF( hlp__strmnalloc( log___path_prefix_len+1+5, &log___path_prefix, current_url->url + strlen(base_url), /* Include the "/", but not the ".". 
*/ sts->parent ? path+1 : NULL, NULL), NULL); DEBUGP("got %d: %s - %s; filter %s(%d, %d)", opt_target_revisions_given, hlp__rev_to_string(opt_target_revision), hlp__rev_to_string(opt_target_revision2), log___path_prefix, log___path_prefix_len, log___path_skip); /* To take the difference (for -rX:Y) we need to know HEAD. */ STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL); STOPIF( url__canonical_rev(current_url, &opt_target_revision2), NULL); switch (opt_target_revisions_given) { case 0: opt_target_revision=SVN_INVALID_REVNUM; opt_target_revision2=1; STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL); opt__set_int(OPT__LOG_MAXREV, PRIO_DEFAULT, 100); break; case 1: opt_target_revision2 = 1; opt__set_int(OPT__LOG_MAXREV, PRIO_DEFAULT, 1); break; case 2: opt__set_int(OPT__LOG_MAXREV, PRIO_DEFAULT, abs(opt_target_revision-opt_target_revision2)+1); break; default: BUG("how many"); } limit=opt__get_int(OPT__LOG_MAXREV); DEBUGP("log limit at %d", limit); status_svn=svn_ra_get_log(current_url->session, paths, opt_target_revision, opt_target_revision2, limit, opt__is_verbose() > 0, 0, // TODO: stop-on-copy, log__receiver, NULL, global_pool); if (status_svn) { if (status_svn->apr_err == -EPIPE) goto ex; STOPIF_SVNERR( status_svn, ); } STOPIF( log___divider(stdout, ANSI__NORMAL), NULL); ex: STOP_HANDLE_SVNERR(status_svn); ex2: return status; } fsvs-1.2.6/src/helper.c0000644000202400020240000014322712467104255013700 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "global.h" #include "waa.h" #include "est_ops.h" #include "options.h" #include "interface.h" #include "checksum.h" #include "helper.h" #include "cache.h" /** \file * General helper functions. */ /* Must be behind global.h or at least config.h */ #ifdef HAVE_LOCALES #include #endif #ifdef HAVE_LOCALES /** Initializer for i18n operations. * \param from_charset Source charset * \param to_charset Destination charset. * \param cd iconv-handle */ int hlp___get_conv_handle(const char* from_charset, const char* to_charset, iconv_t* cd) { int status; status=0; *cd = iconv_open(to_charset, from_charset); STOPIF_CODE_ERR( *cd == (iconv_t)-1, errno, "Conversion from %s to %s is not supported", from_charset, to_charset); ex: return status; } /** Charset convert function. * Using a handle obtained with \a hlp___get_conv_handle() this function * dynamically allocates some buffer space, and returns the converted * data in it. * * \param cd A conversion handle * \param from The source string * \param to A pointer to the converted string. * \param len The maximum number of input characters to translate. * * If \a from is \a NULL, \a to is returned as \a NULL. * * A few buffers are used round-robin, so that the caller need not free * anything and the maximum memory usage is limited. * Normally only 1 or 2 buffers are "active", eg. file name for a symlink * and its destination, or source and destination for apply_textdelta. * * The destination string is always terminated with a \\0. * * \note If there's an irreparable conversion error, we must not print * (parts of) the strings. One or even both might not be suitable for * printing on the current console - so we do not know what could happen. 
*/ static inline int hlp___do_convert(iconv_t cd, const char* from, char** to, int len) { static struct cache_t *cache; int status; char* to_buf; const char* from_buf; size_t srclen_rem, dstlen_rem; int iconv_ret, i, done; struct cache_entry_t *ent; STOPIF( cch__new_cache(&cache, 8), NULL); /* Input = NULL ==> Output = NULL */ if (!from) { *to = NULL; goto ex; } srclen_rem = len == -1 ? strlen(from)+1 : len; from_buf=from; STOPIF( cch__add(cache, 0, NULL, srclen_rem, &to_buf), NULL); ent=cache->entries[cache->lru]; /* Do the conversion. */ while(srclen_rem) { done=to_buf - ent->data; /* Check for buffer space; reallocate, if necessary. */ if (ent->len - done < srclen_rem) { /* Calculate increased buffer size. */ i=ent->len + 2*srclen_rem + 16; /* If we'd need less than 256 bytes, then get 256 bytes. * There's a good chance that we'll get longer file names; * we'll avoid a re-allocate, and it isn't that much memory. */ i = i < 256 ? 256 : i; STOPIF( cch__entry_set(cache->entries + cache->lru, 0, NULL, i, 1, &to_buf), NULL); ent=cache->entries[cache->lru]; /* Due to the reallocate, the buffer address may have * changed. Update our working pointer according to our * remembered position. */ to_buf=ent->data+done; } /* How much space is left? */ dstlen_rem=ent->len - done; /* Field precision and length cannot be given as long; let's hope they * are never that long :-) */ DEBUGP("before iconv from=%-*.*s", (int)srclen_rem, (int)srclen_rem, from_buf); /* iconv should have a const in it! */ iconv_ret = iconv(cd, (char**)&from_buf, &srclen_rem, &to_buf, &dstlen_rem); DEBUGP("after iconv to=%s ret=%d", ent->data, iconv_ret); /* Only allowed error is E2BIG. */ if (iconv_ret == -1) { /* We don't know which pointer has the local codeset, and even if we * did, we don't know if it's safe to print it. * After all, we got a conversion error - there may be invalid * characters in it. * "Hier seyen Drachen" :-] */ STOPIF_CODE_ERR( errno != E2BIG, errno, "Conversion of string failed. 
" "Next bytes are \\x%02X\\x%02X\\x%02X\\x%02X", srclen_rem>=1 ? from_buf[0] : 0, srclen_rem>=2 ? from_buf[1] : 0, srclen_rem>=3 ? from_buf[2] : 0, srclen_rem>=4 ? from_buf[3] : 0 ); /* We got E2BIG, so get more space. That should happen automatically * in the next round. */ } } /* Terminate */ *to_buf=0; /* Return */ *to=ent->data; DEBUGP("converted %*.*s to %s", len, len, from, *to); ex: /* reset the conversion. */ iconv(cd, NULL, NULL, NULL, NULL); return status; } /** Dummy converter function. * We need to honor the length parameter; but this function XOR the * hlp___do_convert() run, so only one cache is active. * * If the length is given as \c -1, the original string is returned. */ int hlp___dummy_convert(const char *input, char**output, int len) { int status; static struct cache_t *cache; status=0; if (!input) *output=NULL; else { if (len == -1) len=strlen(input)+1; STOPIF( cch__new_cache(&cache, 8), NULL); STOPIF( cch__add(cache, 0, input, len+1, output), NULL); (*output)[len]=0; } ex: return status; } /** -. * If \a len \c ==-1, a \c strlen() is done. * */ int hlp__local2utf8(const char *local_string, char** utf8_string, int len) { static iconv_t iconv_cd = NULL; int status; status=0; if (!local_codeset) STOPIF( hlp___dummy_convert(local_string, utf8_string, len), NULL); else { if (!iconv_cd) { STOPIF( hlp___get_conv_handle( local_codeset, "UTF-8", &iconv_cd), NULL); } STOPIF( hlp___do_convert(iconv_cd, local_string, utf8_string, len), NULL); } ex: return status; } /** -. * If \a len \c ==-1, a \c strlen() is done. * */ int hlp__utf82local(const char *utf8_string, char** local_string, int len) { static iconv_t iconv_cd = NULL; int status; status=0; if (!local_codeset) STOPIF( hlp___dummy_convert(utf8_string, local_string, len), NULL); else { /* Get a conversion handle, if not already done. 
*/ if (!iconv_cd) { STOPIF( hlp___get_conv_handle( "UTF-8", local_codeset, &iconv_cd), NULL); } STOPIF( hlp___do_convert(iconv_cd, utf8_string, local_string, len), NULL); } ex: return status; } #else /* For safety return a copy. */ int hlp__local2utf8(const char *local_string, char** utf8_string, int len) { static struct cache_entry_t *c=NULL; return cch__entry_set( &c, 0, local_string, len, 0, utf8_string); } int hlp__utf82local(const char *utf8_string, char** local_string, int len) { return hlp__local2utf8(utf8_string, local_string, len); } #endif /** Small utility function to copy a system-defined struct \a stat into our * own struct \a sstat_t - which is much smaller. */ inline void __attribute__((always_inline)) hlp__copy_stats(struct stat *src, struct sstat_t *dest) { if (S_ISCHR(src->st_mode) || S_ISBLK(src->st_mode)) dest->rdev=src->st_rdev; else dest->size=src->st_size; dest->mode=src->st_mode; dest->dev=src->st_dev; dest->ino=src->st_ino; dest->uid=src->st_uid; dest->gid=src->st_gid; #ifdef HAVE_STRUCT_STAT_ST_MTIM dest->mtim=src->st_mtim; dest->ctim=src->st_ctim; #else dest->mtim.tv_sec=src->st_mtime; dest->mtim.tv_nsec=0; dest->ctim.tv_sec=src->st_ctime; dest->ctim.tv_nsec=0; #endif } /** \defgroup stat_wrap Stat()-Wrappers. * \ingroup perf * * These functions wrap the syscalls \a lstat() and \a fstat(), to return * the "normalized" 0 for success and an error number otherwise. * Furthermore they return their result in a struct \a sstat_t pointer. * * The copying done in these functions hurts a bit, but the space wasted * by the normal struct stat64 hurts much more. * GHz are cheap, memory doesn't scale as much. * * \note As we want to store symlinks as such, we never use \a stat() calls - * these would follow the symlinks and return the wrong meta-data. * */ /** @{ */ /** A wrapper for \a lstat(). 
*/ int hlp__lstat(const char *fn, struct sstat_t *st) { int status; struct stat st64; status=lstat(fn, &st64); if (status == 0) { DEBUGP("%s: uid=%llu gid=%llu mode=0%llo dev=0x%llx " "ino=%llu rdev=0x%llx size=%llu", fn, (t_ull)st64.st_uid, (t_ull)st64.st_gid, (t_ull)st64.st_mode, (t_ull)st64.st_dev, (t_ull)st64.st_ino, (t_ull)st64.st_rdev, (t_ull)st64.st_size); /* FIFOs or sockets are never interesting; they get filtered out by * pretending that they don't exist. */ /* We should return -ENOENT here, so that higher levels can give * different error messages ... it might be confusing if "fsvs info * socket" denies some existing entry. */ if (S_ISFIFO(st64.st_mode) || S_ISSOCK(st64.st_mode) || S_ISDOOR(st64.st_mode)) { st64.st_mode = (st64.st_mode & ~S_IFMT) | S_IFGARBAGE; status=-ENOENT; } if (st) hlp__copy_stats(&st64, st); } else { status=errno; DEBUGP("stat %s: errno=%d", fn, errno); } return status; } /** A wrapper for \a fstat(). */ int hlp__fstat(int fd, struct sstat_t *st) { int status; struct stat st64; status=fstat(fd, &st64); if (status == 0) { hlp__copy_stats(&st64, st); DEBUGP("fd %d: uid=%d gid=%d mode=%o dev=%llx ino=%llu rdev=%llx size=%llu", fd, st->uid, st->gid, st->mode, (t_ull)st->dev, (t_ull)st->ino, (t_ull)st->rdev, (t_ull)st->size); } else { status=errno; DEBUGP("stat #%d: errno=%d", fd, errno); } return status; } /** @} */ static const char *src; static const char *src_1, *src_2, *src_3; static int eop; static va_list va; static const char null=0; static int status; void Increment() { src =src_1; src_1=src_2; src_2=src_3; /* Next character. */ if (*src_3) src_3++; /* If just prepared character in \c *src_3 is a \c \\0 , * wrap to next argument. * (If there are still arguments.) */ if (!eop) while (!*src_3) { src_3=va_arg(va, char*); if (src_3) DEBUGP("adding %s", src_3); else { /* End of parameters. Let the pointers point to a valid character * (instead of NULL), so that dereferencing works. */ eop=1; src_3=&null; break; } } } /** -. 
* \param dst A target buffer * \param len An optional length-saving field * \param ... A \c NULL -terminated list of pointers, which will be * concatenated. * \return \a dst as target buffer. * * This works like \c strcpy, but removes sequences like /./ and * //. * * \warning The buffer \a dst will be \b overwritten and \b has to have * enough space! * * If the first path has no PATH_SEPARATOR as first character, the * start_path is used as beginning - so that always full paths are * returned. * * Used for cases where we cannot run \a realpath() (as this follows * symlinks, something we don't want), but need to take care of * some strange pathnames. * * \note Not as easy as it sounds - try to do cases like * a//..//.///b/c//. * * For people reading JoelOnSoftware and related pages - yes, this is * cheating, but it works and is fast :-). * */ /* As we need to compare up to 4 characters "/../", we use 4 pointers, which * usually will point to neighbor characters. * That gives the compares easy access to characters splitted over several * arguments. */ char *hlp__pathcopy(char *dst, int *len, ...) { static const char ps[]={ PATH_SEPARATOR, 0 }; int had_path; char *dest; status=0; had_path=0; eop=0; va_start(va, len); /* dest is the working pointer, dst is needed to check for too many .. */ dest=dst; src_1=src_2=src_3=&null; /* Do first 4 characters */ Increment(); if (*src_3 != PATH_SEPARATOR) { strcpy(dest, start_path); dest+=start_path_len; /* If we have a PATH_SEPARATOR at the end (eg. we're in /), we have to * remove it. */ while (start_path_len>0 && dest[-1]==PATH_SEPARATOR) dest--; /* We need to fake a PATH_SEPARATOR into the stream; this bubbles up * and will be done in the loop. */ src_2=ps; } else { Increment(); } Increment(); Increment(); while (*src) { if (*src == PATH_SEPARATOR) { if (!had_path) *(dest++)=*src; Increment(); had_path=1; /* The next few checks are duplicated in ops__traverse */ if (*src == '.' 
&& *src_1 == PATH_SEPARATOR) { /* Simply ignore the ".". The next PATH_SEPARATOR gets ignored by * the next round. */ Increment(); } else if (*src == '.' && *src_1 == 0) { /* We've got a "." as last parameter. Remove the last * PATH_SEPARATOR, and ignore the "." to stop the loop. */ /* But only if it's not something like "/.", ie. keep the first * PATH_SEPARATOR. */ if (dest-dst > 1) *(--dest)=0; Increment(); } else { if (*src == '.' && *src_1 == '.' && (*src_2 == PATH_SEPARATOR || *src_2 == 0) ) { Increment(); Increment(); /* Remove just written PATH_SEPARATOR - in case of a/b/../ we * have to remove the b as well.*/ dest[-1]=0; dest=strrchr(dst, PATH_SEPARATOR); /* Prevent too many "..". */ if (!dest) dest=dst; /* We re-write the PATH_SEPARATOR, so that following /./ are * correctly done. */ had_path=0; } } } else { *(dest++)=*src; Increment(); had_path=0; } #if 0 /* For debugging: */ dest[0]=0; DEBUGP("solution: %s", dst); DEBUGP("next: %s", src); #endif } /* Terminate! */ *dest=0; if (len) *len=dest-dst; DEBUGP("finished path is %s", dst); return dst; } /** -. * * Normally a number; currently a special case is recogniced, namely \c * HEAD . * * If the parameter \c eos is not \c NULL, it gets set to the character * behind the parsed part, which is ignored. * If it is \c NULL, the string must end here. */ int hlp__parse_rev(char *stg, char **eos, svn_revnum_t *rev) { static const char head[]="HEAD"; int status; int inval; char *end; status=0; if (strncasecmp(stg, head, strlen(head)) == 0) { *rev=SVN_INVALID_REVNUM; end=stg+strlen(head); } else *rev=strtoull(stg, &end, 10); inval = opt_target_revision == 0; if (eos) *eos=end; else inval |= (stg == end) || (*end != 0); STOPIF_CODE_ERR( inval, EINVAL, "The given revision argument '%s' is invalid", stg); ex: return status; } /** -. * Has a few buffers for these operations. * * The cache is statically allocated, as we cannot return \c ENOMEM. 
* * If we cannot store a cache during querying, we'll return the value, but * forget that we already know it. * * \todo Keep most-used? */ const char *hlp__get_grname(gid_t gid, char *not_found) { struct group *gr; static struct cache_t cache = { .max=CACHE_DEFAULT }; char *str; if (cch__find(&cache, gid, NULL, &str, NULL) == 0) return *str ? str : not_found; gr=getgrgid(gid); cch__add(&cache, gid, gr ? gr->gr_name : "", -1, &str); return *str ? str : not_found; } /** -. * Has a few buffers for these operations; the cache is statically * allocated, as we cannot return \c ENOMEM. * * If we cannot store a cache during querying, we'll return the value, but * forget that we already know it. * */ const char *hlp__get_uname(uid_t uid, char *not_found) { struct passwd *pw; static struct cache_t cache = { .max=CACHE_DEFAULT }; char *str; if (cch__find(&cache, uid, NULL, &str, NULL) == 0) return *str ? str : not_found; pw=getpwuid(uid); cch__add(&cache, uid, pw ? pw->pw_name : "", -1, &str); return *str ? str : not_found; } /** -. * Uses a simple hash function. */ int hlp__get_uid(char *user, uid_t *uid, apr_pool_t *pool) { static struct cache_t *cache=NULL; int status; apr_gid_t a_gid; /* Needed for 64bit type-conversions. */ cache_value_t cv; STOPIF( cch__new_cache(&cache, 64), NULL); if (cch__hash_find(cache, user, &cv) == ENOENT) { status=apr_uid_get(uid, &a_gid, user, pool); if (status) status=ENOENT; else { cv=*uid; STOPIF( cch__hash_add(cache, user, cv), NULL); } } else *uid=(uid_t)cv; ex: return status; } /** -. * Uses a simple hash function. */ int hlp__get_gid(char *group, gid_t *gid, apr_pool_t *pool) { static struct cache_t *cache=NULL; int status; /* Needed for 64bit type-conversions. 
*/ cache_value_t cv; STOPIF( cch__new_cache(&cache, 64), NULL); if (cch__hash_find(cache, group, &cv) == ENOENT) { status=apr_gid_get(gid, group, pool); if (status) status=ENOENT; else { cv=*gid; STOPIF( cch__hash_add(cache, group, cv), NULL); } } else *gid=(gid_t)cv; ex: return status; } #define STRING_LENGTH (4096) /** -. * Returns 0 for success, \c EOF for no more data. * * Empty lines (only whitespace) are ignored (but counted). * * If \a no_ws is set, the returned pointer has whitespace at beginning * and end removed; \c \\r and \c \\n at the end are always removed. * * Only a single statically allocated buffer is used, so the line has to be * copied if needed over several invocations. * * \a eos is set to the last non-whitespace character in the line. */ int hlp__string_from_filep(FILE *input, char **string, char **eos, int flags) { int status; static char *buffer=NULL; static unsigned linenum; char *start; int i; status=0; if (flags & SFF_RESET_LINENUM) linenum=0; if (flags & SFF_GET_LINENUM) return linenum; if (!input) goto ex; /* We impose a hard limit for simplicities' sake. */ if (!buffer) STOPIF( hlp__alloc( &buffer, STRING_LENGTH), NULL); while (1) { start=NULL; linenum++; if (!fgets(buffer, STRING_LENGTH, input)) { /* fgets() returns NULL at EOF or on error; feof() * is (reliably) set only after \c fgets(). */ if (feof(input)) { status=EOF; goto ex; } status=errno; goto ex; } start=buffer; if (flags & SFF_WHITESPACE) start=hlp__skip_ws(start); if ((flags & SFF_COMMENT) && *start == '#') continue; i=strlen(start)-1; /* Remove the \n at the end. For DOS-CRLF pairs we must use this test * order. */ if (i > 0 && start[i] == '\n') i--; if (i > 0 && start[i] == '\r') i--; /* Always remove the \r|\n at the end; other whitespace is optionally * removed. */ start[i+1]=0; while (i>=0 && isspace(start[i])) i--; if (flags & SFF_WHITESPACE) /* i is now in [-1 ... 
] */ start[i+1]=0; if (eos) *eos=start+i+1; if (*start) break; } if (start) DEBUGP("read string %s", start); *string = start; ex: return status; } /** -. * Some special values are recogniced - eg. \c \\r, \c \\n. */ int hlp__safe_print(FILE *output, char *string, int maxlen) { static const char to_encode[32]= { '0', 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 0, 'r', 0, 0, 'f', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; int cur; int status; status=0; /* No need to optimize here and change that into a single compare. * After all, we're doing IO. */ while (status>=0 && maxlen>0) { /* We could mark the string as "unsigned", but that would collide with many * other definitions. See linus' post Jan. 2007 on linux-kernel about the * "braindead compiler". */ cur=(*string) & 0xff; string++; maxlen--; if (cur == 0x7f) { STOPIF_CODE_EPIPE( fputs("\\x7f", output), NULL); continue; } /* Thanks to UTF-8, we need to take care only for the control characters. * The things above 0x80 are needed. */ if (curis_writer) { poll_data.events=POLLOUT; poll_data.fd=encoder->pipe_in; } else { poll_data.events=POLLIN; poll_data.fd=encoder->pipe_out; } status=0; /* We cannot wait indefinitely, because we don't get any close event yet. * */ STOPIF_CODE_ERR( poll(&poll_data, 1, 100) == -1, errno, "Error polling for data"); ex: return status; } /** Writer function for an encoder. * We have to write the full buffer through the pipe before returning - * possibly in some chunks. * */ svn_error_t *hlp___encode_write(void *baton, const char *data, apr_size_t *len) { int status; svn_error_t *status_svn; int write_pos, bytes_left; struct encoder_t *encoder=baton; apr_size_t wlen; status=0; write_pos=0; bytes_left= len ? *len : 0; /* If we get data==NULL, we go on until we get an eof. */ while (data ? (bytes_left || encoder->bytes_left) : !encoder->eof) { /* Try to give the child process some data. 
*/ if (bytes_left) { status=send(encoder->pipe_in, data+write_pos, bytes_left, MSG_DONTWAIT); DEBUGP("sending %d bytes to child %d from %d: %d; %d", bytes_left, encoder->child, write_pos, status, errno); if (status == -1) { status=errno; if (status == EAGAIN) { /* Just wait, we might be able to send that later. */ } else STOPIF(status, "Error writing to child"); } else { /* Data sent. */ write_pos+=status; bytes_left-=status; status=0; DEBUGP("%d bytes left", bytes_left); } } /* --- Meanwhile the child process processes the data --- */ if (encoder->pipe_out != -1) { /* Try to read some data. */ status = recv(encoder->pipe_out, encoder->buffer, sizeof(encoder->buffer), MSG_DONTWAIT); DEBUGP("receiving bytes from child %d: %d; %d", encoder->child, status, errno); if (status==0) { STOPIF_CODE_ERR( close(encoder->pipe_out) == -1, errno, "Cannot close connection to child"); DEBUGP("child %d finished", encoder->child); encoder->pipe_out=EOF; encoder->eof=1; } else { if (status == -1) { status=errno; if (status == EAGAIN) { /* Just wait, we might get data later. */ status=0; } else STOPIF(status, "Error reading from child"); } else { apr_md5_update(&encoder->md5_ctx, encoder->buffer, status); encoder->bytes_left=status; encoder->data_pos=0; /* No error, got so many bytes ... Send 'em on. */ status=0; } } } if (encoder->bytes_left) { wlen=encoder->bytes_left; STOPIF_SVNERR( svn_stream_write, (encoder->orig, encoder->buffer + encoder->data_pos, &wlen)); encoder->data_pos+=wlen; encoder->bytes_left-=wlen; } STOPIF( hlp___encoder_waiter(encoder), NULL); } /* *len is not changed - we wrote the full data. */ ex: RETURN_SVNERR(status); } /** Reader function for an encoder. * A \c svn_stream_t reader gets as much data as it requested; a short read * would be interpreted as EOF. 
* */ svn_error_t *hlp___encode_read(void *baton, char *data, apr_size_t *len) { int status; svn_error_t *status_svn; int read_pos, bytes_left; struct encoder_t *encoder=baton; int ign_count; status=0; ign_count=1; read_pos=0; bytes_left=*len; while (bytes_left && !encoder->eof) { /* No more data buffered? */ if (!encoder->bytes_left && encoder->orig) { encoder->data_pos=0; encoder->bytes_left=sizeof(encoder->buffer); STOPIF_SVNERR( svn_stream_read, (encoder->orig, encoder->buffer, &(encoder->bytes_left)) ); DEBUGP("read %llu bytes from stream", (t_ull)encoder->bytes_left); if (encoder->bytes_left < sizeof(encoder->buffer)) { STOPIF_SVNERR( svn_stream_close, (encoder->orig) ); encoder->orig=NULL; } } /* Try to give the child process some data. */ if (encoder->bytes_left) { status=send(encoder->pipe_in, encoder->buffer+encoder->data_pos, encoder->bytes_left, MSG_DONTWAIT); DEBUGP("sending %llu bytes to child %d from %d: %d; %d", (t_ull)encoder->bytes_left, encoder->child, encoder->data_pos, status, errno); if (status == -1) { status=errno; if (status == EAGAIN) { /* Just wait, we might be able to send that later. */ } else STOPIF(status, "Error writing to child"); } else { /* Data sent. */ apr_md5_update(&encoder->md5_ctx, encoder->buffer+encoder->data_pos, status); encoder->data_pos+=status; encoder->bytes_left-=status; status=0; DEBUGP("%llu bytes left", (t_ull)encoder->bytes_left); } } if (encoder->bytes_left == 0 && !encoder->orig && encoder->pipe_in != -1) { DEBUGP("closing connection"); STOPIF_CODE_ERR( close(encoder->pipe_in) == -1, errno, "Cannot close connection to child"); encoder->pipe_in=-1; } /* --- Meanwhile the child process processes the data --- */ /* Try to read some data. 
*/ status = recv(encoder->pipe_out, data+read_pos, bytes_left, MSG_DONTWAIT); if (status==-1 && errno==EAGAIN && ign_count>0) ign_count--; else DEBUGP("receiving %d bytes from child %d: errno=%d", status, encoder->child, errno); if (status==0) { encoder->eof=1; STOPIF_CODE_ERR( close(encoder->pipe_out) == -1, errno, "Cannot close connection to child"); } else { if (status == -1) { status=errno; if (status == EAGAIN) { if (ign_count == 0) ign_count=20; /* Just wait, we might get data later. */ } else STOPIF(status, "Error reading from child"); } else { /* No error, got so many bytes ... */ read_pos+=status; bytes_left-=status; status=0; } } STOPIF( hlp___encoder_waiter(encoder), NULL); } *len=read_pos; ex: RETURN_SVNERR(status); } svn_error_t *hlp___encode_close(void *baton) { int status; svn_error_t *status_svn; int retval; struct encoder_t *encoder=baton; md5_digest_t md5; DEBUGP("closing connections for %d", encoder->child); if (encoder->is_writer && encoder->pipe_in!=EOF) { /* We close STDIN of the child, and wait until there's no more data * left. Then we close STDOUT. */ STOPIF_CODE_ERR( close(encoder->pipe_in) == -1, errno, "Cannot close connection to child"); encoder->pipe_in=EOF; STOPIF_SVNERR( hlp___encode_write, (baton, NULL, NULL)); STOPIF_SVNERR( svn_stream_close, (encoder->orig) ); } status=waitpid(encoder->child, &retval, 0); DEBUGP("child %d gave %d - %X", encoder->child, status, retval); STOPIF_CODE_ERR(status == -1, errno, "Waiting for child process failed"); /* waitpid() returns the child pid */ status=0; apr_md5_final(md5, &encoder->md5_ctx); if (encoder->output_md5) memcpy(encoder->output_md5, md5, sizeof(*encoder->output_md5)); DEBUGP("encode end gives MD5 of %s", cs__md5tohex_buffered(md5)); STOPIF_CODE_ERR(retval != 0, ECHILD, "Child process returned 0x%X", retval); ex: IF_FREE(encoder); RETURN_SVNERR(status); } /** Helper function. * Could be marked \c noreturn. 
*/ void hlp___encode_filter_child(int pipe_in[2], int pipe_out[2], const char *path, const char *command) { int status, i; /* The symmetry of the sockets makes it a bit easier - it doesn't matter * which handle we take. */ STOPIF_CODE_ERR( dup2(pipe_in[1], STDIN_FILENO) == -1 || dup2(pipe_out[1], STDOUT_FILENO) == -1, errno, "Cannot dup2() the childhandles"); /* Now we may not give any more debug information to STDOUT - it would * get read as data from the encoder! */ /* Close known filehandles. */ STOPIF_CODE_ERR( ( close(pipe_in[0]) | close(pipe_out[0]) | close(pipe_in[1]) | close(pipe_out[1]) ) == -1, errno, "Cannot close the pipes"); /* Try to close other filehandles, like a connection to the repository or * similar. They should not be kept for the exec()ed process, as * (hopefully) nobody did a fcntl(FD_CLOEXEC) on them, but better to be * sure. */ /* There are other constants than FD_SETSIZE, but they should do the * same. */ /* We start from 3 - STDIN, STDOUT and STDERR should be preserved. */ for(i=3; ioutput_md5 pointer to the destination address. * */ int hlp__encode_filter(svn_stream_t *s_stream, const char *command, int is_writer, char *path, svn_stream_t **output, struct encoder_t **encoder_out, apr_pool_t *pool) { int status; svn_stream_t *new_str; struct encoder_t *encoder; int pipe_in[2]; int pipe_out[2]; DEBUGP("encode filter: %s", command); status=0; STOPIF( hlp__alloc( &encoder, sizeof(*encoder)), NULL); new_str=svn_stream_create(encoder, pool); STOPIF_ENOMEM( !new_str); svn_stream_set_read(new_str, hlp___encode_read); svn_stream_set_write(new_str, hlp___encode_write); svn_stream_set_close(new_str, hlp___encode_close); /* We use a socketpair and not a normal pipe because on a socket we can try * to change the in-kernel buffer, possibly up to some hundred MB - which * is not possible with a pipe (limited to 4kB). 
*/ STOPIF_CODE_ERR( socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, pipe_in) == -1 || socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, pipe_out) == -1, errno, "Cannot create a socket pair"); /* So that the child doesn't have any data cached: */ fflush(NULL); encoder->child=fork(); if (encoder->child == 0) hlp___encode_filter_child(pipe_in, pipe_out, path, command); /* Parent continues. */ STOPIF_CODE_ERR(encoder->child == -1, errno, "Cannot fork()"); STOPIF_CODE_ERR( ( close(pipe_in[1]) | close(pipe_out[1]) ) == -1, errno, "Cannot close the pipes"); encoder->pipe_in=pipe_in[0]; encoder->pipe_out=pipe_out[0]; encoder->orig=s_stream; encoder->bytes_left=0; encoder->eof=0; encoder->is_writer=is_writer; encoder->output_md5=NULL; apr_md5_init(& encoder->md5_ctx); *encoder_out = encoder; *output=new_str; ex: return status; } /** Checks for the needed environment variables, and does the chroot()ing * if necessary. * See \ref howto_chroot. */ int hlp__chrooter(void) { int status; char *libs, *root, *cwd; int fd, len; static const char delim[]=" \r\n\t\f"; static const char so_pre[]="lib"; static const char so_post[]=".so"; void *hdl; char filename[128]; libs=getenv(CHROOTER_LIBS_ENV); DEBUGP("Libraries to load: %s", libs); root=getenv(CHROOTER_ROOT_ENV); DEBUGP("fd of old root: %s", root); cwd=getenv(CHROOTER_CWD_ENV); DEBUGP("fd of old cwd: %s", cwd); /* If none are set, there's no need for the chroot. Just return. */ status = (libs ? 1 : 0) | (root ? 2 : 0) | (cwd ? 4 : 0); if (status == 0) { DEBUGP("All are empty, just return."); goto ex; } /* Only one not set? 
*/ STOPIF_CODE_ERR(status != 7, EINVAL, "All of %s, %s and %s must be set!", CHROOTER_LIBS_ENV, CHROOTER_CWD_ENV, CHROOTER_ROOT_ENV); status=0; strcpy(filename, so_pre); /* Load libraries */ libs=strtok(libs, delim); while (libs && *libs) { DEBUGP("Trying library %s", libs); hdl=dlopen(libs, RTLD_NOW | RTLD_GLOBAL); if (hdl == NULL) { len=strlen(libs); if (sizeof(filename) < (len + strlen(so_pre) + strlen(so_post) +2)) DEBUGP("Library name %s too long for expansion", libs); else { strcpy(filename+strlen(so_pre), libs); strcpy(filename+strlen(so_pre)+len, so_post); /* 2nd try */ hdl=dlopen(filename, RTLD_NOW | RTLD_GLOBAL); } /* We allow to specify only "m" for "libm.so". */ } STOPIF_CODE_ERR( hdl == NULL, errno, "Cannot load library %s", libs); libs=strtok(NULL, delim); } /* Load message lists */ strerror(EINVAL); /* Locale data */ iconv_open("437","850"); /* Load DNS libraries. */ gethostbyname("localhost"); //gethostbyname("Does.surely.not.exist.invalid"); /* Back to the root(s) :-) */ fd=atoi(root); STOPIF_CODE_ERR( fchdir(fd) == -1, errno, "Cannot fchdir() on handle %d", fd); /* We ignore errors here, and on the close below. */ close(fd); STOPIF_CODE_ERR( chroot(".") == -1, errno, "Cannot chroot() back"); /* Go to the remembered wd */ fd=atoi(cwd); STOPIF_CODE_ERR( fchdir(fd) == -1, errno, "Cannot fchdir() on handle %d", fd); close(fd); ex: return status; } /** Is the given environment valid for substitution? * Returns a boolean - 0 means not valid, everything else ok. * * It is valid, if * - The name starts with \c WC * - The first \a p2c_len characters of \a path2cmp match with the value. * If \c p2c_len equals \c -1 the length of the path in the environment * variable is taken. * * If \a value_len is not \c NULL, it get set to the length of the path in * the environment variable. 
* */ inline int hlp___is_valid_env(char *env, char *path2cmp, int p2c_len, char **value, int *value_len) { char *cp; int x; *value=NULL; cp=strchr(env, '='); if (!cp) return 0; /* Skip '=' */ cp++; *value=cp; x=strlen(cp); /* Remove PATH_SEPARATORs at the end */ while (x>0 && cp[x-1]==PATH_SEPARATOR) x--; if (value_len) *value_len=x; if (p2c_len == -1) p2c_len=x; /* The name must start with "WC"; */ return env[0]=='W' && env[1] == 'C' && /* and the value must match the given path. */ strncmp(cp, path2cmp, p2c_len) == 0; } /** Here we simply (try to) match the (few) environment variables against * the entries, and set pointers for matching paths; so for displaying them * it suffices to walk the tree up until an entry with \c arg set is found. * */ int hlp__match_path_envs(struct estat *root) { int status; char **env; char *cp; struct estat *sts; int len; status=0; for(env=environ; *env; env++) { DEBUGP("test env %s", *env); /* The path in the environment variable must have at least the working * copy path. */ if (hlp___is_valid_env(*env, wc_path, wc_path_len, &cp, &len)) { /* The length cannot be smaller; if it's equal, it's the root entry. * */ if (len == wc_path_len) sts=root; else { /* Find entry. The +1 is the PATH_SEPARATOR. */ status=ops__traverse(root, cp+wc_path_len+1, 0, 0, &sts); if (status) { DEBUGP("no match: %s", *env); continue; } } /* It matches. */ len=(cp-*env)-1; /* We could use hlp__strnalloc() here; but then we'd have to copy * from *env-1, which *should* be save, as that is normally allocated * on the top of the (mostly downgrowing) stack. * Be conservative. */ STOPIF( hlp__alloc( &sts->arg, 1+len+1+3), NULL); /* \todo DOS-compatible as %env% ? */ sts->arg[0]=ENVIRONMENT_START; memcpy(sts->arg+1, *env, len); sts->arg[1+len]=0; DEBUGP("match: %s gets %s", sts->name, sts->arg); } } /* Ignore previous ENOENT and similar. */ status=0; ex: return status; } /** Can be in several formats; see \ref o_opt_path. 
* * \todo Build the \c wc_relative_path only if necessary - remove the * parameter from the caller chains. */ int hlp__format_path(struct estat *sts, char *wc_relative_path, char **output) { int status; static struct cache_entry_t *cache=NULL; char *path, **env, *cp, *match; struct estat *parent_with_arg; static const char ps[2]= { PATH_SEPARATOR, 0}; int len, sts_rel_len, max_len; status=0; switch (opt__get_int(OPT__PATH)) { case PATH_WCRELATIVE: path=wc_relative_path; break; case PATH_CACHEDENVIRON: case PATH_PARMRELATIVE: parent_with_arg=sts; while (parent_with_arg->parent && !parent_with_arg->arg) { // DEBUGP("no arg: %s", parent_with_arg->name); parent_with_arg=parent_with_arg->parent; } /* If we got out of the loop, but there's no ->arg, we must be at the * root (because ! ->parent is the other condition). * The root is always the wc_path, so set it as default ... */ /** \todo We should set it beginning from a command line parameter, * if we have one. Preferably the nearest one ... */ if (!parent_with_arg->arg) parent_with_arg->arg=wc_path; len=strlen(parent_with_arg->arg); sts_rel_len=sts->path_len - parent_with_arg->path_len; /* If there was no parameter, and we're standing at the WC root, we * would have no data to print. * (We cannot set "." as argument for the root entry, because all * other entries would inherit that - we'd get "./file", just like * before. */ if (len == 0 && sts_rel_len == 0) { path="."; break; } DEBUGP("parent=%s, has %s; len=%d, rel_len=%d", parent_with_arg->name, parent_with_arg->arg, len, sts_rel_len); /* Maybe we should cache the last \c parent_with_arg and \c pwa_len. */ STOPIF( cch__entry_set(&cache, 0, NULL, len + 1 + sts_rel_len + 3, 0, &path), NULL); /* We cannot use hlp__pathcopy(), as that would remove things like * ./.././, which the user possibly wants. * Use the parameter as given; only avoid putting a superfluous / at * the end. 
*/ memcpy(path, parent_with_arg->arg, len); /* If we had an parameter (so the PATH_SEPARATOR won't be the first * character), and the last character of the parameter isn't already a * PATH_SEPARATOR, and we have to append some path (ie. our current * element isn't already finished [because it was directly given]), * we set a PATH_SEPARATOR. */ if (len>0 && path[len-1] != PATH_SEPARATOR && parent_with_arg != sts) path[len++]=PATH_SEPARATOR; memcpy(path+len, wc_relative_path+parent_with_arg->path_len+1, sts_rel_len); path[len+sts_rel_len]=0; break; case PATH_ABSOLUTE: case PATH_FULLENVIRON: STOPIF( cch__entry_set(&cache, 0, NULL, wc_path_len + 1 + sts->path_len + 1, 0, &path), NULL); hlp__pathcopy(path, NULL, wc_path, ps, wc_relative_path, NULL); if (opt__get_int(OPT__PATH) == PATH_ABSOLUTE) break; /* Substitute some environment. * \todo It would be better to cache already matched environment * variables in the corresponding struct estats (eg. in the arg * variable); then we could match like above in the case * PATH_PARMRELATIVE. * The problem is that this cannot be done in a single point in time * (although just before waa__partial_update() looks promising) - * because a major directory tree might be new, and this will only be * found during processing the new items. * * So there are two ways: * - Looping *once* through the environment, after waa__input_tree() * - Adv: Done once, is fast. * - Disadv: May miss some opportunities for matching. * - Doing that for every path * - Disadv: Slow, because it has to be tried *every* time * - Adv: Matches as much as possible * * Maybe there should simply be a choise between PATH_ENVIRON_FAST * and PATH_ENVIRON_FULL. */ match=NULL; /* We need at least a single character to substitute. 
*/ max_len=1; for(env=environ; *env; env++) { if (!hlp___is_valid_env(*env, path, -1, &cp, &len)) continue; if (len > max_len && path[len]==PATH_SEPARATOR) { match=*env; max_len=len; } } if (match) { DEBUGP("matched %s", match); cp=strchr(match, '='); /* If the environment variable has a longer name than the path, we * don't substitute. */ if (max_len > cp-match+1) { /* length of environment variable name */ len=cp-match; /* \todo DOS-compatible as %env% ? */ *path=ENVIRONMENT_START; memcpy(path+1, match, len); path[1+ len]=PATH_SEPARATOR; /* debug: */ path[1+ len+1]=0; DEBUGP("path=%s, rest=%s; have %d, sts has %d", path, path+max_len+1, max_len, sts->path_len); memmove(path+1+len, path+max_len, wc_path_len+sts->path_len - max_len); } } break; default: BUG_ON(1); } *output=path; ex: return status; } /** -. * * Can be non-numeric, like \c HEAD. */ char *hlp__rev_to_string(svn_revnum_t rev) { static int last=0; /* Sadly GCC doesn't statically solve sizeof(rev)*log(10)/log(2) ... */ static char buffers[2][(int)(sizeof(rev)*4)+3]; last++; if (last>= sizeof(buffers)/sizeof(buffers[0])) last=0; if (rev == SVN_INVALID_REVNUM) strcpy(buffers[last], "HEAD"); else { BUG_ON(rev < 0); sprintf(buffers[last], "%llu", (t_ull)rev); } return buffers[last]; } /** -. * If \a max<0, the comparision is done until the \c \\0. * \a max is the maximum number of characters to compare; the result is * always \c equal (\c ==0), if \c max==0. * * Not useable for lesser/greater compares. */ int hlp__strncmp_uline_eq_dash(char *always_ul, char *other, int max) { while (max) { /* Nicer than the negation. */ if (*always_ul == *other || (*always_ul == '_' && *other == '-')) ; else /* Different. */ return 1; /* We need not check for *other==0, because they must be equal * according the above comparision. * We must check afterwards, because the \0 must be compared, too. */ if (max<0 && *always_ul==0) break; if (max > 0) max--; always_ul++; other++; } return 0; } /** -. 
* */ int hlp__is_special_property_name(const char *name) { static const char prop_pre_toignore[]="svn:entry"; static const char prop_pre_toignore2[]="svn:wc:"; if (strncmp(name, prop_pre_toignore, strlen(prop_pre_toignore)) == 0 || strncmp(name, prop_pre_toignore2, strlen(prop_pre_toignore2)) == 0) return 1; return 0; } /** -. * \a md5, if not \c NULL, must point to at least MD5_DIGEST_LENGTH bytes. * */ int hlp__stream_md5(svn_stream_t *stream, unsigned char md5[APR_MD5_DIGESTSIZE]) { int status; svn_error_t *status_svn; const int buffer_size=16384; char *buffer; apr_size_t len; apr_md5_ctx_t md5_ctx; status=0; STOPIF( hlp__alloc( &buffer, buffer_size), NULL); if (md5) apr_md5_init(&md5_ctx); DEBUGP("doing stream md5"); len=buffer_size; while (len == buffer_size) { STOPIF_SVNERR( svn_stream_read, (stream, buffer, &len)); if (md5) apr_md5_update(&md5_ctx, buffer, len); } if (md5) apr_md5_final(md5, &md5_ctx); ex: return status; } /** Delays execution until the next second. * Needed because of filesystem granularities; FSVS only stores seconds, * not more. */ int hlp__delay(time_t start, enum opt__delay_e which) { if (opt__get_int(OPT__DELAY) & which) { DEBUGP("waiting ..."); if (!start) start=time(NULL); /* We delay with 25ms accuracy. */ while (time(NULL) <= start) usleep(25000); } return 0; } /** -. * We could either generate a name ourself, or just use this function - and * have in mind that we open and close a file, just to overwrite it * immediately. * * But by using that function we get the behaviour that * subversion users already know. */ int hlp__rename_to_unique(char *fn, char *extension, const char **unique_name, apr_pool_t *pool) { int status; svn_error_t *status_svn; apr_file_t *tmp_f; /* Thank you, subversion 1.6.4. "Path not canonical" - pfft. */ if (fn[0] == '.' 
&& fn[1] == PATH_SEPARATOR) fn+=2; STOPIF_SVNERR( svn_io_open_unique_file2, (&tmp_f, unique_name, fn, extension, svn_io_file_del_on_close, pool)); STOPIF( apr_file_close(tmp_f), NULL); DEBUGP("got unique name for local file: %s", *unique_name); if (rename(fn, *unique_name) == -1) { /* Remember error. */ status=errno; DEBUGP("renaming %s to %s gives an error %d.", fn, *unique_name, status); /* The rename() should kill the file. * But if it fails, we'd keep that file here - and that's not * ncessary. */ if (unlink(*unique_name) == -1) /* On error just put a debug message - we'll fail either way. */ DEBUGP("Cannot unlink %s: %d", *unique_name, errno); STOPIF(status, "Cannot rename local file to unique name %s", *unique_name); } ex: return status; } /** -. * Caches the result, so that the configuration is only fetched a single time. */ int hlp__get_svn_config(apr_hash_t **config) { int status; svn_error_t *status_svn; static apr_hash_t *cfg=NULL; char *cp; int len; status=0; /* We assume that a config hash as NULL will never be returned. * (Else we'd try to fetch it more than once.) */ if (!cfg) { /* Subversion doesn't like "//" in pathnames - even if it's just the * local configuration area. So we have to normalize them. */ len = opt__get_int(OPT__CONFIG_DIR)==0 ? opt__get_int(OPT__CONF_PATH)+strlen(DEFAULT_CONFIGDIR_SUB)+1 : opt__get_int(OPT__CONFIG_DIR); STOPIF( hlp__alloc( &cp, len), NULL); if (opt__get_int(OPT__CONFIG_DIR)==0) hlp__pathcopy(cp, &len, opt__get_string(OPT__CONF_PATH), DEFAULT_CONFIGDIR_SUB, NULL); else hlp__pathcopy(cp, &len, opt__get_string(OPT__CONFIG_DIR), NULL); opt__set_string(OPT__CONFIG_DIR, PRIO_MUSTHAVE, cp); opt__set_int(OPT__CONFIG_DIR, PRIO_MUSTHAVE, len); STOPIF_SVNERR( svn_config_get_config, (&cfg, opt__get_string(OPT__CONFIG_DIR), global_pool)); DEBUGP("reading config from %s", opt__get_string(OPT__CONFIG_DIR)); } *config=cfg; ex: return status; } /** -. * If \a source is not \c NULL \a len bytes are copied. 
* The buffer is \b always \c \\0 terminated. */ int hlp__strnalloc(int len, char **dest, const char const *source) { int status; STOPIF( hlp__alloc( dest, len+1), NULL); if (source) memcpy(*dest, source, len); (*dest)[len]=0; ex: return status; } /** -. */ int hlp__strmnalloc(int len, char **dest, const char const *source, ...) { int status; va_list vl; char *dst; /* We don't copy now, because we want to know the end of the string * anyway. */ STOPIF( hlp__alloc( dest, len), NULL); va_start(vl, source); dst=*dest; while (source) { while (1) { BUG_ON(len<=0); if (! (*dst=*source) ) break; source++, dst++, len--; } source=va_arg(vl, char*); } ex: return status; } /** -. * That is not defined in terms of \c hlp__alloc(), because glibc might do * some magic to get automagically 0-initialized memory (like mapping \c * /dev/zero). */ int hlp__calloc(void *output, size_t nmemb, size_t count) { int status; void **tgt=output; status=0; *tgt=calloc(nmemb, count); STOPIF_CODE_ERR(!*tgt, ENOMEM, "calloc(%llu, %llu) failed", (t_ull)nmemb, (t_ull)count); ex: return status; } /** -. */ int hlp__realloc(void *output, size_t size) { int status; void **tgt; status=0; tgt=output; *tgt=realloc(*tgt, size); /* Allocation of 0 bytes might (legitimately) return NULL. */ STOPIF_CODE_ERR(!*tgt && size, ENOMEM, "(re)alloc(%llu) failed", (t_ull)size); ex: return status; } /** -. */ char* hlp__get_word(char *input, char **word_start) { input=hlp__skip_ws(input); if (word_start) *word_start=input; while (*input && !isspace(*input)) input++; return input; } #ifndef HAVE_STRSEP /** -. * Copyright (C) 2004, 2007 Free Software Foundation, Inc. * Written by Yoann Vandoorselaere . * Taken from http://www.koders.com/c/fid4F16A5D73313ADA4FFFEEBA99BE639FEC82DD20D.aspx?s=md5 */ char * strsep (char **stringp, const char *delim) { char *start = *stringp; char *ptr; if (start == NULL) return NULL; /* Optimize the case of no delimiters. 
*/ if (delim[0] == '\0') { *stringp = NULL; return start; } /* Optimize the case of one delimiter. */ if (delim[1] == '\0') ptr = strchr (start, delim[0]); else /* The general case. */ ptr = strpbrk (start, delim); if (ptr == NULL) { *stringp = NULL; return start; } *ptr = '\0'; *stringp = ptr + 1; return start; } #endif int hlp__compare_string_pointers(const void *a, const void *b) { const char * const *c=a; const char * const *d=b; return strcoll(*c,*d); } int hlp__only_dir_mtime_changed(struct estat *sts) { int st; st = sts->entry_status; return opt__get_int(OPT__DIR_EXCLUDE_MTIME) && S_ISDIR(sts->st.mode) && (!(st & FS_CHILD_CHANGED)) && (st & FS__CHANGE_MASK) == FS_META_MTIME; } fsvs-1.2.6/src/revert.h0000644000202400020240000000366611073666373013745 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __REVERT_H__ #define __REVERT_H__ #include "actions.h" /** \file * \ref revert command header file. */ /** \ref revert main action function. */ work_t rev__work; /** Has to fetch the decoder from the repository. */ #define DECODER_UNKNOWN ((char*)-1) /** Gets a clean copy from the repository. */ int rev__install_file(struct estat *sts, svn_revnum_t revision, char *decoder, apr_pool_t *pool); /** Go through the tree, and fetch all changed entries (estimated * per \c remote_status). */ int rev__do_changed(struct estat *dir, apr_pool_t *pool); /** Gets and writes the properties of the given \a sts into its \ref prop * file. */ int rev__get_props(struct estat *sts, char *utf8_path, svn_revnum_t revision, apr_pool_t *pool); /** Gets the entry into a temporary file. 
*/ int rev__get_text_to_tmpfile(char *loc_url, svn_revnum_t revision, char *encoder, char *filename_base, char **filename, struct estat *sts_for_manber, struct estat *output_sts, apr_hash_t **props, apr_pool_t *pool); /** Just a wrapper for rev__get_text_to_stream(). */ int rev__get_text_into_buffer(char *loc_url, svn_revnum_t revision, const char *decoder, svn_stringbuf_t **output, struct estat *sts_for_manber, struct estat *output_sts, apr_hash_t **props, apr_pool_t *pool); /** General function to get a file into a stream. */ int rev__get_text_to_stream( char *loc_url, svn_revnum_t revision, const char *decoder, svn_stream_t *output, struct estat *sts_for_manber, struct estat *output_sts, apr_hash_t **props, apr_pool_t *pool); #endif fsvs-1.2.6/src/sync.c0000644000202400020240000002633411264677022013375 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * Synchronize from repository - \ref sync-repos command. * * Load the repository tree and store it as last used- * so that the next commit sends all changes against this * current repository state. * * */ /** \addtogroup cmds * * \section sync-repos * * \code * fsvs sync-repos [-r rev] [working copy base] * \endcode * * This command loads the file list afresh from the repository. \n * A following commit will send all differences and make the repository data * identical to the local. * * This is normally not needed; the only use cases are * - debugging and * - recovering from data loss in the \ref o_waa "$FSVS_WAA" area. * * It might be of use if you want to backup two similar machines. 
Then you * could commit one machine into a subdirectory of your repository, make a * copy of that directory for another machine, and * \c sync this other directory on the other machine. * * A commit then will transfer only _changed_ files; so if the two machines * share 2GB of binaries (\c /usr , \c /bin , \c /lib , ...) then * these 2GB are still shared in the repository, although over * time they will deviate (as both committing machines know * nothing of the other path with identical files). * * This kind of backup could be substituted by two or more levels of * repository paths, which get \e overlaid in a defined priority. * So the base directory, which all machines derive from, will be committed * from one machine, and it's no longer necessary for all machines to send * identical files into the repository. * * The revision argument should only ever be used for debugging; if you fetch * a filelist for a revision, and then commit against later revisions, * problems are bound to occur. * * * \note There's issue 2286 in subversion which describes sharing * identical files in the repository in unrelated paths. By using this * relaxes the storage needs; but the network transfers would still be much * larger than with the overlaid paths. * * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "global.h" #include "status.h" #include "checksum.h" #include "est_ops.h" #include "cache.h" #include "revert.h" #include "props.h" #include "commit.h" #include "waa.h" #include "url.h" #include "status.h" #include "update.h" #include "racallback.h" #include "helper.h" /** Get entries of directory, and fill tree. * * Most of the data should already be here; we just * fill the length of the entries in. 
* */ int sync___recurse(struct estat *cur_dir, apr_pool_t *pool) { int status; svn_error_t *status_svn; apr_pool_t *subpool, *subsubpool; apr_hash_t *dirents; char *path; const char *name; const void *key; void *kval; apr_hash_index_t *hi; svn_dirent_t *val; char *url, *path_utf8; struct svn_string_t *decoder; struct estat *sts; svn_stringbuf_t *entry_text; char *link_local; status=0; subpool=subsubpool=NULL; /* get a fresh pool */ STOPIF( apr_pool_create_ex(&subpool, pool, NULL, NULL), "no pool"); STOPIF( ops__build_path( &path, cur_dir), NULL); DEBUGP("list of %s", path); STOPIF( hlp__local2utf8(path, &path_utf8, -1), NULL); STOPIF_SVNERR( svn_ra_get_dir2, (current_url->session, &dirents, NULL, NULL, /* Use "" for the root, and cut the "./" for everything else. */ (cur_dir->parent) ? path_utf8 + 2 : "", current_url->current_rev, SVN_DIRENT_HAS_PROPS | SVN_DIRENT_HAS_PROPS | SVN_DIRENT_KIND | SVN_DIRENT_SIZE, subpool)); for( hi=apr_hash_first(subpool, dirents); hi; hi = apr_hash_next(hi)) { apr_hash_this(hi, &key, NULL, &kval); name=key; val=kval; STOPIF( cb__add_entry(cur_dir, name, NULL, NULL, 0, 0, NULL, 0, (void**)&sts), NULL); if (url__current_has_precedence(sts->url) && !S_ISDIR(sts->st.mode)) { /* File or special entry. */ sts->st.size=val->size; decoder= sts->user_prop ? apr_hash_get(sts->user_prop, propval_updatepipe, APR_HASH_KEY_STRING) : NULL; if (S_ISREG(sts->st.mode) && !decoder) { /* Entry finished. */ } else if (S_ISREG(sts->st.mode) && val->size > 8192) { /* Make this size configurable? Remove altogether? After all, the * processing time needs not be correlated to the encoded size. */ DEBUGP("file encoded, but too big for fetching (%llu)", (t_ull)val->size); } else { /* Now we're left with special devices and small, encoded files. */ STOPIF( url__full_url(sts, &url), NULL); /* get a fresh pool */ STOPIF( apr_pool_create_ex(&subsubpool, subpool, NULL, NULL), "no pool"); /* That's the third time we access this file ... 
* svn_ra needs some more flags for the directory listing functions. */ STOPIF( rev__get_text_into_buffer(url, sts->repos_rev, decoder ? decoder->data : NULL, &entry_text, NULL, sts, NULL, subsubpool), NULL); sts->st.size=entry_text->len; DEBUGP("parsing %s as %llu: %s", url, (t_ull)sts->st.size, entry_text->data); /* If the entry exists locally, we might have a more detailed value * than FT_ANYSPECIAL. */ if (!S_ISREG(sts->st.mode)) /* We don't need the link destination; we already got the MD5. */ STOPIF( ops__string_to_dev(sts, entry_text->data, NULL), NULL); /* For devices there's no length to compare; the rdev field * shares the space. * And for normal files the size is already correct. */ if (S_ISLNK(sts->st.mode)) { /* Symlinks get their target translated to/from the locale, so * they might have a different length. */ STOPIF( hlp__utf82local(entry_text->data+strlen(link_spec), &link_local, -1), NULL); sts->st.size = strlen(link_local); } if (subsubpool) apr_pool_destroy(subsubpool); } /* After this entry is done we can return a bit of memory. */ if (sts->user_prop) { apr_pool_destroy(apr_hash_get(sts->user_prop, "", 0)); sts->user_prop=NULL; } DEBUGP_dump_estat(sts); } /* We have to loop even through obstructed directories - some * child may not be overlayed. */ if (val->kind == svn_node_dir) { STOPIF( sync___recurse( sts, subpool), NULL); } } ex: if (subpool) apr_pool_destroy(subpool); return status; } /** Repository callback. * * Here we get most data - all properties and the tree structure. */ int sync__progress(struct estat *sts) { int status; struct sstat_t st; char *path; status=0; STOPIF( ops__build_path(&path, sts), NULL); STOPIF( waa__delete_byext( path, WAA__FILE_MD5s_EXT, 1), NULL); STOPIF( waa__delete_byext( path, WAA__PROP_EXT, 1), NULL); /* We get the current type in sts->new_rev_mode_packed, but we need * sts->st.mode set for writing. 
*/ sts->st.mode = (sts->st.mode & ~S_IFMT) | PACKED_to_MODE_T(sts->new_rev_mode_packed); STOPIF( st__rm_status(sts), NULL); /* If the entry is a special node (symlink or device), we have * a little problem here. * * On a sync we don't get the text of the entries - so we don't * know which kind of special entry we have, and so we don't know * which unix-mode (S_ISCHR etc.) we have to use and write. * * We could do one of those: * - Fetch the entry to know the type. * This is slow, because we have to do a roundtrip for each entry, * and that perhaps a thousand times. * - We could use another property. * That makes us incompatible to subversion. * - We could remove the check in ops__save_1entry(). * Which mode should we write? * * If the entry exists and we can lstat() it, we have no problem - * we know a correct mode, and the MD5 says whether the data matches. * We just have to repair the entry_type field. * * * The old sync-repos didn't set FT_ANYSPECIAL, and just wrote * this entry as a file. * So it would be shown as removed. * * We do that now, too. If the entry gets reverted, we have it's * correct meta-data - until then we don't worry. * */ if ( hlp__lstat(path, &st) == 0 ) { if ((sts->st.mode & S_IFMT) == 0) { sts->st=st; } /* We fetch the dev/inode to get a correct sorting. * * We don't use the whole inode - we'd store the *current* mtime * and ctime and don't know whether this file has changed. * We use ctime/mtime only *if they are empty*, ie. haven't been given * from the repository. */ sts->st.ino=st.ino; sts->st.dev=st.dev; sts->st.size=st.size; /* We don't store that in the repository, so take the current value. 
* */ sts->st.ctim=st.ctim; if (!(sts->remote_status & FS_META_MTIME)) sts->st.mtim=st.mtim; if (!(sts->remote_status & FS_META_OWNER)) sts->st.uid=st.uid; if (!(sts->remote_status & FS_META_GROUP)) sts->st.gid=st.gid; if (!(sts->remote_status & FS_META_UMODE)) sts->st.mode=st.mode; /* If we do a directory, we set the \c RF_CHECK flag, so that new * entries will be found. */ if (S_ISDIR(sts->st.mode)) sts->flags |= RF_CHECK; } else { if (S_ISANYSPECIAL(sts->st.mode)) { /* We don't know what it really is. BUG? */ sts->st.mode= (sts->st.mode & ~S_IFMT) | S_IFREG; } } /* We have to re-sort the directories. */ if (S_ISDIR(sts->st.mode)) sts->to_be_sorted=1; ex: return status; } /** -. * * Could possibly be folded into the new update. */ int sync__work(struct estat *root, int argc, char *argv[]) { int status; svn_error_t *status_svn; svn_revnum_t rev; char *strings; int string_space; status=0; status_svn=NULL; STOPIF( waa__find_base(root, &argc, &argv), NULL); STOPIF( url__load_nonempty_list(NULL, 0), NULL); /* We cannot easily format the paths for arguments ... first, we don't * have any (normally) */ string_space=0; strings=NULL; while ( ! ( status=url__iterator(&rev) ) ) { if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("sync-repos for %s rev\t%llu.\n", current_url->url, (t_ull)rev); /* We have nothing ... */ current_url->current_rev=0; STOPIF( cb__record_changes(root, rev, current_url->pool), NULL); /* set new revision */ current_url->current_rev=rev; STOPIF( ci__set_revision(root, rev), NULL); STOPIF( sync___recurse(root, current_url->pool), NULL); } STOPIF_CODE_ERR( status != EOF, status, NULL); /* Take the correct values for the root. */ STOPIF( hlp__lstat( ".", &root->st), NULL); root->flags |= RF_CHECK; /* See the comment at the end of commit.c - atomicity for writing * these files. */ STOPIF( waa__output_tree(root), NULL); /* The current revisions might have changed. */ STOPIF( url__output_list(), NULL); /* The copyfrom database is no longer valid. 
*/ STOPIF( waa__delete_byext(wc_path, WAA__COPYFROM_EXT, 1), NULL); ex: STOP_HANDLE_SVNERR(status_svn); ex2: return status; } fsvs-1.2.6/src/cat.h0000644000202400020240000000076211073666422013172 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __CACHE_H__ #define __CACHE_H__ /** \file * \ref cat action header file. */ work_t cat__work; #endif fsvs-1.2.6/src/checkout.h0000644000202400020240000000111110756467655014233 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __CHECKOUT_H__ #define __CHECKOUT_H__ #include "actions.h" #include "global.h" /** \file * Header file for the \ref checkout action. */ /** \ref checkout action. */ work_t co__work; #endif fsvs-1.2.6/src/hash_ops.h0000644000202400020240000000637011013210520014202 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __HASH_OPS_H #define __HASH_OPS_H #include "global.h" #include /** \file * Hash operations header file. 
* * Similar to the property operations; but these here work on a hash of * lists, to be able to store multiple files with the same name. * */ /** A convenience type. */ typedef struct hash_s *hash_t; /** The abstract hash type. */ struct hash_s { /** We use a GDBM file as a hash, so we don't have to have all data in * memory. */ GDBM_FILE db; /** Storage for transactional \c DELETE. * * Eg on commit only when everything was ok we may remove the used * copyfrom entries; here we store the keys to remove. */ /* Should that simply be a hash_t? We'd get a bit cleaner code, but would * waste a few bytes. */ GDBM_FILE to_delete; /** Allocated copy of the filename, if HASH_REMEMBER_FILENAME was set. */ char *filename; }; /** Create a new hash for \a wcfile with the given \a name. */ int hsh__new(char *wcfile, char *name, int gdbm_mode, hash_t *hash); /** Only a temporary hash; not available in \c gdbm. * Unless the predefined constants include the value \c 0, and ORed * together give -1, this is a distinct value. */ #define HASH_TEMPORARY ((GDBM_NEWDB | GDBM_READER | \ GDBM_WRCREAT | GDBM_WRITER) +1) /** This flag tells hsh__new() to remember the filename, for later * cleaning-up. */ #define HASH_REMEMBER_FILENAME (0x40000000) /** \section hsh__lists Lists addressed by some hash. * @{ */ /** Number of slots reserved. */ #define HASH__LIST_MAX (32) /** For short-time storage (single program run): Insert the pointer \a * value into the \a hash at \a key. */ int hsh__insert_pointer(hash_t hash, datum key, void* value); /** Get an list of \a found entries from \a hash addressed by \a * current_key into the (statically allocated) \a arr. */ int hsh__list_get(hash_t hash, datum current_key, datum *next_key, struct estat **arr[], int *found); /** @} */ /** \section hsh_simple Simple hash operations. * These are just wrappers, and are incompatible to the other hash * functions - they don't store any links to other elements. * @{ */ /** Store character strings in the hash table. 
*/ int hsh__store_charp(hash_t db, char *key, char *value); /** Store some value in the hash table. */ int hsh__store(hash_t db, datum key, datum value); /** Read \a value associated with some \a key in \a db. * Memory of datum::dptr is malloc()ed. */ int hsh__fetch(hash_t db, datum key, datum *value); /** Find first \a key. */ int hsh__first(hash_t db, datum *key); /** Find next \a key. */ int hsh__next(hash_t db, datum *key, const datum *oldkey); /** Registers some key for deletion on database close. */ int hsh__register_delete(hash_t db, datum key); /** Close a property file. */ int hsh__close(hash_t db, int has_failed); /** Collect garbage in the hash table. */ int hsh__collect_garbage(hash_t db, int *did_remove); /** @} */ #endif fsvs-1.2.6/src/cache.h0000644000202400020240000001066612467104255013471 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __CACHE_H__ #define __CACHE_H__ #include "helper.h" /** \file * Cache header file. */ /** Type of data we're caching; must be size-compatible with a pointer, as * such is stored in some cases (eg ops__build_path()). * */ typedef long cache_value_t; /** What an internal cache entry looks like. * Is more or less a buffer with (allocated) length; the real length is * normally specified via some \\0 byte, by the caller. (A string.) */ struct cache_entry_t { /** ID of entry */ cache_value_t id; /** User-data for hashes */ cache_value_t hash_data; /** Length of data */ int len; #if 0 /** Measurement of accesses */ short accessed; #endif /** Copy of data. */ char data[1]; }; #define CACHE_DEFAULT (4) /** Cache structure. 
* The more \b active an entry is, the more at the start of the array. * * If a \c struct \ref cache_t is allocated, its \c .max member should be * set to the default \ref CACHE_DEFAULT value. * * For a \c struct \ref cache_t* the function \ref cch__new_cache() must be * used. */ struct cache_t { /** For how many entries is space allocated? */ int max; /** How many entries are used. */ int used; /** Which entry was the last accessed. * * If the array of entries looked like this, with \c B accessed after \c * C after \c D: * \dot * digraph { * rank=same; * D -> C -> B -> Z -> Y -> ppp -> E; * ppp [label="..."]; * B [label="B=LRU", style=bold]; * } * \enddot * After setting a new entry \c A it looks like that: * \dot * digraph { * rank=same; * D -> C -> B -> A -> Y -> ppp -> E; * ppp [label="..."]; * A [label="A=LRU", style=bold]; * } * \enddot * */ int lru; /** Cache entries, \c NULL terminated. */ struct cache_entry_t *entries[CACHE_DEFAULT+1]; }; /** Adds a copy of the given data (\a id, \a data with \a len) to the \a * cache; return the new allocated data pointer in \a copy. * */ int cch__add(struct cache_t *cache, cache_value_t id, const char *data, int len, char **copy); /** Find an entry, return index and/or others. */ int cch__find(struct cache_t *cache, cache_value_t id, int *index, char **data, int *len); /** Copy the given data into the given cache entry. */ int cch__entry_set(struct cache_entry_t **cache, cache_value_t id, const char *data, int len, int copy_old_data, char **copy); /** Look for the same \a id in the \a cache, and overwrite or append the * given data. */ int cch__set_by_id(struct cache_t *cache, cache_value_t id, const char *data, int len, int copy_old_data, char **copy); /** Makes the given index the head of the LRU list. */ void cch__set_active(struct cache_t *cache, int index); /** Create a new \a cache, with a user-defined size. 
* * I'd liked to do something like * \code * static struct cache_t *cache=cch__new_cache(32); * \endcode * but that couldn't return error codes (eg. \c ENOMEM). * We'd need something like exceptions .... * * So I take the easy route with an inline function. Additional cost: a * single "test if zero". * * \note Another way could have been: * \code * static struct cache_t *cache; * static int status2=cch__new_cache(&cache, 32); * * if (status2) { status=status2; goto ex; } * * ex: * return status; * \endcode * But that's not exactly "better", and still does a "test if zero" on each * run. * * */ __attribute__((gnu_inline, always_inline)) static inline int cch__new_cache(struct cache_t **cache, int max) { int status, len; status=0; if (!*cache) { len= sizeof(struct cache_entry_t*)*(max-CACHE_DEFAULT)+ sizeof(struct cache_t); STOPIF( hlp__alloc( cache, len), NULL); memset(*cache, 0, len); (*cache)->max=max; } ex: return status; } /** Interpret the \a cache as a hash, look for the \a key, returning the * \ref cache_entry_t::hash_data or \c ENOENT. */ int cch__hash_find(struct cache_t *cache, const char *key, cache_value_t *data); /** Interpret the \a cache as a hash and store the given \a value to the \a * key. */ int cch__hash_add(struct cache_t *cache, const char *key, cache_value_t value); #endif fsvs-1.2.6/src/direnum.h0000644000202400020240000000265611106525735014070 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __DIRENUM_H__ #define __DIRENUM_H__ /** \file * Directory enumerator header file. */ // for alphasort #include #include "global.h" /** This function reads a directory into a self-allocated memory area. 
*/ int dir__enumerator(struct estat *this, int est_count, int by_name) ; /** Sorts the entries of the directory \a sts by name into the * estat::by_name array, which is reallocated and NULL-terminated. */ int dir__sortbyname(struct estat *sts); /** Sorts the existing estat::by_inode array afresh, by device/inode. */ int dir__sortbyinode(struct estat *sts); int dir___f_sort_by_inode(struct estat **a, struct estat **b); int dir___f_sort_by_inodePP(struct estat *a, struct estat *b); int dir___f_sort_by_name(const void *a, const void *b); int dir___f_sort_by_nameCC(const void *a, const void *b); int dir___f_sort_by_nameCS(const void *a, const void *b); /** How many bytes an average filename needs. * Measured on a debian system: * \code * find / -printf "%f\n" | wc * \endcode * */ #define ESTIMATED_ENTRY_LENGTH (15) #endif fsvs-1.2.6/src/revert.c0000644000202400020240000011752011264677022013726 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include "revert.h" #include "waa.h" #include "est_ops.h" #include "racallback.h" #include "warnings.h" #include "resolve.h" #include "checksum.h" #include "props.h" #include "helper.h" #include "url.h" #include "update.h" #include "cp_mv.h" #include "status.h" /** \file * \ref revert action. * This reverts local changes, ie. resets the given paths to the repository * versions. * This cannot be undone by fsvs - keep backups :-) */ /** \addtogroup cmds * * \section revert * * \code * fsvs revert [-rRev] [-R] PATH [PATH...] 
* \endcode * * This command undoes local modifications: * - An entry that is marked to be unversioned gets this flag removed. * - For a already versioned entry (existing in the repository) the local * entry is replaced with its repository version, and its status and * flags are cleared. * - An entry that is a \b modified copy destination gets reverted to the * copy source data. * - Manually added entries are changed back to "N"ew.\b * * Please note that implicitly copied entries, ie. entries that are marked * as copied because some parent directory is the base of a copy, can * not be un-copied; they can only be reverted to their original * (copied-from) data, or removed. * * If you want to undo a \c copy operation, please see the \ref uncopy * "uncopy" command. * * See also \ref howto_entry_statii. * * If a directory is given on the command line all versioned entries in * this directory are reverted to the old state; this behaviour can be * modified with \ref glob_opt_rec "-R/-N", or see below. * * The reverted entries are printed, along with the status they had \b * before the revert (because the new status is per definition \e * unchanged). * * If a revision is given, the entries' data is taken from this revision; * furthermore, the \b new status of that entry is shown. * * \note Please note that mixed revision working copies are not (yet) * possible; the \e BASE revision is not changed, and a simple \c revert * without a revision arguments gives you that. \n * By giving a revision parameter you can just choose to get the text from * a different revision. * * * \subsection rev_cmp_up Difference to update * If something doesn't work as it should in the installation you can * revert entries until you are satisfied, and directly \ref commit * "commit" the new state. 
* * In contrast, if you \ref update "update" to an older version, you * - cannot choose single entries (no mixed revision working copies yet), * - and you cannot commit the old version with changes, as the "skipped" * (later) changes will create conflicts in the repository. * * * \subsection rev_del Currently only known entries are handled. * * If you need a switch (like \c --delete in \c rsync(1) ) to remove * unknown (new, not yet versioned) entries, to get the directory in the * exact state it is in the repository, please tell the dev\@ * mailing list. * * \todo Another limitation is that just-deleted just-committed entries * cannot be fetched via \c revert, as FSVS no longer knows about them. \n * TODO: If a revision is given, take a look there, and ignore the local * data? \n * As a workaround you could use the \ref cat "cat" and/or \ref checkout * "checkout" commands to fetch repository-only data. * * * \subsection rev_p Removed directory structures * * If a path is specified whose parent is missing, \c fsvs complains. \n * We plan to provide a switch (probably \c -p), which would create (a * sparse) tree up to this entry. * * * \subsection rev_rec Recursive behaviour * When the user specifies a non-directory entry (file, device, symlink), * this entry is reverted to the old state. * * If the user specifies a directory entry, these definitions should apply: * *
command line switchresult *
\c -N this directory only (meta-data), *
none this directory, and direct children of the directory, *
\c -R this directory, and the complete tree below. *
* * * \subsection rev_copied Working with copied entries * If an entry is marked as copied from another entry (and not committed!), * a \c revert will fetch the original copyfrom source. To undo the copy * setting use the \ref uncopy "uncopy" command. * * */ /** List of (bit-)flags for rev___undo_change(). * These have an order, ie. SET_CURRENT overrides REVERT_MTIME. */ enum rev___dir_change_flag_e { NOT_CHANGED=0, REVERT_MTIME=1, SET_CURRENT=2, GET_TSTAMP=0x1000, }; /** A count of files reverted in \b this run. */ static int number_reverted=0; static svn_revnum_t last_rev; #define REV___GETFILE_MAX_CACHE (4) /** -. * * This function fetches an non-directory entry \a loc_url from the * repository in \c current_url, and writes it to \a output - which gets * closed via \c svn_stream_close(). * * \a decoder should be set correctly. * \todo if it's \c NULL, but an update-pipe is set on the entry, the data * has to be read from disk again, to be correctly processed. * * No meta-data is set, and the \c svn:special attribute is ignored. * * The revision number must be valid, it may not be \c SVN_INVALID_REVNUM. * * If \a sts_for_manber is \c NULL, no manber hashes are calculated. * * If \a output_sts is \c NULL, the meta-data properties are kept in \a * props; else its fields are filled (as far as possible) with data. That * includes the estat::repos_rev field. * * The user-specified properties can be returned in \a props. * * As this just returns the data in a stream, the files' type mostly * doesn't matter; it just may not be a directory, because we'd get an * error from subversion. * * \a loc_url must be given in the current locale; it will be converted to * UTF8 before being sent to the subversion libraries. 
*/ int rev__get_text_to_stream( char *loc_url, svn_revnum_t revision, const char *decoder, svn_stream_t *output, struct estat *sts_for_manber, struct estat *output_sts, apr_hash_t **props, apr_pool_t *pool) { int status; svn_error_t *status_svn; svn_string_t *prop_val; struct encoder_t *encoder; char *relative_url, *utf8_url; apr_hash_t *properties; char target_rev[10]; encoder=NULL; status=0; DEBUGP("getting file %s@%s from %s", loc_url, hlp__rev_to_string(revision), current_url->url); if (strncmp(loc_url, "./", 2) == 0) { /* Skip ./ in front. */ relative_url=loc_url+2; } else { /* It could be an absolute value. */ /* Verify that the correct URL is taken. * The "/" that's not stored at the end of the URL must be there, too. * */ if (strncmp(current_url->url, loc_url, current_url->urllen) == 0 && loc_url[current_url->urllen] == '/') loc_url += current_url->urllen+1; /* If the string doesn't match, it better be a relative value already * ... else we'll get an error. */ // else STOPIF(EINVAL, "%s not below %s", loc_url, current_url->url); } STOPIF( hlp__local2utf8(loc_url, &utf8_url, -1), NULL); DEBUGP("Got utf8=%s", utf8_url); /* Symlinks have a MD5, too ... so just do that here. */ /* How do we get the filesize here, to determine whether it's big * enough for manber block hashing? */ /* Short answer: We don't. * We need to get the MD5 anyway; there's svn_stream_checksummed(), * but that's just one chainlink more, and so we simply use our own * function. */ if (sts_for_manber) STOPIF( cs__new_manber_filter(sts_for_manber, output, &output, pool), NULL); /* If there's a fsvs:update-pipe, we would know when we have the file * here - which is a bit late, because we'd have to read/write the entire * file afresh. So we remember the property in the cb__record_changes() * call chain, and look for the string here. * * But that works only if we *know* that we're processing the right * version. 
If the filter changed for this file, we might try to decode * with the wrong one - eg. for diff, where multiple versions are handled * in a single call. * * We know that we have the correct value locally if the wanted revision * is the same as we've in the entry; if they are different, we have to * ask the repository for the data. * * * Note: we're trading network-round-trips for local disk bandwidth. * The other way would be to fetch the data encoded, *then* look in the * properties we just got for the pipe-command, and re-pipe the file * through the command given. * * But: the common case of updates uses cb__record_changes(), which * already gets the correct value. So in this case we need not look any * further. */ /* \todo For things like export we could provide a commandline parameter; * use network, or use disk. * For a local-remote diff we could pipe the data into the diff program; * but that wouldn't work for remote-remote diffing, as diff(1) doesn't * accept arbitrary filehandles as input (and /proc/self/fd/ isn't * portable). */ /* Fetch decoder from repository. */ if (decoder == DECODER_UNKNOWN) { STOPIF_SVNERR_TEXT( svn_ra_get_file, (current_url->session, utf8_url, revision, NULL, &revision, &properties, pool), "Fetching entry \"%s/%s\"@%s", current_url->url, loc_url, hlp__rev_to_string(revision)); prop_val=apr_hash_get(properties, propval_updatepipe, APR_HASH_KEY_STRING); decoder=prop_val ? prop_val->data : NULL; } /* First decode, then do manber-hashing. As the filters are prepended, we * have to do that after the manber-filter. 
*/ if (decoder) { snprintf(target_rev, sizeof(target_rev), "%llu", (t_ull)revision); setenv(FSVS_EXP_TARGET_REVISION, target_rev, 1); STOPIF( hlp__encode_filter(output, decoder, 1, loc_url, &output, &encoder, pool), NULL); if (output_sts) encoder->output_md5= &(output_sts->md5); } STOPIF_SVNERR_TEXT( svn_ra_get_file, (current_url->session, utf8_url, revision, output, &revision, &properties, pool), "Fetching entry %s/%s@%s", current_url->url, loc_url, hlp__rev_to_string(revision)); DEBUGP("got revision %llu", (t_ull)revision); /* svn_ra_get_file doesn't close the stream. */ STOPIF_SVNERR( svn_stream_close, (output)); output=NULL; if (output_sts) { output_sts->repos_rev = revision; STOPIF( prp__set_from_aprhash( output_sts, properties, ONLY_KEEP_USERDEF, NULL, pool), NULL); } if (props) *props=properties; ex: return status; } /** -. * Mostly the same as \c rev__get_text_to_stream(), but returning a * (temporary) \a filename based on \a filename_base, if this is not \c * NULL. * * The entries' file type isn't taken into account; the file may have the * data "symlink XXX", etc. * * If \a filename_base is \c NULL, the file will be put in a real temporary * location. * * \a output_stat is used to store the parsed properties of the entry. * */ int rev__get_text_to_tmpfile(char *loc_url, svn_revnum_t revision, char *encoder, char *filename_base, char **filename, struct estat *sts_for_manber, struct estat *output_sts, apr_hash_t **props, apr_pool_t *pool) { int status; apr_file_t *apr_f; svn_stream_t *output; status=0; STOPIF( waa__get_tmp_name( filename_base, filename, &apr_f, pool), NULL); output=svn_stream_from_aprfile(apr_f, pool); STOPIF( rev__get_text_to_stream( loc_url, revision, encoder, output, sts_for_manber, output_sts, props, pool), NULL); /* svn_ra_get_file() doesn't close. */ STOPIF( apr_file_close(apr_f), NULL); ex: return status; } /** -. * * Does no validation of input - might fill entire memory. 
*/ int rev__get_text_into_buffer(char *loc_url, svn_revnum_t revision, const char *decoder, svn_stringbuf_t **output, struct estat *sts_for_manber, struct estat *output_sts, apr_hash_t **props, apr_pool_t *pool) { int status; svn_stringbuf_t *string; svn_stream_t *stream; status=0; string=svn_stringbuf_create("", pool); stream=svn_stream_from_stringbuf(string, pool); STOPIF( rev__get_text_to_stream(loc_url, revision, decoder, stream, sts_for_manber, output_sts, props, pool), NULL); *output=string; ex: return status; } /** -. * * Meta-data is set; an existing local entry gets atomically removed by \c * rename(). * * If the entry has no URL defined yet, but has a copy flag set (\c * RF_COPY_BASE or \c RF_COPY_SUB), this URL is taken. * * If \a revision is 0, the \c BASE revision is and \a decoder is used; * this is the copy base for copied entries. */ int rev__install_file(struct estat *sts, svn_revnum_t revision, char *decoder, apr_pool_t *pool) { int status; char *filename; char *filename_tmp; apr_hash_t *props; svn_stream_t *stream; apr_file_t *a_stream; apr_pool_t *subpool; char *special_data; char *url; svn_revnum_t rev_to_take; BUG_ON(!pool); STOPIF( ops__build_path(&filename, sts), NULL); /* We know that we have to do something here; but because the order is * depth-first, the parent directory isn't done yet (and shouldn't be, * because it needs permissions and mtime set!). * So it's possible that the target directory doesn't yet exist. * * Note: because we're here for *non-dir* entries, we always have a * parent. */ STOPIF( waa__mkdir(filename, 0), NULL); STOPIF( apr_pool_create(&subpool, pool), "Creating the filehandle pool"); /* When we get a file, old manber-hashes are stale. * So remove them; if the file is big enough, we'll recreate it with * correct data. */ STOPIF( waa__delete_byext(filename, WAA__FILE_MD5s_EXT, 1), NULL); /* Files get written in files; we use the temporarily generated name for * special entries, too. 
*/ /* We could use a completely different mechanism for temp-file-names; * but keeping it close to the target lets us see if we're out of * disk space in this filesystem. (At least if it's not a binding mount * or something similar - but then rename() should fail). * If we wrote the data somewhere else, we'd risk moving it again, across * filesystem boundaries. */ STOPIF( waa__get_tmp_name( filename, &filename_tmp, &a_stream, subpool), NULL); /* It's a bit easier to just take the (small) performance hit, and always * (temporarily) write the data in a file. * If it's a special entry, that will just get read immediately back and * changed to the correct type. * * It doesn't really make much difference, as the file is always created * to get a distinct name. */ stream=svn_stream_from_aprfile(a_stream, subpool); if (sts->url) { url=filename+2; rev_to_take=sts->repos_rev; current_url=sts->url; } else if (sts->flags & RF___IS_COPY) { STOPIF( cm__get_source( sts, filename, &url, &rev_to_take, 0), NULL); STOPIF( url__find( url, ¤t_url), NULL); } else BUG("cannot get file %s", filename); if (revision == 0) { /* BASE wanted; get decoder. */ STOPIF( up__fetch_decoder(sts), NULL); decoder=sts->decoder; } else { /* Arbitrary revision - get decoder. */ rev_to_take=revision; decoder=DECODER_UNKNOWN; } STOPIF( url__open_session(NULL, NULL), NULL); /* We don't give an estat for meta-data parsing, because we have to loop * through the property list anyway - for storing locally. */ STOPIF( rev__get_text_to_stream( url, rev_to_take, decoder, stream, sts, NULL, &props, pool), NULL); if (apr_hash_get(props, propname_special, APR_HASH_KEY_STRING)) { STOPIF( ops__read_special_entry( a_stream, &special_data, 0, NULL, filename_tmp, subpool), NULL); /* The correct type gets set on parsing. */ STOPIF( up__handle_special(sts, filename_tmp, special_data, subpool), NULL); } else { /* If it's not special, it must be an ordinary file. 
*/ /* This is a kind of default value, the mode is set to the repository * value in up__set_meta_data(). */ sts->st.mode = (sts->st.mode & ~S_IFMT) | S_IFREG; sts->local_mode_packed=sts->new_rev_mode_packed= MODE_T_to_PACKED(sts->st.mode); } STOPIF( prp__set_from_aprhash(sts, props, STORE_IN_FS, NULL, subpool), NULL); /* We write all meta-data. If we got no values from the repository, we just * write what we have in the local filesystem back - the temporary file has * just some default values, after all. */ sts->remote_status |= FS_META_CHANGED; DEBUGP("setting meta-data"); STOPIF( up__set_meta_data(sts, filename_tmp), NULL); STOPIF( apr_file_close(a_stream), NULL); DEBUGP("rename to %s", filename); /* rename to correct filename */ STOPIF_CODE_ERR( rename(filename_tmp, filename)==-1, errno, "Cannot rename '%s' to '%s'", filename_tmp, filename); /* The rename changes the ctime. */ STOPIF( hlp__lstat( filename, &(sts->st)), "Cannot lstat('%s')", filename); sts->url=current_url; /* We have to re-sort the parent directory, as the inode has changed * after an rename(). */ sts->parent->to_be_sorted=1; apr_pool_destroy(subpool); subpool=NULL; ex: /* On error remove the temporary file. */ /* Return the original error. */ if (status) unlink(filename_tmp); return status; } /** -. * * The base name of the \a sts gets written to. * * If the merge gives no errors, the temporary files get deleted. * */ int rev__merge(struct estat *sts, const char *file1, const char *common, const char *file2) { int status; pid_t pid; char *output; int hdl; struct sstat_t stat; int retval; STOPIF( ops__build_path(&output, sts), NULL); /* Remember the meta-data of the target */ STOPIF( hlp__lstat(file2, &stat), NULL); pid=fork(); STOPIF_CODE_ERR( pid == -1, errno, "Cannot fork()" ); if (pid == 0) { /* Child. */ /* TODO: Is there some custom merge program defined? * We always use the currently defined property. */ /* TODO: how does that work if an update sends a wrong property? use * both? 
*/ /* Open the output file. */ hdl=open(output, O_WRONLY | O_CREAT, 0700); STOPIF_CODE_ERR( hdl == -1, errno, "Cannot open merge output \"%s\"", output); STOPIF_CODE_ERR( dup2(hdl, STDOUT_FILENO) == -1, errno, "Cannot dup2"); /* No need to close hdl -- it's opened only for that process, and will * be closed when it exec()s. */ /* Remove the ./ at the front */ setenv(FSVS_EXP_CURR_ENTRY, output+2, 1); STOPIF_CODE_ERR( execlp( opt__get_string(OPT__MERGE_PRG), opt__get_string(OPT__MERGE_PRG), opt__get_string(OPT__MERGE_OPT), file1, common, file2, NULL) == -1, errno, "Starting the merge program \"%s\" failed", opt__get_string(OPT__MERGE_PRG)); } STOPIF_CODE_ERR( pid != waitpid(pid, &retval, 0), errno, "waitpid"); DEBUGP("merge returns %d (signal %d)", WEXITSTATUS(retval), WTERMSIG(retval)); /* Can that be? */ STOPIF_CODE_ERR( WIFSIGNALED(retval) || !WIFEXITED(retval), EINVAL, "\"%s\" quits by signal %d.", opt__get_string(OPT__MERGE_PRG), WTERMSIG(retval)); if (WEXITSTATUS(retval) == 0) { DEBUGP("Remove temporary files."); /* Ok, merge done. Remove temporary files, or at least try to. */ if (unlink(file1) == -1) status=errno; if (unlink(file2) == -1) status=errno; if (unlink(common) == -1) status=errno; STOPIF(status, "Removing one or more temporary files " "(merge of \"%s\") failed", output); } else { DEBUGP("non-zero return"); STOPIF_CODE_ERR( WEXITSTATUS(retval) != 1, EINVAL, "\"%s\" exited with error code %d", opt__get_string(OPT__MERGE_PRG), WEXITSTATUS(retval)); STOPIF( res__mark_conflict(sts, file1, file2, common, NULL), NULL); /* Means merge conflicts, but no error. */ } /* As we've just changed the text, set the current mtime. */ sts->st.mtim.tv_sec=time(NULL); /* Now set owner, group, and mode. * This does a lstat(), to get the current ctime and so on; * to make the changes visible, we use the meta-data of the target. */ STOPIF( up__set_meta_data(sts, NULL), NULL); sts->st=stat; ex: return status; } /** -. 
* */ int rev__get_props(struct estat *sts, char *utf8_path, svn_revnum_t revision, apr_pool_t *pool) { int status; svn_error_t *status_svn; apr_hash_t *props; char *filename; if (!utf8_path) { STOPIF( ops__build_path(&filename, sts), NULL); STOPIF( hlp__local2utf8(filename+2, &utf8_path, -1), NULL); } STOPIF_SVNERR( svn_ra_get_file, (current_url->session, utf8_path, revision, NULL, NULL, &props, pool) ); STOPIF( prp__set_from_aprhash(sts, props, STORE_IN_FS, NULL, pool), NULL); ex: return status; } /** Set, reset or fetch the mtime of a directory. * */ int rev___handle_dir_mtime(struct estat *dir, enum rev___dir_change_flag_e dir_flag) { int status; char *path; /* Now, after all has been said and done for the children, set and re-get * the actual meta-data - the mtime has been changed in the meantime * (because of child node creation), and maybe this filesystem's * granularity is worse than on commit; then the timestamps would be * wrong. */ status=0; DEBUGP("dir_flag says %X", dir_flag); if (dir_flag & SET_CURRENT) goto a; if (dir_flag & REVERT_MTIME) goto b; if (dir->remote_status & FS_META_CHANGED) goto c; if (dir_flag & GET_TSTAMP) goto d; /* Is there some better syntax? Some kind of switch with * case-expressions? * * I had a lot of if () { } with partially-overlapping conditions: * if (dir->flag & x) A(); * if (dir->flag & (x|y)) B(); * but gcc wouldn't simply emit a "jmp" to B() after the A() - and I * couldn't easily see that the statements were accumulative. * */ goto x; a: /* If there's an intentional change (like merging), the current time is * taken. */ time( & dir->st.mtim.tv_sec ); b: /* Make sure that the value is written back to the filesystem.*/ dir->remote_status |= FS_META_MTIME; c: STOPIF( up__set_meta_data(dir, NULL), NULL); d: /* ops__update_single_entry() would trash the entry_status field! */ STOPIF( ops__build_path(&path, dir), NULL); STOPIF( hlp__lstat(path, &dir->st), NULL); /* If it had changes, we'll have to check next time. 
*/ if (dir->entry_status & FS_CHANGED) dir->flags |= RF_CHECK; x: ex: return status; } /** Revert action, called for every wanted entry. * Please note that contacting the repository is allowed, as we're only * looping through the local entries. * * Doing operations against the repository while being called *from* the ra * layer (eg. during an update) is not allowed! See also \c * svn_ra_do_update(): * \code * The caller may not perform any RA operations using @a session before * finishing the report, and may not perform any RA operations using * @a session from within the editing operations of @a update_editor. * \endcode * * We may not change \c sts->entry_status - the caller still needs it; and * as this is a revert to \c BASE, we must not modify the entry list * either. * */ int rev___revert_to_base(struct estat *sts, enum rev___dir_change_flag_e *dir_change_flag, apr_pool_t *pool) { int status; svn_revnum_t wanted; struct estat copy; char *path; status=0; STOPIF( ops__build_path(&path, sts), NULL); /* Garbage collection for entries that should be ignored happens in * waa__output_tree(); changing the tree while it's being traversed is * a bit nasty. */ if ( (sts->flags & RF_UNVERSION)) { /* Was marked as to-be-unversioned? Just keep it. */ sts->flags &= ~RF_UNVERSION; DEBUGP("removing unversion on %s", path); } else if ( sts->flags & RF_ADD ) { /* Added entry just gets un-added ... ie. unknown. */ sts->to_be_ignored=1; DEBUGP("removing add-flag on %s", path); } else if (!( (sts->flags & (RF_COPY_BASE | RF_COPY_SUB)) || sts->url ) ) { /* We have no URL, and no copyfrom source ... this is an unknown entry. * Has to be given directly on the command line, but could have happened * via a wildcard - so don't stop working. * Can't do anything about it. */ printf("Cannot revert unknown entry \"%s\".\n", path); status=0; goto ex; } else { /* We know where to get that from. 
*/ DEBUGP("have an URL for %s", path); if ( sts->flags & RF_CONFLICT ) { *dir_change_flag |= REVERT_MTIME; STOPIF( res__remove_aux_files(sts), NULL); } /* If not seen as changed, but target is BASE, we don't need to do * anything. */ if (!opt_target_revisions_given && !(sts->entry_status & FS__CHANGE_MASK)) goto ex; wanted=opt_target_revisions_given ? opt_target_revision : sts->repos_rev; /* The base directory has no revision, and so can't have a meaningful * value printed. */ /* In case we're doing a revert which concernes multiple URLs, they might * have different BASE revisions. * Print the current revision. */ if (sts->parent && (!number_reverted || last_rev != wanted)) { if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("Reverting to revision %s:\n", hlp__rev_to_string(wanted)); last_rev=wanted; } number_reverted++; /* see below */ copy=*sts; DEBUGP("l_st=%s, r_st=%s, old=%p", st__status_string_fromint(sts->entry_status), st__status_string_fromint(sts->remote_status), sts->old); /* Parent directories might just have been created. */ if (!S_ISDIR(sts->st.mode)) { DEBUGP("file was changed, reverting"); /* \todo It would be nice if we could solve meta-data *for the current * revision* only changes without going to the repository - after all, * we know the old values. * \todo Maybe we'd need some kind of parameter, --meta-only? Keep * data, reset rights. * */ /* TODO - opt_target_revision ? */ STOPIF( rev__install_file(sts, wanted, sts->decoder, pool), "Unable to revert entry '%s'", path); *dir_change_flag |= REVERT_MTIME; } else { #if 0 if (sts->entry_status & FS_NEW) conflict(); #endif if (sts->entry_status & FS_REMOVED) { // sts->st.mode ist 040755 (file); old ist NULL. status = (mkdir(path, sts->st.mode & 07777) == -1) ? 
errno : 0; if (status == EEXIST) { DEBUGP("old=%p", sts->old); } DEBUGP("mkdir(%s) says %d", path, status); STOPIF(status, "Cannot create directory '%s'", path); *dir_change_flag |= REVERT_MTIME; /* As we just created the directory, we need *all* meta-data reset. * */ sts->remote_status |= FS_META_CHANGED; } else { /* Code for directories. * As the children are handled by the recursive options and by \a * ops__set_to_handle_bits(), we only have to restore the directories' * meta-data here. */ /* up__set_meta_data() checks remote_status, while we here have * entry_status set. */ sts->remote_status=sts->entry_status; } STOPIF( up__set_meta_data(sts, NULL), NULL); if (sts->entry_status) sts->flags |= RF_CHECK; } } /* There's no change anymore, we're at BASE. * But just printing "...." makes no sense ... show the old status. */ sts->flags |= RF_PRINT; ex: return status; } /** Reset local changes. */ int rev___no_local_change(struct estat *sts) { sts->entry_status=0; return st__progress(sts); } /** -. * Recurses for rev___revert_to_base. * * There's a bit of uglyness here, regarding deleted directories ... * * 1) If we do the tree depth-first, we have to build multiple levels of * directories at once - and store which have to have their meta-data * reset. * * 2) If we do level after level, we might end up with either * a) re-creating a directory, doing its children, then have to re-set the * meta-data of this directory, or * b) just store that the meta-data has to be done for later. * * Currently we do 2a - that seems the simplest, and has no big performance * penalty. 
*/ int rev___local_revert(struct estat *dir, apr_pool_t *pool) { int status; int i, do_undo; struct estat *sts; apr_pool_t *subpool; enum rev___dir_change_flag_e dir_flag; status=0; subpool=NULL; dir_flag= NOT_CHANGED; for(i=0; ientry_count; i++) { sts=dir->by_inode[i]; STOPIF( apr_pool_create(&subpool, pool), "Cannot get a subpool"); do_undo = sts->do_this_entry && (sts->entry_status & FS__CHANGE_MASK) && ops__allowed_by_filter(sts); DEBUGP("on %s: do_undo=%d, st=%s", sts->name, do_undo, st__status_string_fromint(sts->entry_status)); if (do_undo) STOPIF( rev___revert_to_base(sts, &dir_flag, subpool), NULL); if (S_ISDIR(sts->st.mode) && (sts->entry_status & FS_CHILD_CHANGED)) STOPIF( rev___local_revert(sts, subpool), NULL); if (do_undo) STOPIF( st__status(sts), NULL); apr_pool_destroy(subpool); subpool=NULL; } /* We cannot free the memory earlier - the data is needed for the status * output and recursion. */ STOPIF( ops__free_marked(dir, 0), NULL); /* The root entry would not be printed; do that here. */ if (!dir->parent) STOPIF( st__status(dir), NULL); STOPIF( rev___handle_dir_mtime(dir, dir_flag), NULL); ex: return status; } /** -. * Loads the stored tree (without updating), looks for the wanted entries, * and restores them from the repository. */ int rev__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; time_t delay_start; svn_revnum_t rev; status=0; /* For revert the default is non-recursive. */ opt_recursive--; if (!argc) ac__Usage_this(); STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); STOPIF( url__load_nonempty_list(NULL, 0), NULL); if (opt_target_revisions_given) { STOPIF( wa__warn( WRN__MIXED_REV_WC, EINVAL, "Sorry, fsvs currently doesn't allow mixed revision working copies.\n" "Entries will still be compared against the BASE revision.\n"), NULL); // TODO: necessary? action->local_callback=rev___no_local_change; } else { /* No revision given - just go back to BASE. 
*/ action->local_callback=st__progress; } /* This message can be seen because waa__find_common_base() looks for * an "url" file and not for a "dir" -- which means that this tree * was never committed, so we don't know what HEAD is. */ /* Maybe the user could still try with some revision number and we simply * check for the existence of the given path there? */ status=waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1); if (status == -ENOENT) STOPIF(status, "!We know nothing about previous or current versions, as this tree\n" "was never checked in.\n" "If you need such an entry reverted, you could either write the needed\n" "patch (and send it to dev@fsvs.tigris.org), or try with a 'sync-repos'\n" "command before (if you know a good revision number)\n"); else STOPIF(status, NULL); STOPIF( st__progress_uninit(), NULL); if (opt_target_revisions_given) { while ( ! ( status=url__iterator(&rev) ) ) { STOPIF( cb__record_changes(root, rev, current_url->pool), NULL); } STOPIF_CODE_ERR( status != EOF, status, NULL); STOPIF( rev__do_changed(root, global_pool), NULL); } else { /* The local changes are taken as to be undone. * * We cannot go by estat::entry_status - things like RF_ADD have to be * undone, too. * * waa__do_sorted_tree() can't be used, either, because it does the * directory *before* the children - which makes the directories' mtime * wrong if children get created or deleted. */ STOPIF( rev___local_revert(root, global_pool), NULL); } /* If this was a revert with destination revision, we might have changed * the entire hierarchy - replaced directories with files, etc. * This changed tree must not be written, because it's not the state of * BASE. * [ And if we had to write the original (BASE) list for some cause, * we'd have to read the list afresh, and change what we have to. * Or, the other way: when getting the changes for the given revision * from the repository we'd have to put them in the estat::old shadow * tree, to keep the entry list correct. 
] * * If this was a revert to BASE, we have to write the list, because the * ctime of the inodes will be changed - and would mark the entries as * "maybe changed". */ if (!opt_target_revisions_given) { delay_start=time(NULL); STOPIF( waa__output_tree(root), NULL); STOPIF( hlp__delay(delay_start, DELAY_REVERT), NULL); } ex: return status; } /** Takes the \c sts->remote_status, and does the changes mentioned there. * Depending on \c sts->entry_status a conflict might be set. * * Convenience function to reduce indenting. */ int rev___undo_change(struct estat *sts, enum rev___dir_change_flag_e *dir_change_flag, apr_pool_t *pool) { int status; char *fn; const char *unique_name_mine, *unique_name_remote, *unique_name_common; char revnum[12]; int j; struct estat *removed; struct sstat_t st; STOPIF( ops__build_path( &fn, sts), NULL); DEBUGP_dump_estat(sts); /* If we remove an entry, the entry_count gets decremented; * we have to repeat the loop *for the same index*. */ unique_name_mine=NULL; /* Conflict handling; depends whether it has changed locally. */ if (sts->entry_status & FS_CHANGED) switch (opt__get_int(OPT__CONFLICT)) { case CONFLICT_STOP: STOPIF( EBUSY, "!The entry %s has changed locally", fn); break; case CONFLICT_LOCAL: /* Next one, please. */ STOPIF_CODE_EPIPE( printf("Conflict for %s skipped.\n", fn), NULL); goto ex; case CONFLICT_REMOTE: /* Just ignore local changes. */ break; case CONFLICT_MERGE: case CONFLICT_BOTH: /* Rename local file to something like .mine. */ STOPIF( hlp__rename_to_unique(fn, ".mine", &unique_name_mine, pool), NULL); /* Now the local name is not used ... so get the file. */ break; default: BUG("unknown conflict resolution"); } /* If the entry has been removed in the repository, we remove it * locally, too (if it wasn't changed). * But the type in the repository may be another than the local one - * so we have to check what we currently have. 
*/ /* An entry can be given as removed, and in the same step be created * again - possibly as another type. */ /* If the entry wasn't replaced, but only removed, there's no sts->old. * */ removed=sts->old ? sts->old : sts; if (removed->remote_status & FS_REMOVED) { /* Is the entry already removed? */ /* If there's a typechange involved, the old entry has been * renamed, and so doesn't exist in the filesystem anymore. */ if ((sts->entry_status & FS_REPLACED) != FS_REMOVED && !unique_name_mine) { /* Find type. Small race condition - it might be removed now. */ if (TEST_PACKED(S_ISDIR, removed->old_rev_mode_packed)) { STOPIF( up__rmdir(removed, sts->url), NULL); } else STOPIF( up__unlink(removed, fn), NULL); } *dir_change_flag|=REVERT_MTIME; } /* Is there some kind of garbage that has to be removed? */ if (TEST_PACKED(S_ISGARBAGE, sts->local_mode_packed)) { DEBUGP("cleaning garbage"); STOPIF_CODE_ERR( unlink(fn) == -1, errno, "Cannot remove garbage entry %s", fn); } /* If we change something in this directory, we have to re-sort the * entries by inode again. */ sts->parent->to_be_sorted=1; if ((sts->remote_status & FS_REPLACED) == FS_REMOVED) { sts->to_be_ignored=1; goto ex; } current_url=sts->url; if (S_ISDIR(sts->st.mode)) { *dir_change_flag|=REVERT_MTIME; STOPIF( waa__mkdir_mask(fn, 1, sts->st.mode), NULL); /* Meta-data is done later. */ /* An empty directory need not be sorted; if we get entries, * we'll mark it with \c to_be_sorted .*/ } else if (sts->remote_status & (FS_CHANGED | FS_REPLACED)) /* Not a directory */ { STOPIF( rev__install_file(sts, 0, sts->decoder, pool), NULL); *dir_change_flag|=REVERT_MTIME; /* We had a conflict; rename the file fetched from the * repository to a unique name. */ if (unique_name_mine) { *dir_change_flag|=SET_CURRENT; /* If that revision number overflows, we've got bigger problems. 
* */ snprintf(revnum, sizeof(revnum)-1, ".r%llu", (t_ull)sts->repos_rev); revnum[sizeof(revnum)-1]=0; STOPIF( hlp__rename_to_unique(fn, revnum, &unique_name_remote, pool), NULL); /* If we're updating and already have a conflict, we don't * merge again. */ if (sts->flags & RF_CONFLICT) { STOPIF_CODE_EPIPE( printf("\"%s\" already marked as conflict.\n", fn), NULL); STOPIF( res__mark_conflict(sts, unique_name_mine, unique_name_remote, NULL), NULL); } else if (opt__get_int(OPT__CONFLICT) == CONFLICT_BOTH) { STOPIF( res__mark_conflict(sts, unique_name_mine, unique_name_remote, NULL), NULL); /* Create an empty file, * a) to remind the user, and * b) to avoid a "Deleted" status. */ j=creat(fn, 0777); if (j != -1) j=close(j); STOPIF_CODE_ERR(j == -1, errno, "Error creating \"%s\"", fn); /* up__set_meta_data() does an lstat(), but we want the * original values. */ st=sts->st; STOPIF( up__set_meta_data(sts, fn), NULL); sts->st=st; } else if (opt__get_int(OPT__CONFLICT) == CONFLICT_MERGE) { STOPIF( rev__install_file(sts, sts->old_rev, NULL, pool), NULL); snprintf(revnum, sizeof(revnum)-1, ".r%llu", (t_ull)sts->old_rev); revnum[sizeof(revnum)-1]=0; STOPIF( hlp__rename_to_unique(fn, revnum, &unique_name_common, pool), NULL); STOPIF( rev__merge(sts, unique_name_mine, unique_name_common, unique_name_remote), NULL); } else BUG("why a conflict?"); } } else { /* If user-defined properties have changed, we have to fetch them * from the repository, as we don't store them in RAM (due to the * amount of memory possibly needed). */ if (sts->remote_status & FS_PROPERTIES) STOPIF( rev__get_props(sts, NULL, sts->repos_rev, pool), NULL); if (sts->remote_status & FS_META_CHANGED) { /* If we removed the file, it has no meta-data any more; * if we fetched it via rev__get_file(), it has it set already. * Only the case of *only* meta-data-change is to be done. */ STOPIF( up__set_meta_data(sts, fn), NULL); } } ex: return status; } /** -. * Used on update. 
*/ int rev__do_changed(struct estat *dir, apr_pool_t *pool) { int status; int i; struct estat *sts; apr_pool_t *subpool; enum rev___dir_change_flag_e dir_flag; status=0; subpool=NULL; dir_flag= (dir->entry_status & FS_NEW) || (dir->remote_status & FS_NEW) ? REVERT_MTIME : NOT_CHANGED; /* If some children have changed, do a full run. * Else just repair meta-data. */ if (!(dir->remote_status & FS_CHILD_CHANGED)) DEBUGP("%s: no children changed", dir->name); else for(i=0; ientry_count; i++) { sts=dir->by_inode[i]; STOPIF( apr_pool_create(&subpool, pool), "Cannot get a subpool"); if (sts->remote_status & FS__CHANGE_MASK) STOPIF( rev___undo_change(sts, &dir_flag, subpool), NULL); /* We always recurse now, even if the directory has no children. * Else we'd have to check for children in a few places above, which would * make the code unreadable. */ if (S_ISDIR(sts->st.mode) && (sts->remote_status & FS_REPLACED) != FS_REMOVED) { apr_pool_destroy(subpool); subpool=NULL; STOPIF( apr_pool_create(&subpool, pool), "subpool creation"); STOPIF( rev__do_changed(sts, subpool), NULL); } STOPIF( st__rm_status(sts), NULL); apr_pool_destroy(subpool); subpool=NULL; } /* We cannot free the memory earlier - the data is needed for the status * output and recursion. */ STOPIF( ops__free_marked(dir, 0), NULL); /* The root entry would not be printed; do that here. */ if (!dir->parent) STOPIF( st__rm_status(dir), NULL); /* If the directory had local modifications, we need to check it * next time -- as we take its current timestamp, * we'd miss the new or deleted entries. 
* Must be done before \c ops__update_single_entry() - That sets * \c dir->entry_status .*/ if (dir->entry_status & FS__CHANGE_MASK) dir->flags |= RF_CHECK; STOPIF( rev___handle_dir_mtime(dir, dir_flag), NULL); ex: return status; } fsvs-1.2.6/src/url.h0000644000202400020240000000777011214371514013223 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __URL_H__ #define __URL_H__ #include "actions.h" #include "global.h" /** \file * Header file for the \ref urls action and URL-related functions. */ /** A \c NULL terminated array of url parameters. */ extern char **url__parm_list; /** How many URLs were given as parameters. */ extern int url__parm_list_used; /** Whether the URL list in FSVS_CONF must be written. */ extern int url__must_write_defs; /** URLs action. */ work_t url__work; /** Loads the URLs for the given \a dir. */ int url__load_list(char *dir, int reserve_space); /** Wrapper for url__load_list(); Cries for \c ENOENT . */ int url__load_nonempty_list(char *dir, int reserve_space); /** Writes the URL list back. */ int url__output_list(void); /** Returns a struct \a url_t matching the given string. */ int url__find_by_name(const char *name, struct url_t **storage); /** Returns a struct \a url_t matching the given internal number. */ int url__find_by_intnum(int intnum, struct url_t **storage); /** Returns a struct \a url_t matching the given url. */ int url__find_by_url(char *url, struct url_t **storage); /** Returns the full URL for this entry. */ int url__full_url(struct estat *sts, char **url); /** Returns the full URL for this entry for some other than the highest * priority URL. 
*/ int url__other_full_url(struct estat *sts, struct url_t *url, char **output); /** Parses the given string into the URL storage. */ int url__parse(char *input, struct url_t *storage, int *def_parms); /** Opens a session to the \a current_url . */ int url__open_session(svn_ra_session_t **session, char **missing_dirs); /** Looks for an URL matching \a url, and returns its address. */ int url__find(char *url, struct url_t **output); /** Returns whether \a current_url has a higher priority than the * URL to compare. */ int url__current_has_precedence(struct url_t *to_compare); /** Insert or replace URL. */ int url__insert_or_replace(char *eurl, struct url_t **storage, int *existed); /** Allocate additional space for the given number of URLs. */ int url__allocate(int reserve_space); /** Closes given RA session and frees associated memory. */ int url__close_session(struct url_t *cur); /** Closes all RA sessions. */ int url__close_sessions(void); /** Marks URLs for handling. */ int url__mark_todo(void); /** Remember URL name parameter for later processing. */ int url__store_url_name(char *parm); /** Returns whether \a url should be handled. */ static inline int url__to_be_handled(const struct url_t *url) { return (!url__parm_list_used) || url->to_be_handled; } /** Simple function setting \c current_url, and returning whether there's * something to do. */ int url__iterator2(svn_revnum_t *target_rev, int only_if_count, char **missing); static inline int url__iterator(svn_revnum_t *target_rev) { return url__iterator2(target_rev, 0, NULL); } /** Comparing two URLs. * * They get sorted by \a priority ascending (lower numbers, so higher * priority, first), then by \a url ascending (sort URLs alphabetically). * * This is necessary, as on update we walk the \a urllist in order, to * have lower priority entries appearing when higher priority entries are * removed. * * If the first URL has a higher priority, a negative value is returned. 
*/ static inline int url__sorter(struct url_t *u1, struct url_t *u2) { if (u1->priority == u2->priority) return strcmp(u1->url, u2->url); else return u1->priority - u2->priority; } /** For use in \c qsort(). */ int url__indir_sorter(const void *a, const void *b); /** Changes the revision number, if \c SVN_INVALID_REVNUM, to the real * value. */ int url__canonical_rev( struct url_t *url, svn_revnum_t *rev); #endif fsvs-1.2.6/src/cat.c0000644000202400020240000000374611264677022013172 0ustar marekmarek/************************************************************************ * Copyright (C) 2008-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include "global.h" #include "waa.h" #include "revert.h" #include "url.h" #include "est_ops.h" /** \file * \ref cat action. * * \todo \code * fsvs cat [-r rev] [-u URLname] path * fsvs cat [-u URLname:rev] path * \endcode * */ /** * \addtogroup cmds * * \section cat * * \code * fsvs cat [-r rev] path * \endcode * * Fetches a file repository, and outputs it to \c STDOUT. * If no revision is specified, it defaults to BASE, ie. the current local * revision number of the entry. * */ /** -. * Main function. 
*/ int cat__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; struct estat *sts; struct svn_stream_t *output; svn_error_t *status_svn; status=0; STOPIF_CODE_ERR( argc != 1, EINVAL, "!Exactly a single path must be given."); STOPIF_CODE_ERR( opt_target_revisions_given > 1, EINVAL, "!At most a single revision is allowed."); STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); STOPIF( url__load_list(NULL, 0), NULL); STOPIF( waa__input_tree( root, NULL, NULL), NULL); STOPIF( ops__traverse(root, normalized[0], OPS__FAIL_NOT_LIST, 0, &sts), NULL); current_url=sts->url; STOPIF_CODE_ERR( !current_url, EINVAL, "!For this entry no URL is known."); STOPIF( url__open_session(NULL, NULL), NULL); STOPIF_SVNERR( svn_stream_for_stdout, (&output, global_pool)); STOPIF( rev__get_text_to_stream( normalized[0], opt_target_revisions_given ? opt_target_revision : sts->repos_rev, DECODER_UNKNOWN, output, NULL, NULL, NULL, global_pool), NULL); ex: return status; } fsvs-1.2.6/src/checkout.c0000644000202400020240000000755411264677022014231 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include "url.h" #include "waa.h" #include "helper.h" #include "commit.h" #include "export.h" /** \file * \ref checkout action * */ /** \addtogroup cmds * * \section checkout * * \code * fsvs checkout [path] URL [URLs...] * \endcode * * Sets one or more URLs for the current working directory (or the * directory \c path), and does an \ref checkout of these URLs. * * Example: * \code * fsvs checkout . 
http://svn/repos/installation/machine-1/trunk * \endcode * * The distinction whether a directory is given or not is done based on the * result of URL-parsing -- if it looks like an URL, it is used as an URL. * \n Please mind that at most a single path is allowed; as soon as two * non-URLs are found an error message is printed. * * If no directory is given, \c "." is used; this differs from the usual * subversion usage, but might be better suited for usage as a recovery * tool (where versioning \c / is common). Opinions welcome. * * The given \c path must exist, and \b should be empty -- FSVS will * abort on conflicts, ie. if files that should be created already exist. * \n If there's a need to create that directory, please say so; patches * for some parameter like \c -p are welcome. * * For a format definition of the URLs please see the chapter \ref * url_format and the \ref urls and \ref update commands. * * Furthermore you might be interested in \ref o_softroot and \ref * howto_backup_recovery. * */ /** -. * Writes the given URLs into the WAA, and gets the files from the * repository. */ int co__work(struct estat *root, int argc, char *argv[]) { int status; int l; char *path; time_t delay_start; path=NULL; /* The allocation uses calloc(), so current_rev is initialized to 0. */ STOPIF( url__allocate(argc+1), NULL); /* Append URLs. */ for(l=0; larg=path ? path : "."; STOPIF( waa__save_cwd( &path, NULL, 0), NULL); STOPIF( waa__create_working_copy(path), NULL); free(path); /* We don't use the loop above, because the user might give the same URL * twice - and we'd overwrite the fetched files. */ for(l=0; lcurrent_rev = target_revision; STOPIF( ci__set_revision(root, target_revision), NULL); printf("Checked out %s at revision\t%ld.\n", urllist[l]->url, urllist[l]->current_rev); } /* Store where we are ... 
*/ delay_start=time(NULL); STOPIF( url__output_list(), NULL); STOPIF( waa__output_tree(root), NULL); STOPIF( hlp__delay(delay_start, DELAY_CHECKOUT), NULL); ex: return status; } fsvs-1.2.6/src/hash_ops.c0000644000202400020240000003323411106525735014220 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include "global.h" #include "waa.h" #include "helper.h" #include "hash_ops.h" /** \file * Hash operations for copy/move detection. * * The hash operations are binary clean; they don't care what kind of * key/value data they store. * * * \section hsh_storage Storage considerations * * The basic question is ... do we need an unlimited amount of list entries * in any hash bucket, or do we not? * * If we do, we have these possibilities: * \subsection hsh_store_pnt Extending the key, counting entries. * The first entry is written as now. * \dot * digraph { * node [shape=record]; * "key" -> "value1"; * } * \enddot * If we find a second entry with the same key, * - it is written as \c {key}1 * - and the other one as \c {key}2; * - the \c {key} entry is set to the integer \c 2, (which can easily be * distinguished because of the length) * \dot * digraph { * node [shape=record]; * "key" -> "2 (number of array elements)"; * "key.1" -> "value1"; * "key.2" -> "value2"; * } * \enddot * Every further entry with the same key would increment the counter, and * get stored at that position. \n * That's not optimal for performance: * - It means reading a value (does the key exist?), and * - possibly writing that key with another value again, and * - storing a value. 
* Note that this would require some end-of-key marker (like \c \\0), or * keys with a constant length. * * * \subsection hsh_store_list Storing a linked list in the hash. * Every entry is written with a header; the first for some given key gets * the number \c 0, as an \e end-of-list marker. * \dot * digraph { * node [shape=record]; * C0 [label ="<0>0 | value1" ]; * "key" -> C0; * } * \enddot * If this \c key has a collision, we * - increment the stored \c number, * - write the old value under the key \c {number}, with the header set * to 0. * - The new value gets the current \c number prepended, and stored with * the given \c key. * \dot * digraph { * node [shape=record]; * { * rank=same; * C0 [label = "

0 | value2" ]; * C1 [label = "

1 | value1" ]; * } * { * rank=same; * key; * 1; * } * key -> C1; * 1 -> C0; * * edge [style=dotted]; * edge [arrowhead=none, arrowtail=normal]; * 1 -> C1:p; * * edge [style=invis, weight=20]; * key -> 1; * } * \enddot * After several insertions, the situation might be like this: * \dot * digraph { * node [shape=record]; * { * rank=same; * Ca [label = " {

0 | value_A } " ]; * Cb [label = " {

0 | value_B2 } " ]; * Cb2 [label = " {

1 | value_B1 } " ]; * Cc3 [label = " {

3 | value_C3 } " ]; * Cc2 [label = " {

2 | value_C2 } " ]; * Cc [label = " {

0 | value_C1 } " ]; * } * { * rank=same; * key_A; * key_B; * key_C; * 1; * 2; * 3; * } * * "key_A" -> Ca; * * "key_B" -> Cb2; * "1" -> Cb; * * "key_C" -> Cc3; * "2" -> Cc; * "3" -> Cc2; * * edge [style=dotted]; * edge [arrowhead=none, arrowtail=normal]; * 1 -> Cb2:p; * 3 -> Cc3:p; * 2 -> Cc2:p; * * edge [style=invis, weight=20]; * key_A -> key_B; * 1 -> key_C; * } * \enddot * * * \subsection hsh_store_array Storing a verbatim array * * If there's a limited number of entries (with known length) to store, an * array with a defined size might be easiest. \n * A similar variant would be to simply concatenate the data in the hash * buckets, with some suitable separator. * - memory intensive, slow for big buckets (many bytes to copy). * - For array iteration some special convention for the \c key would * have to be used, like \c .dsize=0 and \c .dptr=array_def; the last * returned index would have to be stored in the array structure. * - Big advantage: fast for reading, doesn't have to seek around. * \dot * digraph { * node [shape=record]; * C1 [label = "num=4 | v1 | v2 | v3 | v4 | 0 | 0 | 0 | 0 | 0 | 0 | 0" ]; * "key" -> C1; * } * \enddot * * * \subsection Conclusio * * Barring other (better) ideas, the array solution is currently * implemented; the array is of fixed-size, can store only pointers, and * the function for getting a list allows returning a set of elements. * *


* */ /** \name Simple hash functions. * * @{ */ /** Bare open function for internal use. * * \a *fname_out, if not \c NULL, gets an allocated copy of the filename. * */ int hsh___new_bare(char *wcfile, char *name, int gdbm_mode, GDBM_FILE *output, char **fname_out) { int status; char *cp, *eos; GDBM_FILE db; status=0; db=NULL; if (gdbm_mode == HASH_TEMPORARY) { cp=waa_tmp_path; /* Use this bit, so that the open filehandle says what it was. */ eos=waa_tmp_fn; } else STOPIF( waa__get_waa_directory(wcfile, &cp, &eos, NULL, ( (gdbm_mode == GDBM_READER) ? 0 : GWD_MKDIR) | waa__get_gwd_flag(name)), NULL); strcpy(eos, name); if (gdbm_mode == GDBM_NEWDB || gdbm_mode == HASH_TEMPORARY) { /* libgdbm3=1.8.3-3 has a bug - with GDBM_NEWDB an existing database is * not truncated. Only the O_CREAT, not the O_TRUNC flag is used. * debian #447981. */ /** STOPIF_CODE_ERR\todo remove this bugfix sometime ... */ /* No error, and ENOENT are both ok. */ STOPIF_CODE_ERR( (unlink(cp) == -1) && (errno != ENOENT), errno, "Removing database file '%s'", cp); status=0; } db = gdbm_open(cp, 0, gdbm_mode, 0777, NULL); if (!db) { status=errno; if (status != ENOENT) STOPIF(status, "Cannot open database file %s", cp); } else { /* Temporary files can be removed immediately. */ if (gdbm_mode == HASH_TEMPORARY) STOPIF_CODE_ERR( unlink(cp) == -1, errno, "Removing database file '%s'", cp); if (fname_out) STOPIF( hlp__strdup( fname_out, cp), NULL); } ex: if (status) { if (db) gdbm_close(db); } else *output=db; return status; } /** -. * If \a flags is \c GDBM_NEWDB, the file gets deleted immediately; there's * no need to keep it around any longer, and it's not defined where it gets * located. * If another open mode is used, the entry is always created in the WAA or * CONF base directory for \a wcfile, ie. the hashed path for the working * copy root. 
*/ int hsh__new(char *wcfile, char *name, int gdbm_mode, hash_t *output) { int status; hash_t hash; STOPIF( hlp__calloc( &hash, 1, sizeof(*hash)), NULL); /* Return errors silently. */ status=hsh___new_bare(wcfile, name, gdbm_mode & ~HASH_REMEMBER_FILENAME, & (hash->db), gdbm_mode & HASH_REMEMBER_FILENAME ? &(hash->filename) : NULL); ex: if (status) IF_FREE(hash); else *output=hash; return status; } /** -. * * The previously marked keys in the hash table are removed; it is not * checked for empty-ness nor reorganized. */ int hsh__collect_garbage(hash_t db, int *did_remove) { int status; int have_removed; datum key, next; status=0; have_removed=0; if (db && db->to_delete) { key=gdbm_firstkey(db->to_delete); while (key.dptr) { next=gdbm_nextkey(db->to_delete, key); STOPIF_CODE_ERR( gdbm_delete(db->db, key)!=0, gdbm_errno, "Removing entry"); free(key.dptr); key=next; have_removed++; } DEBUGP("%d cleanups", have_removed); gdbm_close(db->to_delete); db->to_delete=NULL; } if (did_remove) *did_remove=have_removed; ex: return status; } /** -. * * If \a has_failed is set, some error has happened, and the registered * keys are not used for deletion (like a \c ROLLBACK). */ int hsh__close(hash_t db, int has_failed) { int status; int have_removed; datum key; status=0; if (!db) goto ex; have_removed=0; if (db->to_delete) { if (!has_failed) STOPIF( hsh__collect_garbage(db, &have_removed), NULL); } /* No more data in that hash? */ if (hsh__first(db, &key) == ENOENT && db->filename) { DEBUGP("nothing found, removing %s", db->filename); STOPIF( waa__delete_byext(db->filename, NULL, 0), "Cleaning up the empty hash '%s'", db->filename); } else { DEBUGP("reorganize?"); /* At least fewer space used? */ if (have_removed) gdbm_reorganize(db->db); } DEBUGP("closing hash"); /* It's possible that some hash was never opened, because there was no * need to. */ if (db->db) { gdbm_close(db->db); db->db=NULL; } ex: if (db) IF_FREE(db->filename); IF_FREE(db); return status; } /** -. 
*/ int hsh__fetch(hash_t db, datum key, datum *value) { static datum vl; if (!db) return ENOENT; vl=gdbm_fetch(db->db, key); if (value) *value=vl; else IF_FREE(vl.dptr); return (vl.dptr) ? 0 : ENOENT; } /** -. * */ int hsh__first(hash_t db, datum *key) { datum k; if (!db) return ENOENT; k=gdbm_firstkey(db->db); if (key) *key=k; return (k.dptr) ? 0 : ENOENT; } /** -. * * If \c oldkey==key the \c ->dptr is \c free()d; else the caller has to do * that. */ int hsh__next(hash_t db, datum *key, const datum *oldkey) { datum k; /* Get next key. */ k=gdbm_nextkey(db->db, *oldkey); /* Ev. free old key-data. */ if (oldkey == key) /* Should be IF_FREE(oldkey->dptr) -- but oldkey==key, and oldkey is * read-only. */ IF_FREE(key->dptr); /* Return new key. */ *key=k; return (k.dptr) ? 0 : ENOENT; } /** -. * */ int hsh__store(hash_t db, datum key, datum value) { int status; if (value.dsize == 0 || value.dptr == NULL) status=gdbm_delete(db->db, key); else status=gdbm_store(db->db, key, value, GDBM_REPLACE); STOPIF_CODE_ERR(status < 0, errno, "Error writing property %s", key.dptr); ex: return status; } /** -. * The delimiting \\0 is stored, too. */ int hsh__store_charp(hash_t db, char *keyp, char *valuep) { datum key, value; key.dptr=keyp; value.dptr=valuep; key.dsize=strlen(key.dptr)+1; value.dsize=strlen(value.dptr)+1; return hsh__store(db, key, value); } /** @} */ /** \name Hash list manipulation. * * @{ */ /** Structure for storing a number of data packets of size sizeof(void*). * */ struct hsh___list { /** Count of entries. * * If we put the overflow flag/count at the end, the structure is not * runtime-resizeable. If we put it at the start, it will always need * alignment for the pointers behind. * So we can just store the count ... a single bit wouldn't need fewer * bytes. * * If \c count>HASH__LIST_MAX, this entry is considered "overflown". */ int count; /** Array of pointers. */ void *entries[HASH__LIST_MAX]; }; /** -. 
* * * */ int hsh__insert_pointer(hash_t hash, datum key, void *value) { int status; datum listd; struct hsh___list list, *dst; status=hsh__fetch(hash, key, &listd); if (status == ENOENT) { /* None found. */ /* Don't know why this is not a (void*) ... possibly because it * couldn't add up :-) */ listd.dptr= (char*)&list; listd.dsize=sizeof(list); memset(&list, 0, sizeof(list)); list.count=1; list.entries[0]=value; status=0; } else { /* Already something there. We can re-use the storage, as it was * malloc()ed anyway. */ BUG_ON(listd.dsize != sizeof(list)); /* status is still 0, so per default the list would get written. */ dst=(struct hsh___list*)listd.dptr; if (dst->count == HASH__LIST_MAX) { /* On dst->count reaching MAX_DUPL_ENTRIES we have to write the * structure once again. */ /* Later we could avoid it ... although it might be interesting how many there are. Maybe even resizing? */ dst->count++; } else if (dst->count > HASH__LIST_MAX) { /* No more writes needed. */ status=EFBIG; } else { /* Normal operation, still space available. */ dst->entries[ dst->count ] = value; dst->count++; } } if (status != EFBIG) STOPIF( hsh__store(hash, key, listd), NULL); ex: return status; } /** -. * If \a next_key is not \c NULL, it is set so that the next query can use * this key for the next element. * If \c *next_key==NULL, no next element is there; everything has been * returned. * If no (more) entry could be found, \c ENOENT is returned, and \c * *found==0. * * In case of storage via an array hsh__list must store some internal * state; therefore only a single loop is (currently) possible. * */ int hsh__list_get(hash_t hash, datum current_key, datum *next_key, struct estat **arr[], int *found) { int status; /* We ensure at least a single NULL at the end. */ static struct estat *data[HASH__LIST_MAX+1]; datum value; struct hsh___list *list; int c; status=0; *found=0; *arr=NULL; /* No next key ... we return the array. 
*/ if (next_key) { next_key->dsize=0; next_key->dptr=0; } status=hsh__fetch( hash, current_key, &value); /* Nothing here? Just report that. */ if (status == ENOENT) goto ex; STOPIF(status, NULL); list=(struct hsh___list*)value.dptr; memset(data, 0, sizeof(data)); c=list->count; /* Overflow? */ if (c == HASH__LIST_MAX+1) c=HASH__LIST_MAX; BUG_ON( c<=0 || c>HASH__LIST_MAX, "number of entries=%d", c); memcpy(data, list->entries, c*sizeof(data[0])); *found=c; *arr=data; ex: return status; } int hsh__register_delete(hash_t db, datum key) { int status; const datum data= { .dsize=1, .dptr="\0", }; status=0; if (!db->to_delete) { STOPIF( hsh___new_bare(NULL, "del", HASH_TEMPORARY, &(db->to_delete), NULL), NULL); } DEBUGP("storing %s", key.dptr); status=gdbm_store(db->to_delete, key, data, GDBM_REPLACE); STOPIF_CODE_ERR(status < 0, errno, "Error writing key"); ex: return status; } /** @} */ fsvs-1.2.6/src/doxygen-data/0000755000202400020240000000000012554717235014635 5ustar marekmarekfsvs-1.2.6/src/doxygen-data/Doxyfile-man0000644000202400020240000017557211213413040017107 0ustar marekmarek# Doxyfile 1.5.9 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. 
See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = fsvs # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ../../doxygen/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. 
OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. 
If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = ./ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = ./ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! 
or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. 
Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. 
Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. 
For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 5 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. 
EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. 
This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. 
SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = NO # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = NO # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = NO # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= NO # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = man # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = NO # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. 
SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. 
WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = YES # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . 
\ dox \ tools # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.c \ *.h \ *.dox # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. 
The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. 
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = NO # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. 
The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = NO #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = NO # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. 
HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = doxygen-data/head.html # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = doxygen-data/foot.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = doxygen-data/doxygen.css # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). 
# To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). 
GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. 
# Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to FRAME, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. Other possible values # for this tag are: HIERARCHIES, which will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list; # ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which # disables this behavior completely. For backwards compatibility with previous # releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE # respectively. GENERATE_TREEVIEW = YES # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. 
Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! 
LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. 
This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .1 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. 
GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. 
Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = DOXYGEN # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. 
Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). 
PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = NO # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. 
You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = NO # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = NO # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = NO # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. 
INCLUDE_GRAPH = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = NO # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = NO # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = NO # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. 
DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. 
DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Options related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO fsvs-1.2.6/src/doxygen-data/foot.html0000644000202400020240000000037410571215725016470 0ustar marekmarek
Generated for $projectname by  doxygen $doxygenversion
fsvs-1.2.6/src/doxygen-data/head.html0000644000202400020240000000047510571215725016424 0ustar marekmarek $title fsvs-1.2.6/src/doxygen-data/doxygen.css0000644000202400020240000001764110571215725017027 0ustar marekmarek/* * $Id$ * */ BODY,H1,H2,H3,H4,H5,H6,P,CENTER,TD,TH,UL,DL,DIV { font-family: Geneva, Arial, Helvetica, sans-serif; } BODY,TD { font-size: 90%; } H1 { text-align: center; font-size: 160%; } H2 { font-size: 120%; } H3 { font-size: 100%; } CAPTION { font-weight: bold } DIV.qindex { width: 100%; background-color: #e8eef2; border: 1px solid #84b0c7; text-align: center; margin: 2px; padding: 2px; line-height: 140%; } DIV.nav { width: 100%; background-color: #e8eef2; border: 1px solid #84b0c7; text-align: center; margin: 2px; padding: 2px; line-height: 140%; } DIV.navtab { background-color: #e8eef2; border: 1px solid #84b0c7; text-align: center; margin: 2px; margin-right: 15px; padding: 2px; } TD.navtab { font-size: 70%; } A.qindex { text-decoration: none; font-weight: bold; color: #1A419D; } A.qindex:visited { text-decoration: none; font-weight: bold; color: #1A419D } A.qindex:hover { text-decoration: none; background-color: #ddddff; } A.qindexHL { text-decoration: none; font-weight: bold; background-color: #6666cc; color: #ffffff; border: 1px double #9295C2; } A.qindexHL:hover { text-decoration: none; background-color: #6666cc; color: #ffffff; } A.qindexHL:visited { text-decoration: none; background-color: #6666cc; color: #ffffff } A.el { text-decoration: none; font-weight: bold } A.elRef { font-weight: bold } A.code:link { text-decoration: none; font-weight: normal; color: #0000FF} A.code:visited { text-decoration: none; font-weight: normal; color: #0000FF} A.codeRef:link { font-weight: normal; color: #0000FF} A.codeRef:visited { font-weight: normal; color: #0000FF} A:hover { text-decoration: none; background-color: #f2f2ff } DL.el { margin-left: -1cm } .fragment { font-family: monospace, fixed; font-size: 95%; } PRE.fragment { border: 1px solid #CCCCCC; 
background-color: #f5f5f5; margin-top: 4px; margin-bottom: 4px; margin-left: 2px; margin-right: 8px; padding-left: 6px; padding-right: 6px; padding-top: 4px; padding-bottom: 4px; } DIV.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px } DIV.groupHeader { margin-left: 16px; margin-top: 12px; margin-bottom: 6px; font-weight: bold; } DIV.groupText { margin-left: 16px; font-style: italic; font-size: 90% } BODY { background: white; color: black; margin-right: 20px; margin-left: 20px; } TD.indexkey { background-color: #e8eef2; font-weight: bold; padding-right : 10px; padding-top : 2px; padding-left : 10px; padding-bottom : 2px; margin-left : 0px; margin-right : 0px; margin-top : 2px; margin-bottom : 2px; border: 1px solid #CCCCCC; } TD.indexvalue { background-color: #e8eef2; font-style: italic; padding-right : 10px; padding-top : 2px; padding-left : 10px; padding-bottom : 2px; margin-left : 0px; margin-right : 0px; margin-top : 2px; margin-bottom : 2px; border: 1px solid #CCCCCC; } TR.memlist { background-color: #f0f0f0; } P.formulaDsp { text-align: center; } IMG.formulaDsp { } IMG.formulaInl { vertical-align: middle; } SPAN.keyword { color: #008000 } SPAN.keywordtype { color: #604020 } SPAN.keywordflow { color: #e08000 } SPAN.comment { color: #800000 } SPAN.preprocessor { color: #806020 } SPAN.stringliteral { color: #002080 } SPAN.charliteral { color: #008080 } .mdescLeft { padding: 0px 8px 4px 8px; font-size: 80%; font-style: italic; background-color: #FAFAFA; border-top: 1px none #E0E0E0; border-right: 1px none #E0E0E0; border-bottom: 1px none #E0E0E0; border-left: 1px none #E0E0E0; margin: 0px; } .mdescRight { padding: 0px 8px 4px 8px; font-size: 80%; font-style: italic; background-color: #FAFAFA; border-top: 1px none #E0E0E0; border-right: 1px none #E0E0E0; border-bottom: 1px none #E0E0E0; border-left: 1px none #E0E0E0; margin: 0px; } .memItemLeft { padding: 1px 0px 0px 8px; margin: 4px; border-top-width: 1px; 
border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; border-top-color: #E0E0E0; border-right-color: #E0E0E0; border-bottom-color: #E0E0E0; border-left-color: #E0E0E0; border-top-style: solid; border-right-style: none; border-bottom-style: none; border-left-style: none; background-color: #FAFAFA; font-size: 80%; } .memItemRight { padding: 1px 8px 0px 8px; margin: 4px; border-top-width: 1px; border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; border-top-color: #E0E0E0; border-right-color: #E0E0E0; border-bottom-color: #E0E0E0; border-left-color: #E0E0E0; border-top-style: solid; border-right-style: none; border-bottom-style: none; border-left-style: none; background-color: #FAFAFA; font-size: 80%; } .memTemplItemLeft { padding: 1px 0px 0px 8px; margin: 4px; border-top-width: 1px; border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; border-top-color: #E0E0E0; border-right-color: #E0E0E0; border-bottom-color: #E0E0E0; border-left-color: #E0E0E0; border-top-style: none; border-right-style: none; border-bottom-style: none; border-left-style: none; background-color: #FAFAFA; font-size: 80%; } .memTemplItemRight { padding: 1px 8px 0px 8px; margin: 4px; border-top-width: 1px; border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; border-top-color: #E0E0E0; border-right-color: #E0E0E0; border-bottom-color: #E0E0E0; border-left-color: #E0E0E0; border-top-style: none; border-right-style: none; border-bottom-style: none; border-left-style: none; background-color: #FAFAFA; font-size: 80%; } .memTemplParams { padding: 1px 0px 0px 8px; margin: 4px; border-top-width: 1px; border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; border-top-color: #E0E0E0; border-right-color: #E0E0E0; border-bottom-color: #E0E0E0; border-left-color: #E0E0E0; border-top-style: solid; border-right-style: none; border-bottom-style: none; border-left-style: none; color: #606060; background-color: 
#FAFAFA; font-size: 80%; } .search { color: #003399; font-weight: bold; } FORM.search { margin-bottom: 0px; margin-top: 0px; } INPUT.search { font-size: 75%; color: #000080; font-weight: normal; background-color: #e8eef2; } TD.tiny { font-size: 75%; } a { color: #1A41A8; } a:visited { color: #2A3798; } .dirtab { padding: 4px; border-collapse: collapse; border: 1px solid #84b0c7; } TH.dirtab { background: #e8eef2; font-weight: bold; } HR { height: 1px; border: none; border-top: 1px solid black; } /* Style for detailed member documentation */ .memtemplate { font-size: 80%; color: #606060; font-weight: normal; } .memnav { background-color: #e8eef2; border: 1px solid #84b0c7; text-align: center; margin: 2px; margin-right: 15px; padding: 2px; } .memitem { padding: 4px; background-color: #eef3f5; border-width: 1px; border-style: solid; border-color: #dedeee; -moz-border-radius: 8px 8px 8px 8px; } .memname { white-space: nowrap; font-weight: bold; } .memdoc{ padding-left: 10px; } .memproto { background-color: #d5e1e8; width: 100%; border-width: 1px; border-style: solid; border-color: #84b0c7; font-weight: bold; -moz-border-radius: 8px 8px 8px 8px; } .paramkey { text-align: right; } .paramtype { white-space: nowrap; } .paramname { color: #602020; font-style: italic; white-space: nowrap; } /* End Styling for detailed member documentation */ /* for the tree view */ .ftvtree { font-family: sans-serif; margin:0.5em; } .directory { font-size: 9pt; font-weight: bold; } .directory h3 { margin: 0px; margin-top: 1em; font-size: 11pt; } .directory > h3 { margin-top: 0; } .directory p { margin: 0px; white-space: nowrap; } .directory div { display: none; margin: 0px; } .directory img { vertical-align: -30%; } fsvs-1.2.6/src/doxygen-data/Doxyfile0000644000202400020240000017561711213413040016336 0ustar marekmarek# Doxyfile 1.5.9 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered 
a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = fsvs # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ../../doxygen/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. 
CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = ./ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. 
STRIP_FROM_INC_PATH = ./ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. 
An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. 
EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. 
SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 5 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. 
HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. 
SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. 
ENABLED_SECTIONS = html # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. 
FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = YES # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. 
The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . \ dox \ tools # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.c \ *.h \ *.dox # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. 
RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. 
EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. 
INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = doxygen-data/head.html # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = doxygen-data/foot.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. 
Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = doxygen-data/doxygen.css # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. 
DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. 
GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. 
# If the tag value is set to FRAME, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. Other possible values # for this tag are: HIERARCHIES, which will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list; # ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which # disables this behavior completely. For backwards compatibility with previous # releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE # respectively. GENERATE_TREEVIEW = YES # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. 
LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. 
# This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. 
# Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. 
XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. 
PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). 
The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = DOXYGEN # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. 
TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = NO # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. 
 This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = NO # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = NO # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = NO # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = YES # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = YES # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = NO # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = gif # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. 
MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Options related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO fsvs-1.2.6/src/cache.c0000644000202400020240000001435612467104255013464 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include "global.h" #include "cache.h" /** \file * Some small caching primitives. 
* * We have to do some caching - neither the APR-functions nor glibc caches * results of \c getpwnam() and similar. * On update or commit we call them many, many times ... there it's good to * have these values cached. * * It's not necessary for performance; but simply getting a \c char* back * from some function and using it, knowing that it's valid for a few more * calls of the same function, eases life tremendously. * * \todo Convert the other caches. * * \todo Let the \c apr_uid_get() calls from \c update.c go into that - * but they need a hash or something like that. Maybe reverse the test and * look whether the number (eg. uid) matches the string (username)? * */ /** -. * If a struct \ref cache_entry_t is used as a string, this might be * useful. * * If memory should be allocated, but not copied, specify \a data as \c * NULL. * For \a len \c ==-1 calls \c strlen(). * * If \a copy_old_data is set, old value in this cache entry is kept. * * Please note that memory may have to be reallocated, causing \c *cache to * change! */ int cch__entry_set(struct cache_entry_t **cache, cache_value_t id, const char *data, int len, int copy_old_data, char **copy) { int status; struct cache_entry_t *ce; int alloc_len; if (len == -1) len=strlen(data); status=0; ce=*cache; alloc_len=len+sizeof(struct cache_entry_t); if (!ce || alloc_len > ce->len || (ce->len - len) > 1024) { /* Round up a bit (including the struct). */ alloc_len = (alloc_len + 96-1) & ~64; if (copy_old_data) STOPIF( hlp__realloc( &ce, alloc_len), NULL); else { /* Note: realloc() copies the old data to the new location, but most * of the time we'd overwrite it completely just afterwards. */ free(*cache); STOPIF( hlp__alloc( &ce, alloc_len), NULL); } ce->len = alloc_len-sizeof(struct cache_entry_t)-1; *cache=ce; } ce->id = id; if (data) memcpy(ce->data, data, len); /* Just to be safe ... */ ce->data[len]=0; if (copy) *copy=ce->data; ex: return status; } /** -. * Can return \c ENOENT if not found. 
*/ inline int cch__find(struct cache_t *cache, cache_value_t id, int *index, char **data, int *len) { int i; for(i=0; iused; i++) if (cache->entries[i]->id == id) { if (data) *data= cache->entries[i]->data; if (len) *len= cache->entries[i]->len; if (index) *index=i; return 0; } return ENOENT; } /** -. * * The given data is just inserted into the cache and marked as LRU. * An old entry is removed if necessary. */ int cch__add(struct cache_t *cache, cache_value_t id, const char *data, int len, char **copy) { int i; if ( cache->used >= cache->max) { i=cache->lru+1; if (i >= cache->max) i=0; } else i= cache->used++; cache->lru=i; /* Set data */ return cch__entry_set(cache->entries + i, id, data, len, 0, copy); } /** -. * \a id is a distinct numeric value for addressing this item. * The entry is set as LRU, eventually discarding older entries. * */ int cch__set_by_id(struct cache_t *cache, cache_value_t id, const char *data, int len, int copy_old_data, char **copy) { int i; /* Entry with same ID gets overwritten. */ if (cch__find(cache, id, &i, NULL, NULL) == ENOENT) { return cch__add(cache, id, data, len, copy); } /* Found, move to LRU */ cch__set_active(cache, i); /* Set data */ return cch__entry_set(cache->entries + i, id, data, len, copy_old_data, copy); } /** -. * */ void cch__set_active(struct cache_t *cache, int i) { struct cache_entry_t *tmp, **entries; entries=cache->entries; /* observe these 2 cases: */ if (i < cache->lru) { /* from | 6 5 i 3 2 1 LRU 9 8 7 | * to | 6 5 3 2 1 LRU i 9 8 7 | * -> move [i+1 to LRU] to i, i is the new LRU. 
*/ tmp=entries[i]; memmove(entries+i, entries+i+1, (cache->lru-i) * sizeof(entries[0])); entries[cache->lru]=tmp; } else if (i > cache->lru) { /* from | 2 1 LRU 9 8 7 i 5 4 3 | * to | 2 1 LRU i 9 8 7 5 4 3 | * -> move [LRU+1 to i] to LRU+2; LRU++ */ cache->lru++; BUG_ON(cache->lru >= cache->max); /* lru is already incremented */ tmp=entries[i]; memmove(entries+cache->lru+1, entries+cache->lru, (i-cache->lru) * sizeof(entries[0])); entries[cache->lru]=tmp; } } /** A simple hash. * Copies the significant bits ' ' .. 'Z' (or, really, \\x20 .. \\x60) of * at most 6 bytes of \a stg into a packed bitfield, so that 30bits are * used. */ inline cache_value_t cch___string_to_cv(const char *stg) { union { cache_value_t cv; struct { unsigned int c0:5; unsigned int c1:5; unsigned int c2:5; unsigned int c3:5; unsigned int c4:5; unsigned int c5:5; unsigned int ignore_me:2; }; } __attribute__((packed)) result; result.cv=0; if (*stg) { result.c0 = *(stg++) - 0x20; if (*stg) { result.c1 = *(stg++) - 0x20; if (*stg) { result.c2 = *(stg++) - 0x20; if (*stg) { result.c3 = *(stg++) - 0x20; if (*stg) { result.c4 = *(stg++) - 0x20; if (*stg) { result.c5 = *(stg++) - 0x20; } } } } } } return result.cv; } /** -. * */ int cch__hash_find(struct cache_t *cache, const char *key, cache_value_t *data) { int status; cache_value_t id; int i; id=cch___string_to_cv(key); DEBUGP("looking for %lX = %s", id, key); if (cch__find(cache, id, &i, NULL, NULL) == 0 && strcmp(key, cache->entries[i]->data) == 0) { *data = cache->entries[i]->hash_data; DEBUGP("found %s=%ld", key, *data); status=0; } else status=ENOENT; return status; } /** -. 
* */ int cch__hash_add(struct cache_t *cache, const char *key, cache_value_t value) { int status; cache_value_t id; id=cch___string_to_cv(key); STOPIF( cch__add(cache, id, key, strlen(key), NULL), NULL); cache->entries[ cache->lru ]->hash_data = value; ex: return status; } fsvs-1.2.6/src/direnum.c0000644000202400020240000004035411146513443014055 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include "est_ops.h" #include "direnum.h" #include "warnings.h" #include "global.h" #include "helper.h" /** \file * Directory enumerator functions. */ /** \defgroup getdents Directory reading * \ingroup perf * How to read a million inodes as fast as possible * * \section getdents_why Why? * Why do we care for \a getdents64 instead of simply using the * (portable) \a readdir()? * - \a getdents64 gives 64bit inodes (which we need on big * filesystems) * - as \a getdents64 gives up to (currently) 4096 bytes of directory * data, we save some amount of library and/or kernel calls - * for 32 byte per directory entry (estimated, measured, averaged) * we get a maximum of about 128 directory entries per call - which * saves many syscalls and much time. * Not counting the overhead of the apr- and libc-layers ... which we * should (have to) use for eg. windows. * * \section getdents_how How? * We have two kinds of directory reading codes. * - A fast one with \a getdents64() (linux-specific) * - A compatibility layer using \a opendir() / \a readdir() / \a closedir(). * * Which one to use is defined by \c configure. 
* */ /** @{ */ #undef HAVE_GETDENTS64 #ifdef HAVE_LINUX_TYPES_H #ifdef HAVE_LINUX_UNISTD_H /** If the system fulfills all necessary checks to use getdents(), this macro * is set. */ #define HAVE_GETDENTS64 1 #endif #endif #ifdef HAVE_GETDENTS64 /* Fast linux version. */ #include #include /** The type of handle. */ typedef int dir__handle; /** A compatibility structure. * It has an inode; a name; and a record length in it, to get from one * record to the next. */ typedef struct dirent64 fsvs_dirent; /** Starts enumeration of the given \a path. The directory handle is returned * in \a *dirp. * \return 0 for success, or an error code. */ inline int dir__start_enum(dir__handle *dh, char *path) { int status; status=0; *dh=open(path, O_RDONLY | O_DIRECTORY); STOPIF_CODE_ERR( *dh <= 0, errno, "open directory %s for reading", path); ex: return status; } /** The enumeration function. * \param dh The handle given by dir__start_enum. * \param dirp The space where data should be returned * \param count The maximum number of bytes in \a dirp. * * \return The number of bytes used in \a dirp. */ inline int dir__enum(dir__handle dh, fsvs_dirent *dirp, unsigned int count) { return syscall(__NR_getdents64, dh, dirp, count); } /** Simply closes the handle \a dh. * */ inline int dir__close(dir__handle dh) { int status; status=0; STOPIF_CODE_ERR( close(dh) == -1, errno, "closing dir-handle"); ex: return status; } /** How to get the length of a directory (in bytes), from a handle \a dh, * into \a st->size. */ inline int dir__get_dir_size(dir__handle dh, struct sstat_t *st) { int status; status=0; STOPIF( hlp__fstat(dh, st), "Get directory size"); ex: return status; } #else /* We fake something compatible with what we need. * That's not the finest way, but it works (TM). 
*/ #include #include struct fsvs_dirent_t { uint64_t d_ino; int d_reclen; char d_name[NAME_MAX+1]; }; typedef struct fsvs_dirent_t fsvs_dirent; typedef DIR* dir__handle; inline int dir__start_enum(dir__handle *dh, char *path) { int status; status=0; STOPIF_CODE_ERR( (*dh=opendir(path)) == NULL, errno, "Error opening directory %s", path); ex: return status; } /* Impedance matching .. don't like it. */ inline int dir__enum(dir__handle dh, fsvs_dirent *dirp, unsigned int count) { struct dirent *de; de=readdir(dh); /* EOD ? */ if (!de) return 0; dirp[0].d_ino = de->d_ino; strcpy( dirp[0].d_name, de->d_name); dirp[0].d_reclen = sizeof(dirp[0])-sizeof(dirp[0].d_name) + strlen(dirp[0].d_name) + 1; return dirp[0].d_reclen; } inline int dir__close(dir__handle dh) { int status; status=0; STOPIF_CODE_ERR( closedir(dh) == -1, errno, "Error closing directory handle"); ex: return status; } inline int dir__get_dir_size(dir__handle dh, struct sstat_t *st) { int status; status=0; st->size=0; #ifdef HAVE_DIRFD STOPIF( hlp__fstat(dirfd(dh), st), "Get directory size()"); #endif ex: return status; } #endif /** @} */ /** The amount of memory that should be allocated for directory reading. * This value should be bigger (or at least equal) than the number of * bytes returned by \a getdents(). * For the compatibility layer it's more or less the maximum filename length * plus the inode and record length lengths. * * This many bytes \b more will also be allocated for the filenames in a * directory; if we get this close to the end of the buffer, * the memory area will be reallocated. */ #define FREE_SPACE (4096) /** Compares two struct estat pointers by device/inode. * \return +2, +1, 0, -1, -2, suitable for \a qsort(). * * That is now an inline function; but without force gcc doesn't inline it * on 32bit, because of the size (64bit compares, 0x6b bytes). * [ \c __attribute__((always_inline)) in declaration]. 
*/ int dir___f_sort_by_inodePP(struct estat *a, struct estat *b) { register const struct sstat_t* __a=&(a->st); register const struct sstat_t* __b=&(b->st); if (__a->dev > __b->dev) return +2; if (__a->dev < __b->dev) return -2; if (__a->ino > __b->ino) return +1; if (__a->ino < __b->ino) return -1; return 0; } /** Compares the data inside two struct estat pointers to pointers by * device/inode. * \return +2, +1, 0, -1, -2, suitable for \a qsort(). */ int dir___f_sort_by_inode(struct estat **a, struct estat **b) { return dir___f_sort_by_inodePP(*a, *b); } /** Compares two names/strings. * Used for type checking cleanliness. * 'C' as for 'Const'. * \return +2, +1, 0, -1, -2, suitable for \a qsort(). */ inline int dir___f_sort_by_nameCC(const void *a, const void *b) { return strcoll(a,b); } /** Compares the data inside two struct estat pointers to pointers * by name. * \return +2, +1, 0, -1, -2, suitable for \a qsort(). */ int dir___f_sort_by_name(const void *a, const void *b) { register const struct estat * const *_a=a; register const struct estat * const *_b=b; return dir___f_sort_by_nameCC((*_a)->name, (*_b)->name); } /** Compares a pointer to name (string) with a struct estat pointer * to pointer. * \return +2, +1, 0, -1, -2, suitable for \a qsort(). */ int dir___f_sort_by_nameCS(const void *a, const void *b) { register const struct estat * const *_b=b; return dir___f_sort_by_nameCC(a, (*_b)->name); } /** -. * If it has no entries, an array with NULL is nonetheless allocated. */ int dir__sortbyname(struct estat *sts) { int count, status; // BUG_ON(!S_ISDIR(sts->st.mode)); count=sts->entry_count+1; /* After copying we can release some space, as 64bit inodes * are smaller than 32bit pointers. * Or otherwise we may have to allocate space anyway - this * happens automatically on reallocating a NULL pointer. 
*/ STOPIF( hlp__realloc( &sts->by_name, count*sizeof(*sts->by_name)), NULL); if (sts->entry_count!=0) { memcpy(sts->by_name, sts->by_inode, count*sizeof(*sts->by_name)); qsort(sts->by_name, sts->entry_count, sizeof(*sts->by_name), dir___f_sort_by_name); } sts->by_name[sts->entry_count]=NULL; status=0; ex: return status; } /** -. * */ int dir__sortbyinode(struct estat *sts) { // BUG_ON(!S_ISDIR(sts->st.mode)); if (sts->entry_count) { BUG_ON(!sts->by_inode); qsort(sts->by_inode, sts->entry_count, sizeof(*sts->by_inode), (comparison_fn_t)dir___f_sort_by_inode); } return 0; } /** -. * The entries are sorted by inode number and stat()ed. * * \param this a pointer to this directory's stat - for estimating * the number of entries. Only this->st.st_size is used for that - * it may have to be zeroed before calling. * \param est_count is used to give an approximate number of entries, to * avoid many realloc()s. * \param give_by_name simply tells whether the ->by_name array should be * created, too. * * The result is written back into the sub-entry array in \a this. * * To avoid reallocating (and copying!) large amounts of memory, * this function fills some arrays from the directory, then allocates the * needed space, sorts the data (see note below) and adds all other data. * See \a sts_array, \a names and \a inode_numbers. * * \note Sorting by inode number brings about 30% faster lookup * times on my test environment (8 to 5 seconds) on an \b empty cache. * Once the cache is filled, it won't make a difference. * * \return 0 for success, else an errorcode. */ int dir__enumerator(struct estat *this, int est_count, int give_by_name) { dir__handle dirhandle; int size; int count; int i,j,l; int sts_free; int status; /* Struct \a estat pointer for temporary use. */ struct estat *sts=NULL; /* The estimated number of entries. */ int alloc_count; /* Stores the index of the next free byte in \a strings. */ int mark; /* Filename storage space. 
Gets stored in the directories \a ->strings * for memory management purposes. */ void *strings=NULL; /* Array of filenames. As the data space potentially has to be * reallocated at first only the offsets into \a *strings is stored. * These entries must be of the same size as a pointer, because the array * is reused as \c sts_array[] .*/ long *names=NULL; /* The buffer space, used as a struct \a fsvs_dirent */ char buffer[FREE_SPACE]; /* points into and walks over the \a buffer */ fsvs_dirent *p_de; /* Array of the struct \a estat pointers. Reuses the storage space * of the \a names Array. */ struct estat **sts_array=NULL; /* Array of inodes. */ ino_t *inode_numbers=NULL; STOPIF( dir__start_enum(&dirhandle, "."), NULL); if (!this->st.size) STOPIF( dir__get_dir_size(dirhandle, &(this->st)), NULL); /* At least a long for the inode number, and 3 characters + * a \0 per entry. But assume an average of 11 characters + \0. * If that's incorrect, we'll have to do an realloc. Oh, well. * * Another estimate which this function gets is the number of files * last time this directory was traversed. * * Should maybe be tunable in the future. * * (On my system I have an average of 13.9 characters per entry, * without the \0) */ alloc_count=this->st.size/(sizeof(*p_de) - sizeof(p_de->d_name) + ESTIMATED_ENTRY_LENGTH +1); /* + ca. 20% */ est_count= (est_count*19)/16 +1; if (alloc_count > est_count) est_count=alloc_count; /* on /proc, which gets reported with 0 bytes, * only 1 entry is allocated. This entry multiplied with 19/16 * is still 1 ... crash. * So all directories reported with 0 bytes are likely virtual * file systems, which can have _many_ entries ... 
*/ if (est_count < 32) est_count=32; size=FREE_SPACE + est_count*( ESTIMATED_ENTRY_LENGTH + 1 ); STOPIF( hlp__alloc( &strings, size), NULL); mark=count=0; inode_numbers=NULL; names=NULL; alloc_count=0; /* read the directory and count entries */ while ( (i=dir__enum(dirhandle, (fsvs_dirent*)buffer, sizeof(buffer))) >0) { /* count entries, copy name and inode nr */ j=0; while (j= alloc_count) { /* If we already started, put a bit more space here. * Should maybe be configurable. */ if (!alloc_count) alloc_count=est_count; else alloc_count=alloc_count*19/16; STOPIF( hlp__realloc( &names, alloc_count*sizeof(*names)), NULL); /* temporarily we store the inode number in the *entries_by_inode * space; that changes when we've sorted them. */ STOPIF( hlp__realloc( &inode_numbers, alloc_count*sizeof(*inode_numbers)), NULL); } p_de=(fsvs_dirent*)(buffer+j); DEBUGP("found %llu %s", (t_ull)p_de->d_ino, p_de->d_name); if (p_de->d_name[0] == '.' && ((p_de->d_name[1] == '\0') || (p_de->d_name[1] == '.' && p_de->d_name[2] == '\0')) ) { /* just ignore . and .. */ } else { /* store inode for sorting */ inode_numbers[count] = p_de->d_ino; /* Store pointer to name. * In case of a realloc all pointers to the strings would get * invalid. So don't store the addresses now - only offsets. */ names[count] = mark; /* copy name, mark space as used */ l=strlen(p_de->d_name); strcpy(strings+mark, p_de->d_name); mark += l+1; count++; } /* next */ j += p_de->d_reclen; } /* Check for free space. * We read at most FREE_SPACE bytes at once, * so it's enough to have FREE_SPACE bytes free. * Especially because there are some padding and pointer bytes * which get discarded. */ if (size-mark < FREE_SPACE) { /* Oh no. Have to reallocate. * But we can hope that this (big) chunk is on the top * of the heap, so that it won't be copied elsewhere. * * How much should we add? For now, just give about 30%. 
*/ /* size*21: Let's hope that this won't overflow :-) */ size=(size*21)/16; /* If +20% is not at least the buffer size (FREE_SPACE), * take at least that much memory. */ if (size < mark+FREE_SPACE) size=mark+FREE_SPACE; STOPIF( hlp__realloc( &strings, size), NULL); DEBUGP("strings realloc(%p, %d)", strings, size); } } STOPIF_CODE_ERR(i<0, errno, "getdents64"); DEBUGP("after loop found %d entries, %d bytes string-space", count, mark); this->entry_count=count; /* Free allocated, but not used, memory. */ STOPIF( hlp__realloc( &strings, mark), NULL); /* If a _down_-sizing ever gives an error, we're really botched. * But if it's an empty directory, a NULL pointer will be returned. */ BUG_ON(mark && !strings); this->strings=strings; /* Now this space is used - don't free. */ strings=NULL; /* Same again. Should never be NULL, as the size is never 0. */ STOPIF( hlp__realloc( &inode_numbers, (count+1)*sizeof(*inode_numbers)), NULL); STOPIF( hlp__realloc( &names, (count+1)*sizeof(*names)), NULL); /* Store end-of-array markers */ inode_numbers[count]=0; names[count]=0; /* Now we know exactly how many entries, we build the array for sorting. * We don't do that earlier, because resizing (and copying!) * is slow. Doesn't matter as much if it's just pointers, * but for bigger structs it's worth avoiding. * Most of the structures get filled only after sorting! */ /* We reuse the allocated array for names (int**) for storing * the (struct estat**). */ sts_array=(struct estat**)names; sts_free=0; for(i=0; iname=this->strings + names[i]; sts->st.ino=inode_numbers[i]; /* now the data is copied, we store the pointer. */ sts_array[i] = sts; sts++; sts_free--; } /* now names is no longer valid - space was taken by sts_array. */ names=NULL; this->by_inode=sts_array; /* Now the space is claimed otherwise - so don't free. 
*/ sts_array=NULL; /* See inodeSort */ STOPIF( dir__sortbyinode(this), NULL); // for(i=0; id_ino, de[i]->d_name); for(i=0; iby_inode[i]; sts->parent=this; sts->repos_rev=SVN_INVALID_REVNUM; status=hlp__lstat(sts->name, &(sts->st)); if (abs(status) == ENOENT) { DEBUGP("entry \"%s\" not interesting - maybe a fifo or socket?", sts->name); sts->to_be_ignored=1; } else STOPIF( status, "lstat(%s)", sts->name); /* New entries get that set, because they're "updated". */ sts->old_rev_mode_packed = sts->local_mode_packed= MODE_T_to_PACKED(sts->st.mode); } /* Possibly return list sorted by name. */ if (give_by_name) STOPIF(dir__sortbyname(this), NULL); else /* should not be needed - but it doesn't hurt, either. */ this->by_name=NULL; status=0; ex: IF_FREE(strings); IF_FREE(names); IF_FREE(inode_numbers); IF_FREE(sts_array); if (dirhandle>=0) dir__close(dirhandle); return status; } fsvs-1.2.6/src/url.c0000644000202400020240000012535411435776105013227 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include "url.h" #include "waa.h" #include "cache.h" #include "helper.h" #include "est_ops.h" #include "checksum.h" #include "racallback.h" /** \file * \ref urls action, and functions for URLs. * */ /** \addtogroup cmds * * \section urls * * \code * fsvs urls URL [URLs...] * fsvs urls dump * fsvs urls load * \endcode * * Initializes a working copy administrative area and connects * \c the current working directory to \c REPOS_URL. All commits and * updates will be done to this directory and against the given URL. 
* * Example: * \code * fsvs urls http://svn/repos/installation/machine-1/trunk * \endcode * * For a format definition of the URLs please see the chapter \ref * url_format. * * \note * If there are already URLs defined, and you use that command later again, * please note that as of 1.0.18 the older URLs are not overwritten * as before, but that the new URLs are \b appended to the given list! * If you want to start afresh, use something like * \code * true | fsvs urls load * \endcode * * * \subsection urls_load Loading URLs * * You can load a list of URLs from \c STDIN; use the \c load subcommand * for that. * * Example: * \code * ( echo 'N:local,prio:10,http://svn/repos/install/machine-1/trunk' ; * echo 'P:50,name:common,http://svn/repos/install/common/trunk' ) | * fsvs urls load * \endcode * * Empty lines are ignored. * * * \subsection urls_dump Dumping the defined URLs * * To see which URLs are in use for the current WC, you can use \c dump. * * As an optional parameter you can give a format statement: * *
\c %ppriority *
\c %nname *
\c %rcurrent revision *
\c %ttarget revision *
\c %Rreadonly-flag *
\c %uURL *
\c %Iinternal number for this URL *
* * \note That's not a real \c printf()-format; only these and a few \c \\ * sequences are recognized. * * Example: * \code * fsvs urls dump " %u %n:%p\\n" * http://svn/repos/installation/machine-1/trunk local:10 * http://svn/repos/installation/common/trunk common:50 * \endcode * * The default format is \c "name:%n,prio:%p,target:%t,ro:%r,%u\\n"; for a * more readable version you can use \ref glob_opt_verb "-v". * * * \subsection urls_load Modifying URLs * * You can change the various parameters of the defined URLs like this: * \code * # Define an URL * fsvs urls name:url1,target:77,readonly:1,http://anything/... * # Change values * fsvs urls name:url1,target:HEAD * fsvs urls readonly:0,http://anything/... * fsvs urls name:url1,prio:88,target:32 * \endcode * * \note FSVS as yet doesn't store the whole tree structures of all URLs. * So if you change the priority of an URL, and re-mix the directory trees * that way, you'll need a \ref sync-repos and some \ref revert commands. * I'd suggest to avoid this, until FSVS does handle that case better. * * */ /** \defgroup url_format Format of URLs * \ingroup userdoc * * FSVS can use more than one URL; the given URLs are \e overlaid according * to their priority. * * For easier managing they get a name, and can optionally take a target * revision. * * Such an extended URL has the form * \code * ["name:"{name},]["target:"{t-rev},]["prio:"{prio},]URL * \endcode * where URL is a standard URL known by subversion -- * something like http://...., svn://... or * svn+ssh://.... * * The arguments before the URL are optional and can be in any * order; the URL must be last. * * Example: * \code * name:perl,prio:5,svn://... * \endcode * or, using abbreviations, * \code * N:perl,P:5,T:324,svn://... * \endcode * * Please mind that the full syntax is in lower case, whereas the * abbreviations are capitalized! \n * Internally the \c : is looked for, and if the part before this character * is a known keyword, it is used. 
\n * As soon as we find an unknown keyword we treat it as an URL, ie. stop * processing. * * The priority is in reverse numeric order - the lower the number, the * higher the priority. (See \c url__current_has_precedence() ) * * * \section url_prio Why a priority? * * When we have to overlay several URLs, we have to know \b which URL * takes precedence - in case the same entry is in more than one. (Which * is \b not recommended!) * * * \section url_name Why a name? * * We need a name, so that the user can say "commit all outstanding * changes to the repository at URL x", without having to remember the * full URL. * After all, this URL should already be known, as there's a list of URLs to * update from. * * You should only use alphanumeric characters and the underscore here; or, * in other words, \c \\w or \c [a-zA-Z0-9_]. (Whitespace, comma and * semicolon get used as separators.) * * * \section url_target What can I do with the target revision? * * Using the target revision you can tell fsvs that it should use the given * revision number as destination revision - so update would go there, but * not further. * Please note that the given revision number overrides the \c -r * parameter; this sets the destination for all URLs. * * The default target is \c HEAD. * * \note In subversion you can enter \c URL\@revision - this syntax may be * implemented in fsvs too. (But it has the problem, that as soon as you * have a \c @ in the URL, you \b must give the target revision every * time!) * * * \section url_intnum There's an additional internal number - why that? * * This internal number is not for use by the user. \n * It is just used to have an unique identifier for an URL, without using * the full string. 
* * On my system the package names are on average 12.3 characters long * (1024 packages with 12629 bytes, including newline): * \code * COLUMNS=200 dpkg-query -l | cut -c5- | cut -f1 -d" " | wc * \endcode * * So if we store an \e id of the url instead of the name, we have * approx. 4 bytes per entry (length of strings of numbers from 1 to 1024). * Whereas using the needs name 12.3 characters, that's a difference of 8.3 * per entry. * * Multiplied with 150 000 entries we get about 1MB difference in filesize * of the dir-file. Not really small ... \n * And using the whole URL would inflate that much more. * * Currently we use about 92 bytes per entry. So we'd (unnecessarily) * increase the size by about 10%. * * That's why there's an url_t::internal_number. */ /** -. * * Does get \c free()d by url__close_sessions(). * * See \ref glob_opt_urls "-u" for the specification. */ char **url__parm_list=NULL; int url__parm_list_len=0, url__parm_list_used=0; int url__must_write_defs=0; /** -. * * Because this may be called below input_tree, returning \c ENOENT could * be interpreted as no dirlist found - which has to be allowed in * some cases. * So this returns \c EADDRNOTAVAIL. */ int url__find_by_name(const char *name, struct url_t **storage) { int status; int i; /* Normalize */ if (name && !*name) name=NULL; status=EADDRNOTAVAIL; for(i=0; iname ? (!name || !*name) : (strcmp(urllist[i]->name, name) == 0) ) { if (storage) *storage=urllist[i]; status=0; break; } } if (status) DEBUGP("url with name %s not found!", name); return status; } /** -. * * Because this may be called below input_tree, returning \c ENOENT could * be interpreted as no dirlist found - which has to be allowed in * some cases. * So this returns \c EADDRNOTAVAIL. 
*/ int url__find_by_url_in_list(char *url, struct url_t **list, int count, struct url_t **storage) { int status; int i; status=EADDRNOTAVAIL; for(i=0; iurl, url) == 0) { if (storage) *storage=list[i]; status=0; break; } } if (status) DEBUGP("url with url %s not found!", url); return status; } /** Wrapper for url__find_by_url_in_list(). */ int url__find_by_url(char *url, struct url_t **storage) { return url__find_by_url_in_list(url, urllist, urllist_count, storage); } /** -. * */ int url__find_by_intnum(int intnum, struct url_t **storage) { int status; int i; /* We must not return ENOENT. Because this is called below input_tree, * returning ENOENT could be interpreted as "no dirlist found" - which * has to be allowed in some cases. */ status=EADDRNOTAVAIL; for(i=0; iinternal_number == intnum) { if (storage) *storage=urllist[i]; status=0; break; } } if (status) DEBUGP("url with intnum %d not found!", intnum); else DEBUGP("url with intnum %d is %s", intnum, (*storage)->url); return status; } /** \anchor url_flags * \name Flags to store which attributes we already got for this URL. */ /** @{ */ #define HAVE_NAME (1) #define HAVE_PRIO (2) #define HAVE_URL (4) #define HAVE_TARGET (8) #define HAVE_READONLY (16) /** @} */ /** -. * * This function preserves it's input. * If storage is non- \c NULL, it's \c ->name member get's a copy of the given * (or a deduced) name. * * In \a def_parms the parameters found are flagged - see \ref url_flags; * if \a def_parms is \c NULL, an URL \b must be present. * */ int url__parse(char *input, struct url_t *storage, int *def_parms) { int status; char *cp, *value, *end, *cur; struct url_t eurl; int nlen, vlen, have_seen; status=0; have_seen=0; memset(&eurl, 0, sizeof(eurl)); /* The internal number is initially unknown; we must not set one here, * as a later read URL may have the number we've chosen. * We have to give internal numbers in a second pass. 
*/ eurl.internal_number=INVALID_INTERNAL_NUMBER; eurl.current_rev=0; eurl.target_rev=SVN_INVALID_REVNUM; eurl.current_target_override=0; eurl.head_rev=SVN_INVALID_REVNUM; cur=input; DEBUGP("input: %s", input); while (! (have_seen & HAVE_URL)) { /* Find first ':'. * Variables are as follows: * nlen=5 vlen=3 * [---][-] * name:xxx,prio:123,svn://xxxxxx * ^ ^ ^ * | | end * | value * cur * */ end=cur; value=NULL; while (*end) { /* There may be multiple ':' in a single value, eg. in the URL: * http://user:pass@host:port/. * Set value to the first occurrence. */ if (*end == ':' && !value) value = end+1; if (*end == ',') break; end++; } /* Don't count the ':'. */ nlen = (value ? value-1 : end) - cur; vlen = value ? end - value : 0; DEBUGP("cur=%s value=%s end=%s vlen=%d nlen=%d", cur, value, end, vlen, nlen); if (strncmp("name", cur, nlen) == 0 || strncmp("N", cur, nlen) == 0) { STOPIF_CODE_ERR( have_seen & HAVE_NAME, EINVAL, "!Found two names in URL '%s'; only one may be given.", input); if (!value) goto need_value; /* "" == NULL == empty name? */ if (vlen==0) DEBUGP("NULL name"); else if (storage) { /* If we need that name again, make a copy. * We cannot simply use strdup(), because this is not * \0-terminated. 
*/ STOPIF( hlp__strnalloc(vlen, &eurl.name, value), NULL); DEBUGP("got a name '%s' (%d bytes), going on with '%s'", eurl.name, vlen, end); have_seen |= HAVE_NAME; } } else if (strncmp("target", cur, nlen) == 0 || strncmp("T", cur, nlen) == 0) { STOPIF_CODE_ERR( have_seen & HAVE_TARGET, EINVAL, "!Already got a target revision in URL '%s'.", input); if (!value) goto need_value; STOPIF( hlp__parse_rev( value, &cp, & eurl.target_rev), NULL); STOPIF_CODE_ERR( cp == value || cp != end, EINVAL, "The given target revision in '%s' is invalid.", input); DEBUGP("got target %s", hlp__rev_to_string(eurl.target_rev)); have_seen |= HAVE_TARGET; } else if (strncmp("prio", cur, nlen) == 0 || strncmp("P", cur, nlen) == 0) { STOPIF_CODE_ERR( have_seen & HAVE_PRIO, EINVAL, "!Found two priorities in URL '%s'; only one allowed.", input); if (!value) goto need_value; eurl.priority=strtol(value, &cp, 0); STOPIF_CODE_ERR( cp == value || cp != end, EINVAL, "!The given url \"%s\" is invalid; cannot parse the priority.", input); DEBUGP("got priority %d", eurl.priority); have_seen |= HAVE_PRIO; } else if (strncmp("readonly", cur, nlen) == 0 || strncmp("ro", cur, nlen) == 0) { STOPIF_CODE_ERR( have_seen & HAVE_READONLY, EINVAL, "!Found two readonly flags in URL \"%s\"; only one allowed.", input); if (value) { eurl.is_readonly=strtol(value, &cp, 0); STOPIF_CODE_ERR( cp == value || cp != end, EINVAL, "!Cannot parse the readonly flag in \"%s\".", input); } else eurl.is_readonly=1; have_seen |= HAVE_READONLY; } else { /* For URLs no abbreviation is allowed, so we check the length extra. * An exception is "svn+", which can have arbitrary tunnels after it; * see ~/.subversion/config for details. * * We cannot use strcmp(), as the URL has no '\0' at the given * position. * We test for the ":", too, so that "http\0" isn't valid. * */ nlen++; if (strncmp("svn+", cur, 4) == 0) { /* At least a single character after the '+'; but nlen is already * incremented. 
*/ STOPIF_CODE_ERR(nlen <= 5, EINVAL, "!No tunnel given after \"svn+\" in \"%s\".", cur); } else if ( (nlen == 4 && strncmp("svn:", cur, nlen) == 0) || (nlen == 5 && (strncmp("http:", cur, nlen) == 0 || strncmp("file:", cur, nlen) == 0) ) || (nlen == 6 && strncmp("https:", cur, nlen) == 0)) DEBUGP("known protocol found"); else STOPIF_CODE_ERR(1, EINVAL, "!The protocol given in \"%s\" is unknown!", cur); /* The shortest URL is name="http:" and value="//a", or something * like that ;-) */ if (!value || vlen<3 || strncmp(value, "//", 2)!=0) STOPIF_CODE_ERR(1, EINVAL, "!The URL in \"%s\" is invalid.", cur); /* Must be an URL */ /* We remove any / at the end of the URL (which may have resulted from * bash-completion), otherwise we'll get an error: * subversion/libsvn_subr/path.c:114: * svn_path_join: Assertion `is_canonical (base, blen)' failed. * That's not necessary. */ /* Please note that URLs are defined to use a '/', not * (platform-dependent) PATH_SEPARATOR! */ while (vlen>3 && value[vlen-1] == '/') value[--vlen] = 0; /* We need the ':' and the "name" (protocol) too, but don't count the * '\0' at the end. * The ':' is already counted by the nlen++ above. */ eurl.urllen=nlen + 0 + 1 + vlen - 1; STOPIF( hlp__strdup( &eurl.url, cur), NULL); have_seen |= HAVE_URL; } while (*end == ',') end++; if (!*end) break; cur=end; } if (def_parms) *def_parms=have_seen; else STOPIF_CODE_ERR( !(have_seen & HAVE_URL), EINVAL, "!No URL found in %s", input); if (storage) *storage=eurl; /* Maybe not entirely correct here, because URLs might not be stored in * the URL list. */ url__must_write_defs=1; ex: return status; need_value: STOPIF(EINVAL, "!Specification '%s' is not a valid URL - ':' missing.", input); goto ex; } /** -. * This functions returns 0 for success. * Error codes (eg \c EADDRNOTAVAIL ) are possible. * * If \a *existed is non- \c NULL, it is set to * 0 for a new URL or \c EEXIST if an existing URL was overwritten. 
* * The URL is parsed into an empty space at the end of \a urllist , * which must already exist! * * If the same URL was already used, the old entry gets overwritten. */ int url__insert_or_replace(char *eurl, struct url_t **storage, int *existed) { int status; int seen; struct url_t target, *dupl, *dest, *by_name; status=0; STOPIF( url__parse(eurl, &target, &seen), NULL); by_name=NULL; /* No error checks necessary, pointer stays NULL if not found. */ if (seen & HAVE_NAME) url__find_by_name(target.name, &by_name); dupl=NULL; /* If an URL is given, this is what is used for replacement. */ if (seen & HAVE_URL) url__find_by_url(target.url, &dupl); else { /* If no URL, then try to find the name. */ dupl=by_name; } if (!dupl) { if (!(seen & HAVE_URL)) { STOPIF( EINVAL, !(seen & HAVE_NAME) ? "!No URL was given in \"%s\"." : "!Cannot find the name given in \"%s\", so cannot modify an URL.", eurl); } if (seen & HAVE_NAME) { /* The names must be unique. */ STOPIF_CODE_ERR( by_name, EADDRINUSE, "!There's already an url named \"%s\"", target.name); /* If we didn't find it, it's ok. */ status=0; } /* Copy to destination */ dest=urllist[urllist_count]; *dest = target; urllist_count++; } else { /* \todo Currently it is not defined whether the strings are * heap-allocated or not, so it's not easy to free them. * * This should not happen so often, so we ignore that. */ /* When it gets overwritten, we take care to not simply copy - * we just change the given values. */ if (seen & HAVE_TARGET) dupl->target_rev = target.target_rev; if (seen & HAVE_PRIO) dupl->priority = target.priority; if (seen & HAVE_READONLY) dupl->is_readonly = target.is_readonly; if (seen & HAVE_NAME) dupl->name = target.name; /* The URL is the same, so the length is the same, and the internal * number is generated or already present. */ dest=dupl; } if (existed) *existed = dupl ? EEXIST : 0; if (storage) *storage=dest; ex: return status; } /** Simple function to find an unused id. * Slow, but easy. 
I'd like to use the linux-kernel bitmap functions - * but they're not exported, and not available everywhere. */ int find_next_zero_bit(fd_set *fd, int from) { while (FD_ISSET(from, fd)) from++; return from; } /** Set the internal number of all URLs which don't already have one. * * I'm aware that a normal \c fd_set is normally limited to * a few hundred bits (eg. for use with 1024 filehandles); but the low-level * ops don't know what we're doing, anyway. So we could just extend the * bitmap, and it should work as before (although maybe * there's be a sanity test). * * Sadly find_next_zero_bit and friends are not exported from the kernel, * so we have to use \c FD_ISSET and similar; there might be * faster/better alternatives. Tell me if you know one. */ int url___set_internal_nums(void) { int status; int i, j, bit; fd_set bitmap; /* We need to store only so many bits as we have URLs. * If URLs have higher inums there will be free lower inums. */ STOPIF_CODE_ERR( sizeof(bitmap)*8 < urllist_count, EMFILE, "Your fd_set is too small for the number of urls.\n" "Please contact dev@fsvs.tigris.org for help."); status=0; FD_ZERO(&bitmap); /* Step 1: look which numbers are used. */ for(i=0; iinternal_number > urllist_count) { /* Note: For such values we still have to check whether two * internal numbers collide. */ for(j=i+1; jinternal_number == urllist[j]->internal_number, EINVAL, "The URLs %s and %s have identical internal numbers!", urllist[i]->url, urllist[j]->url); } else if (urllist[i]->internal_number != INVALID_INTERNAL_NUMBER) { STOPIF_CODE_ERR( FD_ISSET(urllist[i]->internal_number, &bitmap), EINVAL, "The URL %s has a duplicate internal number!", urllist[i]->url); FD_SET(urllist[i]->internal_number, &bitmap); } } /* Step 2: Fill invalid. Start with internal number 1. 
*/ bit=1; for(i=0; iurl, urllist[i]->internal_number); if (urllist[i]->internal_number == INVALID_INTERNAL_NUMBER) { /* Find a free bit */ bit= find_next_zero_bit(&bitmap, bit); DEBUGP("found a free bit for %s: %d", urllist[i]->url, bit); urllist[i]->internal_number=bit; /* No need to set that bit here, just skip to the next. */ bit++; } } ex: return status; } /** -. */ int url__allocate(int reserve_space) { int status; struct url_t *url_mem; int i; status=0; /* We put a terminating NULL pointer at the end. */ STOPIF( hlp__realloc( &urllist, sizeof(*urllist) * (urllist_count+1+reserve_space)), NULL); STOPIF( hlp__calloc( &url_mem, sizeof(*url_mem), reserve_space), NULL); /* store url pointers */ for(i=0; iinternal_number=inum; target->current_rev=rev; i++; l += strlen(urllist_mem+l); } /* Skip over \0 */ l++; } STOPIF_CODE_ERR( close(fh) == -1, errno, "closing the url-list"); fh=-1; /* Read the current revisions from the WAA definition. * If we got data before, we need this here too. */ /* Exception for 1.1.18: Upgrade from 1.1.17. A non-existing file is * allowed, but will convert the data next time. */ status=waa__open_byext(dir, WAA__URL_REVS, WAA__READ, &fh); if (status==ENOENT) { DEBUGP("No file; upgrading?"); status=0; } else { /* Read the associated revisions. */ rev_in=fdopen(fh, "r"); while (1) { status=hlp__string_from_filep(rev_in, &buffer, NULL, 0); if (status == EOF) { status=0; break; } STOPIF( status, "Failed to read copyfrom source"); STOPIF_CODE_ERR( sscanf(buffer, "%d %lu 0 0 0 0\n", &intnum, &rev) != 2, EINVAL, "Error parsing line \"%s\" from %s", buffer, WAA__URL_REVS); STOPIF( url__find_by_intnum(intnum, &target), "URL number %d read from %s not found", intnum, WAA__URL_REVS); target->current_rev=rev; } STOPIF_CODE_ERR( fclose(rev_in)==-1, errno, "error closing %s", WAA__URL_REVS); fh=-1; } /* Sort list by priority */ qsort(urllist, urllist_count, sizeof(*urllist), url__indir_sorter); /* No writing would be necessary if nothing gets changed. 
*/ url__must_write_defs=0; ex: /* urllist_mem must not be freed - our url-strings still live there! */ if (fh!=-1) { l=close(fh); STOPIF_CODE_ERR(l == -1 && !status, errno, "closing the url-list"); } return status; } /** -. * * This prints a message and stops if no URLs could be read. */ int url__load_nonempty_list(char *dir, int reserve_space) { int status, load_st; status=0; if (!dir) dir=wc_path; load_st=url__load_list(dir, reserve_space); STOPIF_CODE_ERR( load_st==ENOENT || urllist_count==0, ENOENT, "!No URLs have been defined for %s.", dir); ex: return status; } /** -. * The data is written in two different locations. * * The internal number was chosen as combining key, because the URL might * include strange characters, and there might not be a name. * * \todo is 1024 bytes always enough? Maybe there's an RFC. Make that * dynamically - look how much we'll need. */ int url__output_list(void) { int status, i, fh, l, fh_revs; char buffer[1024]; struct url_t *url; fh=-1; fh_revs=-1; STOPIF( url___set_internal_nums(), "Setting the internal numbers failed."); if (url__must_write_defs) STOPIF( waa__open_byext(NULL, WAA__URLLIST_EXT, WAA__WRITE, &fh), NULL); STOPIF( waa__open_byext(NULL, WAA__URL_REVS, WAA__WRITE, &fh_revs), NULL); for(i=0; itarget_rev == 0 && url->current_rev == 0) continue; if (fh != -1) { l=snprintf(buffer, sizeof(buffer), "%d %d T:%ld,N:%s,P:%d,ro:%u,%s", url->internal_number, 0, /* Previously the the current revision. */ url->target_rev, url->name ? url->name : "", url->priority, url->is_readonly, url->url); STOPIF_CODE_ERR( l > sizeof(buffer)-4, E2BIG, "You've got too long URLs; I'd need %d bytes. Sorry.", l); /* include the \0 */ l++; /** \todo: writev */ STOPIF_CODE_ERR( write(fh, buffer, l) != l, errno, "Error writing the URL list"); STOPIF_CODE_ERR( write(fh, "\n", 1) != 1, errno, "Error writing the URL list delimiter"); DEBUGP("writing line %s", buffer); } /* A few extra fields, to store some information later. 
*/ l=snprintf(buffer, sizeof(buffer), "%d %ld 0 0 0 0\n", url->internal_number, url->current_rev); /* This can never happen, apart from being caused by radiation. */ BUG_ON( l > sizeof(buffer)-4); STOPIF_CODE_ERR( write(fh_revs, buffer, l) != l, errno, "Error writing the URL list"); } url__must_write_defs=0; ex: if (fh != -1) { i=waa__close(fh, status); fh=-1; STOPIF(i, "Error closing the URL list"); } if (fh_revs != -1) { i=waa__close(fh_revs, status); fh_revs=-1; STOPIF(i, "Error closing the revisions list"); } return status; } /** -. * * If \a missing_dirs is not \c NULL, this function returns in \c * *missing_dirs a \b copied string with the missing path components from * \c current_url->url (which should be freed later). * * \note The session is then registered at the \b existing part, so all * accesses must include this relative part! * * If the URL is ok, a \c NULL is returned (not a pointer to a \c \\0 ). * * This is needed for the \c mkdir_base option; we cannot create the * hierarchy here, because we need a commit editor for that, but in * ci__directory() we cannot use a session based on an non-existing URL. * */ int url__open_session(svn_ra_session_t **session, char **missing_dirs) { int status; svn_error_t *status_svn; apr_hash_t *cfg; char *buffer, *cp; int exists; svn_revnum_t head; status=0; if (!current_url->pool) { STOPIF( apr_pool_create_ex(& current_url->pool, global_pool, NULL, NULL), "no pool"); } STOPIF( hlp__get_svn_config(&cfg), NULL); if (current_url->session) goto ex; /* We wouldn't need to allocate this memory if the URL was ok; but we * don't know that here, and it doesn't hurt that much. 
*/ STOPIF( hlp__strnalloc(current_url->urllen, &buffer, current_url->url), NULL); cp=buffer+current_url->urllen; BUG_ON(*cp); STOPIF_SVNERR_TEXT( svn_ra_open, (& current_url->session, buffer, &cb__cb_table, NULL, /* cbtable, cbbaton, */ cfg, /* config hash */ current_url->pool), "svn_ra_open(\"%s\")", current_url->url); head=SVN_INVALID_REVNUM; STOPIF( url__canonical_rev( current_url, &head), NULL); DEBUGP("Trying url %s@%ld", buffer, head); while (1) { /* Is the caller interested in this check? If not, then just return. */ if (!missing_dirs) break; /* Test whether the base directory exists; we need some lightweight * mechanism to detect that. * Sadly we don't get a result when we open the session. */ /* That's not entirely correct. * In the time between this test and the commit running someone could * create or remove the base path; then we would have tested against * the wrong revision, and might fail nonetheless. */ STOPIF( cb__does_path_exist(current_url->session, "", head, &exists, current_url->pool), NULL); if (exists) break; /* Doesn't exist. Try with the last part removed. */ /* We don't do URLs with less than a few characters. */ while (cp > buffer+4 && *cp != '/') cp--; /* If we're before the hostname, signified by a "//", we abort. */ STOPIF_CODE_EPIPE(cp[-1] == '/', EINVAL, "!Unsuccessfull svn_ra_stat() on every try for URL \"%s\".", current_url->url); /* We're at a slash, and try with a shortened URL. */ *cp=0; DEBUGP("Reparent to %s", buffer); STOPIF_SVNERR( svn_ra_reparent, (current_url->session, buffer, current_url->pool)); } /* See whether the original URL is valid. */ if (missing_dirs) { if (buffer + current_url->urllen == cp) { *missing_dirs=NULL; IF_FREE(buffer); } else { /* Return just the missing parts: * * url: http://aaa/11/22/33/44 * buffer: http://aaa/11/22 * return: 33/44 * * We return the characters that were cut off, without the '/'. 
*/ strcpy(buffer, current_url->url + 1 + (cp - buffer)); DEBUGP("returning missing=%s", buffer); *missing_dirs=buffer; } } else IF_FREE(buffer); if (session) *session = current_url->session; ex: return status; } /** -. * */ int url__close_session(struct url_t *cur) { /* There's no svn_ra_close() or suchlike. * I hope it gets closed by freeing it's pool. */ if (cur->pool) { DEBUGP("closing session and pool for %s", cur->url); BUG_ON(cur->pool == NULL && cur->session != NULL); apr_pool_destroy(cur->pool); cur->session=NULL; cur->pool=NULL; } return 0; } /** -. * */ int url__close_sessions(void) { int status; int i; status=0; IF_FREE(url__parm_list); url__parm_list_len=url__parm_list_used=0; for(i=0; ipriority <= to_compare->priority); } /** Dumps the URLs to \c STDOUT . */ int url___dump(char *format) { int status; int i; char *cp; FILE *output=stdout; struct url_t *url; if (!format) format= opt__is_verbose()>0 ? "%u\\n\tname: \"%n\"; priority: %p; current revision: %r; " "target: %t; readonly:%R\\n" : "name:%n,prio:%p,target:%t,ro:%R,%u\\n"; status=0; for(i=0; i < urllist_count; i++) { url = urllist[i]; cp=format; while (*cp) { switch (cp[0]) { case '\\': switch (cp[1]) { case '\\': STOPIF_CODE_EPIPE( fputc('\\', output), NULL); break; case 'n': STOPIF_CODE_EPIPE( fputc('\n', output), NULL); break; case 'r': STOPIF_CODE_EPIPE( fputc('\r', output), NULL); break; case 't': STOPIF_CODE_EPIPE( fputc('\t', output), NULL); break; case 'f': STOPIF_CODE_EPIPE( fputc('\f', output), NULL); break; case 'x': status= cp[2] && cp[3] ? cs__two_ch2bin(cp+2) : -1; STOPIF_CODE_ERR(status <0, EINVAL, "A \"\\x\" sequence must have 2 hex digits."); STOPIF_CODE_EPIPE( fputc(status, output), NULL); /* There's a +2 below. */ cp+=2; break; default: STOPIF_CODE_ERR(1, EINVAL, "Unknown escape sequence '\\%c' in format.", cp[1]); break; } cp+=2; break; case '%': switch (cp[1]) { /* Allow internal number, too? 
*/ case 'n': STOPIF_CODE_EPIPE( fputs(url->name ?: "", output), NULL); break; case 't': STOPIF_CODE_EPIPE( fputs( hlp__rev_to_string(url->target_rev), output), NULL); break; case 'r': STOPIF_CODE_EPIPE( fputs( hlp__rev_to_string(url->current_rev), output), NULL); break; case 'R': STOPIF_CODE_EPIPE( fprintf(output, "%u", url->is_readonly), NULL); break; case 'I': STOPIF_CODE_EPIPE( fprintf(output, "%u", url->internal_number), NULL); break; case 'p': STOPIF_CODE_EPIPE( fprintf(output, "%u", url->priority), NULL); break; case 'u': STOPIF_CODE_EPIPE( fputs(url->url, output), NULL); break; case '%': STOPIF_CODE_EPIPE( fputc('%', output), NULL); break; default: STOPIF_CODE_ERR(1, EINVAL, "Invalid placeholder '%%%c' in format.", cp[1]); break; } cp+=2; break; default: STOPIF_CODE_EPIPE( fputc(*cp, output), NULL); cp++; } } } status=0; ex: return status; } /** -. * The space for the output is allocated, and must not be freed. */ int url__other_full_url(struct estat *sts, struct url_t *url, char **output) { static const char none[]="(none)"; static struct cache_t *cache=NULL; int status, len; char *data, *path; status=0; if (url) { STOPIF( ops__build_path( &path, sts), NULL); len=url->urllen + 1 + sts->path_len+1; STOPIF( cch__new_cache(&cache, 4), NULL); STOPIF( cch__add(cache, 0, NULL, len, &data), NULL); strcpy( data, url->url); if (path[0]=='.' && path[1]==0) { /* Nothing to be done; just the base URL. */ } else { /* Remove ./ at start. */ if (path[0]=='.' && path[1]==PATH_SEPARATOR) path += 2; data[url->urllen]='/'; strcpy( data+url->urllen+1, path); } *output=data; } else *output=(char*)none; ex: return status; } /** -. */ int url__full_url(struct estat *sts, char **url) { int status; STOPIF( url__other_full_url(sts, sts->url, url), NULL); ex: return status; } /** -. */ int url__find(char *url, struct url_t **output) { int i; struct url_t *cur; /* The URLs are in sorted order (by priority!), so just do a linear * search. 
*/ for(i=0; iurl, url, cur->urllen) == 0) { *output = cur; return 0; } } return ENOENT; } /** -. * Writes the given URLs into the WAA. */ int url__work(struct estat *root UNUSED, int argc, char *argv[]) { int status, fh, l, i, had_it; char *dir; char *cp; int have_space; struct url_t *target; struct url_t *tmp; struct url_t **old_urllist; int old_urllist_count; dir=NULL; fh=-1; STOPIF( waa__given_or_current_wd(NULL, &dir), NULL ); /* The current directory is the WC root. */ STOPIF( waa__set_working_copy(dir), NULL); /* If there's \b no parameter given, we default to dump. * - Use goto? * - Set argv[0] to parm_dump? * - Test for argc? That's what we'll do. */ if (argc>0 && strcmp(argv[0], parm_load) == 0) { /* In case the user had some URLs already defined and "load"s another * list, he would loose all URL internal numbers, so that a * "sync-repos" would be necessary. * * To avoid that we read the existing URLs (but ignore any errors, in * case the URLs are loaded again because the file is damaged). */ status=url__load_list(NULL, argc+1); if (!status || status == ENOENT) { /* all right, just ignore that. */ } else { /* Other errors are at least shown. */ STOPIF_CODE_ERR_GOTO( 1, status, ignore_err, "!Got an error reading the old URL list, so the internal URL mappings\n" "cannot be kept; a \"sync-repos\" might be necessary."); ignore_err: ; } /* Don't remember the old values. */ old_urllist_count=urllist_count; old_urllist=urllist; urllist=NULL; urllist_count=0; /* Surely write the list again. */ url__must_write_defs=1; status=0; /* Load URLs. We do not know how many we'll get, possibly we'll have * to allocate more memory. 
*/ i=0; have_space=0; while (1) { if (have_space < 1) { have_space=32; STOPIF( url__allocate(have_space), NULL); } status=hlp__string_from_filep(stdin, &cp, NULL, SFF_WHITESPACE); if (status == EOF) break; DEBUGP("parsing %s into %d", cp, urllist_count); STOPIF( url__insert_or_replace(cp, &target, &had_it), NULL); DEBUGP("had=%d", had_it); if (!had_it) { have_space--; i++; } target->current_rev=0; /* Try to restore the internal number. */ if (url__find_by_url_in_list(target->url, old_urllist, old_urllist_count, &tmp) == 0) target->internal_number = tmp->internal_number; } IF_FREE(old_urllist); if (opt__is_verbose() >= 0) printf("%d URL%s loaded.\n", i, i==1 ? "" : "s"); } else { /* Read URLs, reserving space. */ status=url__load_list(NULL, argc+1); /* But ignore ENOENT */ if (status == ENOENT) urllist_count=0; else STOPIF_CODE_ERR( status, status, NULL); /* Needs status still set from url__load_list()! */ if (argc == 0 || strcmp(argv[0], parm_dump) == 0) { STOPIF_CODE_ERR( status==ENOENT, ENOENT, "!No URLs defined for \"%s\".", dir); /* Dump */ STOPIF( url___dump(argc ? argv[1] : NULL), NULL); goto ex; } /* Append/insert. */ DEBUGP("%d to parse", argc); /* Parse URLs */ for(l=0; lcurrent_rev=0; } } /* if load_from_stdin */ STOPIF( waa__create_working_copy(dir), NULL); /* Write the URL list */ STOPIF( url__output_list(), NULL); ex: return status; } /** -. * This function takes a list of URL names (and optionally target * revisions), and marks the URLs by setting url_t::to_be_handled. * * \ref url__parm_list gets destroyed. 
*/ int url__mark_todo(void) { int status; char *parm, *url_string, *rev_str, **list; static const char delim[]=",; \t\r\n\f"; struct url_t *url; status=0; if (!url__parm_list_used) goto ex; /* Terminate the list */ url__parm_list[url__parm_list_used] = NULL; list=url__parm_list; while (*list) { parm=*(list++); url_string=strtok(parm, delim); while (url_string && *url_string) { DEBUGP("marking URL %s", url_string); rev_str=strchr(url_string, '@'); if (rev_str) *(rev_str++)=0; STOPIF( url__find_by_name(url_string, &url), "!No URL with name \"%s\" found", url_string); if (url->to_be_handled) DEBUGP("URL %s mentioned multiple times", url->url); url->to_be_handled=1; if (rev_str) { STOPIF( hlp__parse_rev(rev_str, NULL, & url->current_target_rev), NULL); url->current_target_override=1; } url_string=strtok(NULL, delim); } } ex: return status; } /** -. * * We may have to reallocate. We don't want to allocate a pointer * for each argument - we might be run with something like "find / * -type f | xargs fsvs update". */ int url__store_url_name(char *parm) { int status; status=0; /* The terminating NULL must be applied later, too. */ if (url__parm_list_used+2 >= url__parm_list_len) { url__parm_list_len= url__parm_list_len ? url__parm_list_len*2 : 8; STOPIF( hlp__realloc( &url__parm_list, url__parm_list_len*sizeof(*url__parm_list)), NULL); } url__parm_list[url__parm_list_used++] = parm; ex: return status; } /** -. * * DAV (http:// and https://) don't like getting \c * SVN_INVALID_REVNUM on some operations; they throw an 175007 "HTTP * Path Not Found", and "REPORT request failed on '...'". * * So we need the real \c HEAD. * * We try to be fast, and only fetch the value if we really need it. 
*/ int url__canonical_rev( struct url_t *url, svn_revnum_t *rev) { int status; svn_error_t *status_svn; status=0; status_svn=NULL; if (*rev == SVN_INVALID_REVNUM) { if (url->head_rev == SVN_INVALID_REVNUM) { BUG_ON( !url->session ); /* As we ask at most once we just use the connection's pool - that * has to exist if there's a session. */ STOPIF_SVNERR( svn_ra_get_latest_revnum, (url->session, & url->head_rev, url->pool)); DEBUGP("HEAD of %s is at %ld", url->url, url->head_rev); } *rev=url->head_rev; } ex: return status; } /** -. * Returns 0 as long as there's an URL to process; \c current_url is set, * and opened. In \a target_rev the target revision (as per default of this * URL, or as given by the user) is returned. \n * If \c current_url is not NULL upon entry the connection to this URL is * closed, and its memory freed. * * If called with \a target_rev \c NULL, the internal index is reset, and * no URL initialization is done. * * At the end of the list \c EOF is given. * */ int url__iterator2(svn_revnum_t *target_rev, int only_if_count, char **missing) { int status; static int last_index=-1; svn_revnum_t rev; status=0; if (!target_rev) { last_index=-1; goto ex; } while (1) { last_index++; if (last_index >= urllist_count) { DEBUGP("no more URLs."); /* No more data. 
*/ status=EOF; goto ex; } current_url=urllist[last_index]; if (only_if_count) { if (!current_url->entry_list_count) { DEBUGP("No changes for url %s.", current_url->url); continue; } DEBUGP("%d changes for url %s.", current_url->entry_list_count, current_url->url); } if (url__to_be_handled(current_url)) break; } STOPIF( url__open_session(NULL, missing), NULL); if (current_url->current_target_override) rev=current_url->current_target_rev; else if (opt_target_revisions_given) rev=opt_target_revision; else rev=current_url->target_rev; DEBUGP("doing URL %s @ %s", current_url->url, hlp__rev_to_string(rev)); STOPIF( url__canonical_rev(current_url, &rev), NULL); *target_rev = rev; ex: return status; } fsvs-1.2.6/src/warnings.h0000644000202400020240000000662011264677022014252 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __WARNINGS_H__ #define __WARNINGS_H__ #include "options.h" /** \file * Declarations and public enumerations for the warning subsystem. * */ /** List of possible actions for warning messages. */ typedef enum { /** Warn only once. This has to be 0 to be the default! */ WA__WARN_ONCE=0, /** Warn every time */ WA__WARN_ALWAYS, /** Print an error, stop execution, and exit with an error code. */ WA__STOP, /** Ignore this warning. */ WA__IGNORE, /** Just count this warning. If we got an \ref WA__WARN_ONCE warning it's * set to this value; this way it is still incremented and printed * in a summary. Ignored warnings are completely ignored. */ WA__COUNT, /** The maximum index. Keep this at the end! */ _WA__LAST_INDEX } warning_action_e; /** Definitions for warnings. 
*/ struct wa__warnings { /** Short name for command line processing */ char text[24]; /** Action to take. When ONCE is reached, it gets changed to IGNORE. */ warning_action_e action; /** How often this warning occured. Always incremented; * may be >1 for WARN_ONCE. */ unsigned count; /** Whether the user set some value other than the default. */ enum opt__prio_e prio; }; /** List of defined warnings. */ typedef enum { /** Invalid mtime property. */ WRN__META_MTIME_INVALID, /** Invalid user property. */ WRN__META_USER_INVALID, /** Invalid group property. */ WRN__META_GROUP_INVALID, /** Invalid unix-mode property. */ WRN__META_UMASK_INVALID, /** No URL defined for entry. */ WRN__NO_URLLIST, /** \c LC_CTYPE and/or \c LC_ALL are invalid. */ WRN__CHARSET_INVALID, /** A normal user gets a \c EPERM on \c chmod(), if he is not owner. * Could happen if file data is the same, but meta-data has changed. */ WRN__CHMOD_EPERM, /** Other error codes of \c chmod() - not needed, as they should always * lead to a stop? */ WRN__CHMOD_OTHER, /** Normal users may not call \c chown(); they get an \c EPERM. */ WRN__CHOWN_EPERM, /** Other error codes of \c chown() - not needed, as they should always * lead to a stop? */ WRN__CHOWN_OTHER, /** A property should be set with an reserved name. */ WRN__PROP_NAME_RESERVED, /** Mixed revision working copies not allowed. */ WRN__MIXED_REV_WC, /** Diff returned an exit status of 2 (means error). * But as that is returned for diffing binary files, too, * the exit status is normally ignored. */ WRN__DIFF_EXIT_STATUS, /** Absolute ignore pattern doesn't match wc base. */ WRN__IGNPAT_WCBASE, /** Test warning - for debugging and automated testing. */ WRN__TEST_WARNING, /** Maximum index - keep this at the end! */ _WRN__LAST_INDEX } warning_e; /** Possibly print a warning. */ int wa__warn(warning_e index, int status, char *format, ...) __attribute__ ((format (printf, 3, 4) )); /** Set the action of one or warnings. 
*/ int wa__set_warn_option(char *stg, enum opt__prio_e prio); /** Print the warning summary as debug messages. */ int wa__summary(void); /** Splits a string on whitespace, and sets warning options. */ int wa__split_process(char *warn, int prio); #endif fsvs-1.2.6/src/dev/0000755000202400020240000000000012554717234013026 5ustar marekmarekfsvs-1.2.6/src/dev/permutate-all-tests0000755000202400020240000002041711243313364016662 0ustar marekmarek#!/usr/bin/perl # vim: sw=2 ts=2 expandtab # # Runs the tests in various configurations # To be started from the src/ directory, to have matching paths # # If there's an environment variable MAKEFLAGS set, and it includes a # -j parameter, the tests are run in parallel. # # ########################################################################## # Copyright (C) 2005-2008 Philipp Marek. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. ########################################################################## use Encode qw(from_to); use Fcntl qw(FD_CLOEXEC F_SETFD F_GETFD); # ############################################################################# # Detection and preparation ############################################################################# { @locales=`locale -a`; # look for UTF8 ($utf8_locale)=grep(/\.utf-?8/i,@locales); chomp $utf8_locale; # look for non-utf8 ($loc_locale)=grep(!/(POSIX|C|utf-?8$)/i, @locales); chomp $loc_locale; ($cur_locale)=map { /LC_CTYPE="(.*)"/ ? ($1) : (); } `locale`; @test_locales=($utf8_locale, $loc_locale); ($cur_locale_norm = $cur_locale) =~ s#utf-8#utf8#i; push @test_locales, $cur_locale unless grep(lc($cur_locale_norm) eq lc($_), @test_locales); # Test the locales. 
($utf8, $loc)=`make -C ../tests locale_strings BINARY=/bin/true`; # print $utf8,$loc; $target_enc="ISO-8859-1"; from_to($utf8, "utf-8", $target_enc, Encode::FB_CROAK); from_to($loc, $target_enc, "utf-8", Encode::FB_CROAK); # print $utf8,$loc; exit; # Use special directories, so that normal system operation is not harmed. $PTESTBASE="/tmp/fsvs-tests-permutated"; mkdir($PTESTBASE, 0777) || die $! if !-d $PTESTBASE; open(CSV, "> /tmp/fsvs-tests.csv") || die $!; select((select(CSV), $|=1)[0]); print CSV qq("Nr","Prot","LANG","priv","config","Result"\n); # To get some meaningful test name outputted $ENV{"CURRENT_TEST"}="ext-tests"; $start=time(); $ENV{"MAKEFLAGS"} =~ /-j\s*(\d*)\b/; $parallel=$ENV{"PARALLEL"} || ($1+0) || 1; MSG("INFO", "Parallel found as $parallel") if $parallel; # Used for status output $fail=0; # Used for counting $sum=0; # For parallel execution $running=0; # Wait for children $SIG{"CHLD"}="DEFAULT"; %results=(); %pid_to_result=(); MSG("INFO", StartText($start)); # We don't want no cache. $| =1; } ############################################################################# # Run the tests ############################################################################# { # My default is debug - so do that last, to have a # correctly configured environment :-) # Furthermore the "unusual" configurations are done first. # for $release ("--enable-debug") # for $release ("--enable-release") for $release ("--with-waa_md5=8", "", "--enable-release", "--enable-debug") { # make sure that the binary gets recompiled $conf_cmd="( cd .. && ./configure $release ) && ". "touch config.h && make -j$parallel"; system("( $conf_cmd ) > /tmp/fsvs-conf.txt 2>&1") && die "configure problem: $?"; # Start the slow, uncommon tasks first. 
for $prot ("svn+ssh", "file://") { for $user ("sudo", "") { for $lang (@test_locales) { $sum++; # We have to make the conf and waa directory depend on the # user, so that root and normal user don't share the same base - # the user would get some EPERM. # Furthermore parallel tests shouldn't collide. $PTESTBASE2="$PTESTBASE/u.$user" . ($parallel ? ".$sum" : ""); # Start the test asynchronous, and wait if limit reached. $pid=StartTest(); $running++; { my($tmp); $tmp="?"; $results{$lang}{$user}{$prot}{$release}=\$tmp; $pid_to_result{$pid}=\$tmp; } WaitForChilds($parallel); } } } # As we reconfigure on the next run, we have to wait for *all* pending # children. WaitForChilds(1); } } ############################################################################# # Summary ############################################################################# { $end=time(); MSG("INFO", EndText($start, $end)); if ($fail) { MSG("ERROR","$fail of $sum tests failed."); } else { MSG("SUCCESS", "All $sum tests passed."); } close CSV; } system qq(make gcov); exit; ############################################################################# # Functions ############################################################################# sub MSG { my($type, @text)=@_; # We use the same shell functions, to get a nice consistent output. Bash(". ../tests/test_functions\n\$$type '" . join(" ",@text) . "'"); } # Gets all parameters from global variables. sub StartTest { $pid=fork(); die $! unless defined($pid); return $pid if ($pid); # $x=(0.5 < rand())+0; print "$$: exit with $x\n"; exit($x); # this is the child ... pipe(FAILREAD, FAILWRITE) || die "pipe: $!"; # sudo closes the filehandles above 2, and I found no way to get it to # keep them open. # So we have to give a path name to the children. $tl=$ENV{"TEST_LIST"}; $parms="LANG=$lang" . " LC_MESSAGES=C" . " 'TESTBASEx=$PTESTBASE2'" . " 'PROTOCOL=$prot'" . " RANDOM_ORDER=1" . ($tl ? " 'TEST_LIST=$tl'" : "") . 
" TEST_FAIL_WRITE_HDL=/proc/$$/fd/".fileno(FAILWRITE) . # And it can have our STDERR. " TEST_TTY_HDL=/proc/$$/fd/2"; # To avoid getting N*N running tasks for a "-j N", we explicitly say 1. # Parallel execution within the tests is not done yet, but better safe # than sorry. $cmd="$user make run-tests -j1 $parms"; $start=time(); # Output on STDOUT is short; the logfile says it all. print "#$sum ", StartText($start); open(LOG, "> /tmp/fsvs-test-$sum.log"); select((select(LOG), $|=1)[0]); print LOG "Testing #$sum: (configure=$release) $parms\n", StartText($start), "\n$conf_cmd &&\n\t$cmd\n\n"; # The sources are already configured; just the tests have to be run. $pid=fork(); die $! unless defined($pid); if (!$pid) { close FAILREAD; $ENV{"MAKEFLAGS"}=""; open(STDIN, "< /dev/null") || die $!; open(STDOUT, ">&LOG") || die $!; open(STDERR, ">&LOG") || die $!; system("make -C ../tests diag BINARY=true LC_ALL=$lang"); $x=fcntl(FAILWRITE, F_GETFD, 0); fcntl(FAILWRITE, F_SETFD, $x & ~FD_CLOEXEC); # sudo removes some environment variables, so set all options via make. exec $cmd; die; } # Give the child some time to take the write side. # If we ever get more than 4/64 kB of failed tests this will hang. die $! if waitpid($pid, 0) == -1; $error=$?; # We have to close the write side of the pipe, so that on reading we'll # see an EOF. close FAILWRITE; @failed=map { chomp; $_; } ; close FAILREAD; $end=time(); $t=EndText($start, $end); if ($error) { $status="FAIL"; open(F, "< /proc/loadavg") && print(LOG "LoadAvg: ", ) && close(F); MSG("WARN", "#$sum failed; $t"); } else { $status="OK"; MSG("INFO", "#$sum done; $t"); system("sudo rm -rf $PTESTBASE2"); } print LOG "\n", "$t\n", "$status $error: $user $parms\n", "got failed as (", join(" ", @failed), ")\n", "\n", "$conf_cmd && $cmd\n"; close LOG; $u = $user || "user"; print CSV join(",", $sum, map { "'$_'"; } ($prot, $lang, $u, $release, $status, sort(@failed))), "\n"; close CSV; # We cannot return $error directly ... 
only the low 8bit would # be taken, and these are the signal the process exited with. # A normal error status would be discarded! exit($error ? 1 : 0); } sub WaitForChilds { my($allowed)=@_; my($pid, $ret); while ($running >= $allowed) { $pid=wait(); $ret=$?; die $! if $pid == -1; ${$pid_to_result{$pid}}=$ret; $fail++ if $ret; $running--; } } # Some of the things done in via the shell only works with bash; since # debian has moved to dash recently, we make sure to use the correct # program. sub Bash { die unless @_ == 1; system '/bin/bash', '-c', @_; } # The \n don't matter for the shell, and they help for direct output. sub StartText { my($start)=@_; return "Started at (" . localtime($start) . ").\n"; } sub EndText { my($start, $end)=@_; return "Finished after ". ($end - $start) . " seconds (" . localtime($end) . ")."; } fsvs-1.2.6/src/dev/make_doc.pl0000755000202400020240000000124410617616406015126 0ustar marekmarek#!/usr/bin/perl print "/* This file is generated, do not edit!\n", " * Last done on ", scalar(gmtime(time())),"\n", " * */\n", "\n\n"; while (<>) { chomp; next if /(_{30,})/; next if /^\s*$/ && !@text; $sect=$1 if /^_?([\w\-]{1,5}[a-zA-Z0-9])/; # print STDERR "sect=$sect old=$old_sect\n"; if ($sect ne $old_sect) { print "const char hlp_${old_sect}[]=\"" . join("\"\n \"", @text),"\";\n\n" if ($old_sect && $old_sect =~ /^[a-z]/); @text=(); $sect =~ s#-#_#g; $old_sect=$sect; } else { # make \ safe s#\\#\\\\#g; # make " safe s#"#\\"#g; # remove space at beginning # s#^ ##; push(@text,$_ . 
"\\n"); } } print "\n\n// vi: filetype=c\n"; fsvs-1.2.6/src/dev/check-version-output.pl0000755000202400020240000000112611132571223017447 0ustar marekmarek#!/usr/bin/perl $config = shift || die "which config.h?"; $output = shift || die "which .c?"; %ignore=map { ($_,1); } qw(__CONFIG_H__ FASTCALL MKDEV MAJOR MINOR); %syms=(); open(F,"<", $config) || die "open $config: $!"; while () { $syms{$1}++ if /^\s*#(?:define|undef)\s+(\w+)/ && !$ignore{$1}; } open(F,"<", $output) || die "open $output: $!"; undef $/; $file=; close F; ($_) = ($file =~ /\s Version \s* \( [^)]* \) \s* \n \{ ([\x00-\xff]*) \n \} /xm); die "No Version() found." unless $_; study($_); for $sym (keys %syms) { warn("Not seen: $sym\n") unless m#\b$sym\b#; } fsvs-1.2.6/src/dev/make_fsvs_release.pl0000755000202400020240000000257211556526402017046 0ustar marekmarek#!/usr/bin/perl $version=shift() || die "Welche Version??\n"; $version =~ m#^(\d+\.)+\d+$# || die "Version ungültig!!\n"; $url="http://fsvs.tigris.org/svn/fsvs"; $tagdir="fsvs-$version"; $tagurl="$url/tags/$tagdir"; system("svn cp -m 'Tagging $version' $url/branches/fsvs-1.2.x/ $tagurl"); warn "Fehler $? beim Taggen!" if $?; #print "Getaggt!! Warte auf Bestätigung.\n"; $_=; srand(); $tempdir="/tmp/" . $$ . ".tmp.".rand(); mkdir ($tempdir) || die "mkdir($tempdir): $!"; sub C { system("rm -rf $tempdir"); }; $SIG{"__DIE__"}=sub { print @_; C(); exit($! || 1); }; system("svn export $tagurl/fsvs $tempdir/$tagdir"); #system("svn export $url/trunk/fsvs $tempdir/$tagdir"); die "Fehler $?" if $?; chdir($tempdir); system("cd $tagdir && autoconf"); if ($?) { #die "Fehler $?" if $?; print "Fehler $?!!\n"; system("/bin/bash"); } open(CH, "< $tagdir/CHANGES") || die $!; open(CHHTML,"> CHANGES.html") || die $!; while() { chomp; last if /^\s*$/; print(CHHTML "$_\n
    \n"), next if (/^\w/); s#^- #
  • #; print CHHTML $_, "\n"; } print CHHTML "
\n"; close CH; close CHHTML; system("tar -cvf $tagdir.tar $tagdir"); die "Fehler $?" if $?; system("bzip2 -v9k $tagdir.tar"); die "Fehler $?" if $?; system("gzip -v9 $tagdir.tar"); die "Fehler $?" if $?; system("md5sum *.tar.* > MD5SUM"); die "Fehler $?" if $?; print "ok\n\n cd $tempdir\n\n"; #C(); exit(0); fsvs-1.2.6/src/dev/FAQ0000644000202400020240000000141610532473002013344 0ustar marekmarekWhy do the functions have a "-." in their comment block? - Ask doxygen. The first part of documentation is in the .h file, so doxygen throws away the "brief" part in the corresponding .c file. We need to have an empty sentence. How to run the tests? - Simple case: "make run-tests". - Running some tests: "make run-tests TEST_LIST=001*" - Running with valgrind: "make run-tests CHECKER=valgrind". It's strongly recommended to use TEST_LIST and test only single calls. What about gcov? - Configure with "--enable-debug --enable-gcov"; compile with make. Start a clean environment with "make gcov-clean". Start one or more tests. Look at the summary with "make gcov"; the details are in .gcov, and per-file summaries in .gcov.smry. Example: "fsvs.c.gcov.smry". fsvs-1.2.6/src/dev/dox2txt.pl0000755000202400020240000000062211213413040014755 0ustar marekmarek#!/usr/bin/perl $input=shift; $output=shift; open(STDIN, "lynx -dump -nolist -nonumbers $input |") || die $!; #open(STDOUT, "> $output") || die $!; # Cut until first

header while () { # I'd thought lynx had an option to not print these? # yes ... -nonumbers. s#\[\d+\]##; next if m#^\[#; # $p=m#^SYNOPSIS# .. m#^\s*-{30,}#; $p=m#^\w# .. m#^\s*_{30,}#; print if ($p =~ m#^\d+$#); } fsvs-1.2.6/src/dev/check-option-docs.pl0000755000202400020240000000117211146733460016673 0ustar marekmarek#!/usr/bin/perl # # Checks whether all defined options have some documentation. %opt=(); open(O, "< " . shift()) || die "can't read options.c: $!"; while () { chomp; if ( /^struct \s+ opt__list_t \s+ opt__list/x .. /^\S;/ ) { # print("found option $1\n"), $opt{$1}++ if /\.name \s* = \s* " ([^"]+) "/x; } } while (<>) { chomp; if (/\\\c (.+) - /) { map { # print("documented: $_\n"); delete $opt{$_}; } split(/, \\c /, $1); $opt{$1}++ if /\\ref\s+(\w+)/; } delete $opt{$2} if / \\(subsection|anchor) \s+ (\w+) /x; } exit if !keys %opt; die "Doc missing for ". join(", ", sort keys %opt) . "\n"; fsvs-1.2.6/src/dev/gcov-summary.pl0000755000202400020240000000623110760455265016021 0ustar marekmarek#!/usr/bin/perl # read whole files undef $/; $exe_lines=$sum_lines=0; %runs=(); while (<>) { ($c_file=$ARGV) =~ s#\.gcov\.smry$##; # File 'warnings.c' # Lines executed:85.71% of 28 # warnings.c:creating 'warnings.c.gcov' ($pct, $lines) = (m#File '$c_file'\s+Lines executed:([\d\.]+)% of (\d+)#); if (!$lines) { warn "Cannot parse (or no lines executed) for $ARGV.\n"; next; } open(SRC, "< " . $c_file) || die $!; @funcs_to_ignore = map { m#\s*/// FSVS GCOV MARK: (\w+)# ? $1 : (); } split(/\n/,); close(SRC); $ignored=0; for $func (@funcs_to_ignore) { ($fexec, $flines) = m#Function '$func'\s+Lines executed:([\d\.]+)\% of (\d+)#; if (!defined($flines)) { warn "Function $func should be ignored, but was not found!\n"; } elsif ($fexec>0) { warn "Function $func should be ignored, but was run!\n"; } else { $ignored += $flines; } } # #####: 77: STOPIF( st__status(sts, path), NULL); # TODO: Count the whole block; eg. DEBUG normally has more than a single # line. 
open(GCOV, "< $c_file.gcov"); { local($/)="\n"; $last_line=$cur=0; # find untested lines, and count them $this_run=0; while () { $cur++; if (/^\s*(#####|-):\s+\d+:\s+(STOPIF|BUG|BUG_ON|DEBUGP)?/) { $stopif_lines++ if $2; if ($last_line == $cur -1) { $old=delete $runs{$c_file . "\0" . $last_line}; # An line without executable code (mark '-') is taken as continuation, but # doesn't add to unexecuted lines. $runs{$c_file . "\0" . $cur} = [ $old->[0] + ($1 eq "#####" ? 1 : 0), $old->[1] || $cur ]; } $last_line=$cur; } } } $covered=int($lines*$pct/100.0+0.5); $lines -= $ignored; $pct=$covered/$lines*100.0; $cover{sprintf("%9.5f-%s",$pct,$ARGV)} = [$lines, $pct, $ARGV, $covered, $ignored]; $sum_lines+=$lines; $exe_lines+=$covered; } die "No useful information found!!\n" if !$sum_lines; $delim="---------+--------+--------+--------------------------------------------------\n"; print "\n\n", $delim; for (reverse sort keys %cover) { ($lines, $pct, $name, $covered, $ignored)=@{$cover{$_}}; $ntest=$lines-$covered; $name =~ s#\.gcov\.smry$##i; write; } format STDOUT_TOP= Percent | exec'd | #lines | #!test | #ignrd | Filename ---------+--------+--------+--------+--------+---------------------------- . format STDOUT= @##.##% | @##### | @##### | @##### | @##### | @<<<<<<<<<<<<<<<<<<<<<<<<<< $pct, $covered, $lines, $ntest, $ignored, $name . 
print $delim; $pct=100.0*$exe_lines/$sum_lines; $covered=$exe_lines; $lines=$sum_lines; $name="Total"; write; print $delim; printf " %6.2f%% coverage when counting %d error handling lines as executed\n", 100.0*($exe_lines+$stopif_lines)/$sum_lines, $stopif_lines; print "-" x (length($delim)-1), "\n\n"; # Print runs @runs_by_length=(); map { $runs_by_length[$runs{$_}[0]]{$_}=$runs{$_}; } keys %runs; $max=10; print "Longest runs:\n"; while ($max>0 && @runs_by_length) { $this_length=$#runs_by_length; printf " %3d# ",$this_length; $length_arr=delete $runs_by_length[$this_length]; for (sort keys %$length_arr) { ($file, $last)=split(/\0/); print " ",$file,":",$length_arr->{$_}[1]; $max--; } print "\n"; } print "\n\n"; fsvs-1.2.6/src/doc.g-c0000644000202400020240000007370011346140057013403 0ustar marekmarek/* This file is generated, do not edit! * Last done on Thu Mar 11 09:48:59 2010 * */ const char hlp_add[]=" fsvs add [-u URLNAME] PATH [PATH...]\n" "\n" " With this command you can explicitly define entries to be versioned,\n" " even if they have a matching ignore pattern. They will be sent to the\n" " repository on the next commit, just like other new entries, and will\n" " therefore be reported as New .\n" "\n" " The -u option can be used if you're have more than one URL defined for\n" " this working copy and want to have the entries pinned to the this URL.\n" "\n"; const char hlp_unvers[]=" fsvs unversion PATH [PATH...]\n" "\n" " This command flags the given paths locally as removed. On the next\n" " commit they will be deleted in the repository, and the local\n" " information of them will be removed, but not the entries themselves. So\n" " they will show up as New again, and you get another chance at ignoring\n" " them.\n" "\n"; const char hlp_build[]=" This is used mainly for debugging. It traverses the filesystem and\n" " builds a new entries file. 
In production it should not be used; as\n" " neither URLs nor the revision of the entries is known, information is\n" " lost by calling this function!\n" "\n" " Look at sync-repos.\n" "\n"; const char hlp_delay[]=" This command delays execution until time has passed at least to the\n" " next second after writing the data files used by FSVS (dir and urls).\n" "\n" " This command is for use in scripts; where previously the delay option\n" " was used, this can be substituted by the given command followed by the\n" " delay command.\n" "\n" " The advantage against the delay option is that read-only commands can\n" " be used in the meantime.\n" "\n" " An example:\n" " fsvs commit /etc/X11 -m \"Backup of X11\"\n" " ... read-only commands, like \"status\"\n" " fsvs delay /etc/X11\n" " ... read-write commands, like \"commit\"\n" "\n" " The optional path can point to any path in the WC.\n" "\n" " In the testing framework it is used to save a bit of time; in normal\n" " operation, where FSVS commands are not so tightly packed, it is\n" " normally preferable to use the delay option.\n" "\n"; const char hlp_cat[]=" fsvs cat [-r rev] path\n" "\n" " Fetches a file repository, and outputs it to STDOUT. If no revision is\n" " specified, it defaults to BASE, ie. the current local revision number\n" " of the entry.\n" "\n"; const char hlp_checko[]=" fsvs checkout [path] URL [URLs...]\n" "\n" " Sets one or more URLs for the current working directory (or the\n" " directory path), and does an checkout of these URLs.\n" "\n" " Example:\n" " fsvs checkout . 
http://svn/repos/installation/machine-1/trunk\n" "\n" " The distinction whether a directory is given or not is done based on\n" " the result of URL-parsing -- if it looks like an URL, it is used as an\n" " URL.\n" " Please mind that at most a single path is allowed; as soon as two\n" " non-URLs are found an error message is printed.\n" "\n" " If no directory is given, \".\" is used; this differs from the usual\n" " subversion usage, but might be better suited for usage as a recovery\n" " tool (where versioning / is common). Opinions welcome.\n" "\n" " The given path must exist, and should be empty -- FSVS will abort on\n" " conflicts, ie. if files that should be created already exist.\n" " If there's a need to create that directory, please say so; patches for\n" " some parameter like -p are welcome.\n" "\n" " For a format definition of the URLs please see the chapter Format of\n" " URLs and the urls and update commands.\n" "\n" " Furthermore you might be interested in Using an alternate root\n" " directory and Recovery for a non-booting system.\n" "\n"; const char hlp_commit[]=" fsvs commit [-m \"message\"|-F filename] [-v] [-C [-C]] [PATH [PATH ...]]\n" "\n" " Commits (parts of) the current state of the working copy into the\n" " repository.\n" "\n"; const char hlp_cp[]=" fsvs cp [-r rev] SRC DEST\n" " fsvs cp dump\n" " fsvs cp load\n" "\n" " The copy command marks DEST as a copy of SRC at revision rev, so that\n" " on the next commit of DEST the corresponding source path is sent as\n" " copy source.\n" "\n" " The default value for rev is BASE, ie. 
the revision the SRC (locally)\n" " is at.\n" "\n" " Please note that this command works always on a directory structure -\n" " if you say to copy a directory, the whole structure is marked as copy.\n" " That means that if some entries below the copy are missing, they are\n" " reported as removed from the copy on the next commit.\n" " (Of course it is possible to mark files as copied, too; non-recursive\n" " copies are not possible, but can be emulated by having parts of the\n" " destination tree removed.)\n" "\n" " Note:\n" " TODO: There will be differences in the exact usage - copy will\n" " try to run the cp command, whereas copied will just remember the\n" " relation.\n" "\n" " If this command are used without parameters, the currently defined\n" " relations are printed; please keep in mind that the key is the\n" " destination name, ie. the 2nd line of each pair!\n" "\n" " The input format for load is newline-separated - first a SRC line,\n" " followed by a DEST line, then an line with just a dot (\".\") as\n" " delimiter. If you've got filenames with newlines or other special\n" " characters, you have to give the paths as arguments.\n" "\n" " Internally the paths are stored relative to the working copy base\n" " directory, and they're printed that way, too.\n" "\n" " Later definitions are appended to the internal database; to undo\n" " mistakes, use the uncopy action.\n" "\n" " Note:\n" " Important: User-defined properties like fsvs:commit-pipe are not\n" " copied to the destinations, because of space/time issues\n" " (traversing through entire subtrees, copying a lot of\n" " property-files) and because it's not sure that this is really\n" " wanted. TODO: option for copying properties?\n" "\n" " Todo:\n" " -0 like for xargs?\n" "\n" " Todo:\n" " Are different revision numbers for load necessary? 
Should dump\n" " print the source revision number?\n" "\n" " Todo:\n" " Copying from URLs means update from there\n" "\n" " Note:\n" " As subversion currently treats a rename as copy+delete, the mv\n" " command is an alias to cp.\n" "\n" " If you have a need to give the filenames dump or load as first\n" " parameter for copyfrom relations, give some path, too, as in \"./dump\".\n" "\n" " Note:\n" " The source is internally stored as URL with revision number, so\n" " that operations like these\n" "\n" " $ fsvs cp a b\n" " $ rm a/1\n" " $ fsvs ci a\n" " $ fsvs ci b\n" "\n" " work - FSVS sends the old (too recent!) revision number as\n" " source, and so the local filelist stays consistent with the\n" " repository.\n" " But it is not implemented (yet) to give an URL as copyfrom\n" " source directly - we'd have to fetch a list of entries (and\n" " possibly the data!) from the repository.\n" "\n" " Todo:\n" " Filter for dump (patterns?).\n" "\n"; const char hlp_copyfr[]=" fsvs copyfrom-detect [paths...]\n" "\n" " This command tells FSVS to look through the new entries, and see\n" " whether it can find some that seem to be copied from others already\n" " known.\n" " It will output a list with source and destination path and why it could\n" " match.\n" "\n" " This is just for information purposes and doesn't change any FSVS\n" " state, (TODO: unless some option/parameter is set).\n" "\n" " The list format is on purpose incompatible with the load syntax, as the\n" " best match normally has to be taken manually.\n" "\n" " Todo:\n" " some parameter that just prints the \"best\" match, and outputs\n" " the correct format.\n" "\n" " If verbose is used, an additional value giving the percentage of\n" " matching blocks, and the count of possibly copied entries is printed.\n" "\n" " Example:\n" " $ fsvs copyfrom-list -v\n" " newfile1\n" " md5:oldfileA\n" " newfile2\n" " md5:oldfileB\n" " md5:oldfileC\n" " md5:oldfileD\n" " newfile3\n" " inode:oldfileI\n" " manber=82.6:oldfileF\n" 
" manber=74.2:oldfileG\n" " manber=53.3:oldfileH\n" " ...\n" " 3 copyfrom relations found.\n" "\n" " The abbreviations are:\n" " md5\n" "\n" " The MD5 of the new file is identical to that of one or more already\n" " committed files; there is no percentage.\n" "\n" " inode\n" "\n" " The device/inode number is identical to the given known entry; this\n" " could mean that the old entry has been renamed or hardlinked. Note: Not\n" " all filesystems have persistent inode numbers (eg. NFS) - so depending\n" " on your filesystems this might not be a good indicator!\n" "\n" " name\n" "\n" " The entry has the same name as another entry.\n" "\n" " manber\n" "\n" " Analysing files of similar size shows some percentage of\n" " (variable-sized) common blocks (ignoring the order of the blocks).\n" "\n" " dirlist\n" "\n" " The new directory has similar files to the old directory.\n" " The percentage is (number_of_common_entries)/(files_in_dir1 +\n" " files_in_dir2 - number_of_common_entries).\n" "\n" " Note:\n" " manber matching is not implemented yet.\n" " If too many possible matches for an entry are found, not all are\n" " printed; only an indicator ... 
is shown at the end.\n" "\n"; const char hlp_uncp[]=" fsvs uncopy DEST [DEST ...]\n" "\n" " The uncopy command removes a copyfrom mark from the destination entry.\n" " This will make the entry unknown again, and reported as New on the next\n" " invocations.\n" "\n" " Only the base of a copy can be un-copied; if a directory structure was\n" " copied, and the given entry is just implicitly copied, this command\n" " will return an error.\n" "\n" " This is not folded in revert, because it's not clear whether revert on\n" " copied, changed entries should restore the original copyfrom data or\n" " remove the copy attribute; by using another command this is no longer\n" " ambiguous.\n" "\n" " Example:\n" " $ fsvs copy SourceFile DestFile\n" " # Whoops, was wrong!\n" " $ fsvs uncopy DestFile\n" "\n"; const char hlp_diff[]=" fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...]\n" "\n" " This command gives you diffs between local and repository files.\n" "\n" " With -v the meta-data is additionally printed, and changes shown.\n" "\n" " If you don't give the revision arguments, you get a diff of the base\n" " revision in the repository (the last commit) against your current local\n" " file. With one revision, you diff this repository version against your\n" " local file. With both revisions given, the difference between these\n" " repository versions is calculated.\n" "\n" " You'll need the diff program, as the files are simply passed as\n" " parameters to it.\n" "\n" " The default is to do non-recursive diffs; so fsvs diff . 
will output\n" " the changes in all files in the current directory and below.\n" "\n" " The output for special files is the diff of the internal subversion\n" " storage, which includes the type of the special file, but no newline at\n" " the end of the line (which diff complains about).\n" "\n" " For entries marked as copy the diff against the (clean) source entry is\n" " printed.\n" "\n" " Please see also Options relating to the \"diff\" action and Using\n" " colordiff.\n" "\n" " Todo:\n" " Two revisions diff is buggy in that it (currently) always\n" " fetches the full trees from the repository; this is not only a\n" " performance degradation, but you'll see more changed entries\n" " than you want (like changes A to B to A). This will be fixed.\n" "\n"; const char hlp_export[]=" fsvs export REPOS_URL [-r rev]\n" "\n" " If you want to export a directory from your repository without storing\n" " any FSVS-related data you can use this command.\n" "\n" " This restores all meta-data - owner, group, access mask and\n" " modification time; its primary use is for data recovery.\n" "\n" " The data gets written (in the correct directory structure) below the\n" " current working directory; if entries already exist, the export will\n" " stop, so this should be an empty directory.\n" "\n"; const char hlp_help[]=" help [command]\n" "\n" " This command shows general or specific help (for the given command). A\n" " similar function is available by using -h or -? after a command.\n" "\n"; const char hlp_groups[]=" fsvs groups dump|load\n" " fsvs groups [prepend|append|at=n] group-definition [group-def ...]\n" " fsvs ignore [prepend|append|at=n] pattern [pattern ...]\n" " fsvs groups test [-v|-q] [pattern ...]\n" "\n" " This command adds patterns to the end of the pattern list, or, with\n" " prepend, puts them at the beginning of the list. 
With at=x the patterns\n" " are inserted at the position x , counting from 0.\n" "\n" " The difference between groups and ignore is that groups requires a\n" " group name, whereas the latter just assumes the default group ignore.\n" "\n" " For the specification please see the related documentation .\n" "\n" " fsvs dump prints the patterns to STDOUT . If there are special\n" " characters like CR or LF embedded in the pattern without encoding (like\n" " \\r or \\n), the output will be garbled.\n" "\n" " The patterns may include * and ? as wildcards in one directory level,\n" " or ** for arbitrary strings.\n" "\n" " These patterns are only matched against new (not yet known) files;\n" " entries that are already versioned are not invalidated.\n" " If the given path matches a new directory, entries below aren't found,\n" " either; but if this directory or entries below are already versioned,\n" " the pattern doesn't work, as the match is restricted to the directory.\n" "\n" " So:\n" " fsvs ignore ./tmp\n" "\n" " ignores the directory tmp; but if it has already been committed,\n" " existing entries would have to be unmarked with fsvs unversion.\n" " Normally it's better to use\n" " fsvs ignore ./tmp/**\n" "\n" " as that takes the directory itself (which might be needed after restore\n" " as a mount point anyway), but ignore all entries below.\n" " Currently this has the drawback that mtime changes will be reported and\n" " committed; this is not the case if the whole directory is ignored.\n" "\n" " Examples:\n" " fsvs group group:unreadable,mode:4:0\n" " fsvs group 'group:secrets,/etc/*shadow'\n" "\n" " fsvs ignore /proc\n" " fsvs ignore /dev/pts\n" " fsvs ignore './var/log/*-*'\n" " fsvs ignore './**~'\n" " fsvs ignore './**/*.bak'\n" " fsvs ignore prepend 'take,./**.txt'\n" " fsvs ignore append 'take,./**.svg'\n" " fsvs ignore at=1 './**.tmp'\n" "\n" " fsvs group dump\n" " fsvs group dump -v\n" "\n" " echo \"./**.doc\" | fsvs ignore load\n" " # Replaces the whole 
list\n" "\n" " Note:\n" " Please take care that your wildcard patterns are not expanded by\n" " the shell!\n" "\n"; const char hlp_rign[]=" fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...]\n" " fsvs ri [prepend|append|at=n] path-spec [path-spec ...]\n" "\n" " If you keep the same repository data at more than one working copy on\n" " the same machine, it will be stored in different paths - and that makes\n" " absolute ignore patterns infeasible. But relative ignore patterns are\n" " anchored at the beginning of the WC root - which is a bit tiring to\n" " type if you're deep in your WC hierarchy and want to ignore some files.\n" "\n" " To make that easier you can use the rel-ignore (abbreviated as ri)\n" " command; this converts all given path-specifications (which may include\n" " wildcards as per the shell pattern specification above) to WC-relative\n" " values before storing them.\n" "\n" " Example for /etc as working copy root:\n" " fsvs rel-ignore '/etc/X11/xorg.conf.*'\n" "\n" " cd /etc/X11\n" " fsvs rel-ignore 'xorg.conf.*'\n" "\n" " Both commands would store the pattern \"./X11/xorg.conf.*\".\n" "\n" " Note:\n" " This works only for shell patterns.\n" "\n" " For more details about ignoring files please see the ignore command and\n" " Specification of groups and patterns.\n" "\n"; const char hlp_info[]=" fsvs info [-R [-R]] [PATH...]\n" "\n" " Use this command to show information regarding one or more entries in\n" " your working copy.\n" " You can use -v to obtain slightly more information.\n" "\n" " This may sometimes be helpful for locating bugs, or to obtain the URL\n" " and revision a working copy is currently at.\n" "\n" " Example:\n" " $ fsvs info\n" " URL: file:\n" " .... 
200 .\n" " Type: directory\n" " Status: 0x0\n" " Flags: 0x100000\n" " Dev: 0\n" " Inode: 24521\n" " Mode: 040755\n" " UID/GID: 1000/1000\n" " MTime: Thu Aug 17 16:34:24 2006\n" " CTime: Thu Aug 17 16:34:24 2006\n" " Revision: 4\n" " Size: 200\n" "\n" " The default is to print information about the given entry only. With a\n" " single -R you'll get this data about all entries of a given directory;\n" " with another -R you'll get the whole (sub-)tree.\n" "\n"; const char hlp_log[]=" fsvs log [-v] [-r rev1[:rev2]] [-u name] [path]\n" "\n" " This command views the revision log information associated with the\n" " given path at its topmost URL, or, if none is given, the highest\n" " priority URL.\n" "\n" " The optional rev1 and rev2 can be used to restrict the revisions that\n" " are shown; if no values are given, the logs are given starting from\n" " HEAD downwards, and then a limit on the number of revisions is applied\n" " (but see the limit option).\n" "\n" " If you use the -v -option, you get the files changed in each revision\n" " printed, too.\n" "\n" " There is an option controlling the output format; see the log_output\n" " option.\n" "\n" " Optionally the name of an URL can be given after -u; then the log of\n" " this URL, instead of the topmost one, is shown.\n" "\n" " TODOs:\n" " * --stop-on-copy\n" " * Show revision for all URLs associated with a working copy? In which\n" " order?\n" "\n"; const char hlp_prop_g[]=" fsvs prop-get PROPERTY-NAME PATH...\n" "\n" " Prints the data of the given property to STDOUT.\n" "\n" " Note:\n" " Be careful! This command will dump the property as it is, ie.\n" " with any special characters! 
If there are escape sequences or\n" " binary data in the property, your terminal might get messed up!\n" " If you want a safe way to look at the properties, use prop-list\n" " with the -v parameter.\n" "\n"; const char hlp_prop_s[]=" fsvs prop-set [-u URLNAME] PROPERTY-NAME VALUE PATH...\n" "\n" " This command sets an arbitrary property value for the given path(s).\n" "\n" " Note:\n" " Some property prefixes are reserved; currently everything\n" " starting with svn: throws a (fatal) warning, and fsvs: is\n" " already used, too. See Special property names.\n" "\n" " If you're using a multi-URL setup, and the entry you'd like to work on\n" " should be pinned to a specific URL, you can use the -u parameter; this\n" " is like the add command, see there for more details.\n" "\n"; const char hlp_prop_d[]=" fsvs prop-del PROPERTY-NAME PATH...\n" "\n" " This command removes a property for the given path(s).\n" "\n" " See also prop-set.\n" "\n"; const char hlp_prop_l[]=" fsvs prop-list [-v] PATH...\n" "\n" " Lists the names of all properties for the given entry.\n" " With -v, the value is printed as well; special characters will be\n" " translated, as arbitrary binary sequences could interfere with your\n" " terminal settings.\n" "\n" " If you need raw output, post a patch for --raw, or write a loop with\n" " prop-get.\n" "\n"; const char hlp_remote[]=" fsvs remote-status PATH [-r rev]\n" "\n" " This command looks into the repository and tells you which files would\n" " get changed on an update - it's a dry-run for update .\n" "\n" " Per default it compares to HEAD, but you can choose another revision\n" " with the -r parameter.\n" "\n" " Please see the update documentation for details regarding multi-URL\n" " usage.\n" "\n"; const char hlp_resolv[]=" fsvs resolve PATH [PATH...]\n" "\n" " When FSVS tries to update local files which have been changed, a\n" " conflict might occur. 
(For various ways of handling these please see\n" " the conflict option.)\n" "\n" " This command lets you mark such conflicts as resolved.\n" "\n"; const char hlp_revert[]=" fsvs revert [-rRev] [-R] PATH [PATH...]\n" "\n" " This command undoes local modifications:\n" " * An entry that is marked to be unversioned gets this flag removed.\n" " * For a already versioned entry (existing in the repository) the\n" " local entry is replaced with its repository version, and its status\n" " and flags are cleared.\n" " * An entry that is a modified copy destination gets reverted to the\n" " copy source data.\n" " * Manually added entries are changed back to \"N\"ew.\n" "\n" " Please note that implicitly copied entries, ie. entries that are marked\n" " as copied because some parent directory is the base of a copy, can not\n" " be un-copied; they can only be reverted to their original (copied-from)\n" " data, or removed.\n" "\n" " If you want to undo a copy operation, please see the uncopy command.\n" "\n" " See also HOWTO: Understand the entries' statii.\n" "\n" " If a directory is given on the command line all versioned entries in\n" " this directory are reverted to the old state; this behaviour can be\n" " modified with -R/-N, or see below.\n" "\n" " The reverted entries are printed, along with the status they had before\n" " the revert (because the new status is per definition unchanged).\n" "\n" " If a revision is given, the entries' data is taken from this revision;\n" " furthermore, the new status of that entry is shown.\n" "\n" " Note:\n" " Please note that mixed revision working copies are not (yet)\n" " possible; the BASE revision is not changed, and a simple revert\n" " without a revision arguments gives you that.\n" " By giving a revision parameter you can just choose to get the\n" " text from a different revision.\n" "\n"; const char hlp_status[]=" fsvs status [-C [-C]] [-v] [-f filter] [PATHs...]\n" "\n" " This command shows the entries that have been changed 
locally since the\n" " last commit.\n" "\n" " The most important output formats are:\n" " * A status columns of four (or, with -v , six) characters. There are\n" " either flags or a \".\" printed, so that it's easily parsed by\n" " scripts -- the number of columns is only changed by -q, -v --\n" " verbose/quiet.\n" " * The size of the entry, in bytes, or \"dir\" for a directory, or \"dev\"\n" " for a device.\n" " * The path and name of the entry, formatted by the path option.\n" "\n" " Normally only changed entries are printed; with -v all are printed, but\n" " see the filter option for more details.\n" "\n" " The status column can show the following flags:\n" " * 'D' and 'N' are used for deleted and new entries.\n" " * 'd' and 'n' are used for entries which are to be unversioned or\n" " added on the next commit; the characters were chosen as little\n" " delete (only in the repository, not removed locally) and little new\n" " (although ignored). See add and unversion.\n" " If such an entry does not exist, it is marked with an \"!\" in the\n" " last column -- because it has been manually marked, and so the\n" " removal is unexpected.\n" " * A changed type (character device to symlink, file to directory\n" " etc.) is given as 'R' (replaced), ie. as removed and newly added.\n" " * If the entry has been modified, the change is shown as 'C'.\n" " If the modification or status change timestamps (mtime, ctime) are\n" " changed, but the size is still the same, the entry is marked as\n" " possibly changed (a question mark '?' 
in the last column) - but see\n" " change detection for details.\n" " * A 'x' signifies a conflict.\n" " * The meta-data flag 'm' shows meta-data changes like properties,\n" " modification timestamp and/or the rights (owner, group, mode);\n" " depending on the -v/-q command line parameters, it may be splitted\n" " into 'P' (properties), 't' (time) and 'p' (permissions).\n" " If 'P' is shown for the non-verbose case, it means only property\n" " changes, ie. the entries filesystem meta-data is unchanged.\n" " * A '+' is printed for files with a copy-from history; to see the URL\n" " of the copyfrom source, see the verbose option.\n" "\n" " Here's a table with the characters and their positions:\n" " * Without -v With -v\n" " * .... ......\n" " * NmC? NtpPC?\n" " * DPx! D x!\n" " * R + R +\n" " * d d\n" " * n n\n" " *\n" "\n" " Furthermore please take a look at the stat_color option, and for more\n" " information about displayed data the verbose option.\n" "\n"; const char hlp_sync_r[]=" fsvs sync-repos [-r rev] [working copy base]\n" "\n" " This command loads the file list afresh from the repository.\n" " A following commit will send all differences and make the repository\n" " data identical to the local.\n" "\n" " This is normally not needed; the only use cases are\n" " * debugging and\n" " * recovering from data loss in the $FSVS_WAA area.\n" "\n" " It might be of use if you want to backup two similar machines. Then you\n" " could commit one machine into a subdirectory of your repository, make a\n" " copy of that directory for another machine, and sync this other\n" " directory on the other machine.\n" "\n" " A commit then will transfer only _changed_ files; so if the two\n" " machines share 2GB of binaries (/usr , /bin , /lib , ...) 
then these\n" " 2GB are still shared in the repository, although over time they will\n" " deviate (as both committing machines know nothing of the other path\n" " with identical files).\n" "\n" " This kind of backup could be substituted by two or more levels of\n" " repository paths, which get overlaid in a defined priority. So the base\n" " directory, which all machines derive from, will be committed from one\n" " machine, and it's no longer necessary for all machines to send\n" " identical files into the repository.\n" "\n" " The revision argument should only ever be used for debugging; if you\n" " fetch a filelist for a revision, and then commit against later\n" " revisions, problems are bound to occur.\n" "\n" " Note:\n" " There's issue 2286 in subversion which describes sharing\n" " identical files in the repository in unrelated paths. By using\n" " this relaxes the storage needs; but the network transfers would\n" " still be much larger than with the overlaid paths.\n" "\n"; const char hlp_update[]=" fsvs update [-r rev] [working copy base]\n" " fsvs update [-u url@rev ...] [working copy base]\n" "\n" " This command does an update on the current working copy; per default\n" " for all defined URLs, but you can restrict that via -u.\n" "\n" " It first reads all filelist changes from the repositories, overlays\n" " them (so that only the highest-priority entries are used), and then\n" " fetches all necessary changes.\n" "\n"; const char hlp_urls[]=" fsvs urls URL [URLs...]\n" " fsvs urls dump\n" " fsvs urls load\n" "\n" " Initializes a working copy administrative area and connects the current\n" " working directory to REPOS_URL. 
All commits and updates will be done to\n" " this directory and against the given URL.\n" "\n" " Example:\n" " fsvs urls http://svn/repos/installation/machine-1/trunk\n" "\n" " For a format definition of the URLs please see the chapter Format of\n" " URLs.\n" "\n" " Note:\n" " If there are already URLs defined, and you use that command\n" " later again, please note that as of 1.0.18 the older URLs are\n" " not overwritten as before, but that the new URLs are appended to\n" " the given list! If you want to start afresh, use something like\n" "\n" " true | fsvs urls load\n" "\n"; // vi: filetype=c fsvs-1.2.6/src/warnings.c0000644000202400020240000001442711264677022014251 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include "warnings.h" #include "interface.h" #include "global.h" /** \file * Functions, enumerations and other private parts of the warning subsystem. * */ /** The texts that are used for input/output of the warning actions. 
*/ const char *wa__warn_action_text[_WA__LAST_INDEX] = { [WA__WARN_ONCE] = "once", [WA__WARN_ALWAYS] = "always", [WA__IGNORE] = "ignore", [WA__STOP] = "stop", [WA__COUNT] = "count", }; /** Names for warnings; actions default to WARN_ONCE */ static struct wa__warnings wa___warn_options[_WRN__LAST_INDEX]= { [WRN__META_MTIME_INVALID] = { "meta-mtime" }, [WRN__META_USER_INVALID] = { "meta-user" }, [WRN__META_GROUP_INVALID] = { "meta-group" }, [WRN__META_UMASK_INVALID] = { "meta-umask" }, [WRN__NO_URLLIST] = { "no-urllist" }, [WRN__CHARSET_INVALID] = { "charset-invalid" }, [WRN__CHMOD_EPERM] = { "chmod-eperm", WA__WARN_ONCE}, [WRN__CHOWN_EPERM] = { "chown-eperm", WA__WARN_ONCE}, [WRN__CHMOD_OTHER] = { "chmod-other", WA__STOP }, [WRN__CHOWN_OTHER] = { "chown-other", WA__STOP }, [WRN__MIXED_REV_WC] = { "mixed-rev-wc", WA__WARN_ALWAYS }, [WRN__PROP_NAME_RESERVED] = { "propname-reserved", WA__STOP }, [WRN__DIFF_EXIT_STATUS] = { "diff-status", WA__IGNORE }, [WRN__IGNPAT_WCBASE] = { "ignpat-wcbase", WA__WARN_ALWAYS }, [WRN__TEST_WARNING] = { "_test-warning", WA__IGNORE }, }; /** The filehandle to print the warnings to. * Currently always \c stderr. */ static FILE *warn_out; /** -. * */ int wa__split_process(char *warn, int prio) { int status; char *input; status=0; input=warn; while (warn && *warn) { warn=strtok(input, ",; \r\t\n"); if (!warn) break; /* As per the strtok() rules - input only set on first call. */ input=NULL; STOPIF( wa__set_warn_option(warn, prio), "In string %s", warn); } ex: return status; } /** * -. * The given string is of the format \c warning=action. * * \a action can be any of the wa__warn_action_text[] strings; \a warning * is the startstring of any of the \ref wa___warn_options. * If more than one warnings matches this string, all are set to the given * action. * The command line option looks like this: \c -Wmeta-mtime=ignore. 
*/ int wa__set_warn_option(char *stg, enum opt__prio_e prio) { char *delim; int status; int index, action, len; struct wa__warnings *warning; status=0; delim=strchr(stg, '='); STOPIF_CODE_ERR(!delim, EINVAL, "!The warning option '%s' is invalid", stg); /* Look for action. We do that first, so that * multiple warnings can be switched at once: * -Wmeta=ignore */ for(action=_WA__LAST_INDEX-1; action>=0; action--) if (strcmp(wa__warn_action_text[action], delim+1) == 0) break; STOPIF_CODE_ERR(action < 0, EINVAL, "The warning action specification '%s' is invalid", delim+1); /* Look for option(s). * If we return ENOENT it looks to opt__load_env() as if it's simply no * valid key - and gets ignored. So we return EINVAL. */ status=EINVAL; len=delim-stg; warning=wa___warn_options; for(index=0; index<_WRN__LAST_INDEX; index++, warning++) { if (strncmp(warning->text, stg, len) == 0) { if (warning->prio <= prio) { warning->action=action; warning->prio=prio; DEBUGP("warning option set: %s=%s, prio %d", warning->text, wa__warn_action_text[warning->action], prio); } status=0; } } STOPIF_CODE_ERR(status, status, "The given warning option '%*s' matches no warnings", len, stg); ex: return status; } /** -. * * \param index The definition from \a warning_e. * \param stat The error code that could be used for stopping. * \param format \c printf()-style message. * */ int wa__warn(warning_e index, int stat, char *format, ...) { va_list va; int status, ret; if (!warn_out) warn_out=stderr; wa___warn_options[index].count++; status=0; switch (wa___warn_options[index].action) { case WA__IGNORE: case WA__COUNT: break; case WA__STOP: status=stat; if (!status) status=EAGAIN; /* Fall through. Modifying doesn't matter, we'll stop. 
*/ case WA__WARN_ONCE: /* Fall through, printing a warning */ case WA__WARN_ALWAYS: /* Print a warning */ va_start(va, format); ret=fprintf(warn_out, "\nWARNING"); if (opt__is_verbose() > 0) ret|=fprintf(warn_out, "(%s)", wa___warn_options[index].text); ret|=fprintf(warn_out, ": "); ret|=vfprintf(warn_out, format, va); ret|=fprintf(warn_out, "\n\n"); if (!status) /* Any negative value from above (error) will turn ret negative, too. * We don't check if we're planning to stop. */ STOPIF_CODE_ERR(ret<0, errno, "Error while printing warning"); if (wa___warn_options[index].action == WA__WARN_ONCE) /* Switch to counting mode */ wa___warn_options[index].action=WA__COUNT; break; default: BUG("Invalid warning action encountered"); } ex: return status; } /** -. * * Warnings set to \ref WA__IGNORE are not printed. */ int wa__summary(void) { int i, status; int flag; status=0; /* Flush all streams, so that this warnings occur *after* * every other status output. */ STOPIF_CODE_EPIPE( fflush(NULL), NULL); flag=0; for(i=0; i<_WRN__LAST_INDEX; i++) { DEBUGP("%d# %s: %dx", i, wa___warn_options[i].text, wa___warn_options[i].count); if (wa___warn_options[i].action != WA__IGNORE && wa___warn_options[i].count) { /* This can only be called if there was at least a single warning, * and then warn_out is set, too. */ if (!flag++) STOPIF_CODE_ERR( fprintf(warn_out, "\nWarning summary:\n")<0, errno, "Error writing warning summary header"); STOPIF_CODE_ERR( fprintf(warn_out, " %s occurred %d time%s\n", wa___warn_options[i].text, wa___warn_options[i].count, wa___warn_options[i].count == 1 ? "" : "s") <0, errno, "Cannot write warning summary line"); } } ex: return status; } fsvs-1.2.6/src/status.h0000644000202400020240000000275111243307426013741 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __STATUS_H__ #define __STATUS_H__ #include "actions.h" #include "options.h" /** \file * \ref status output header file. */ /** A function to show the local status of an entry. */ action_t st__status; /** Status action. */ action_t st__action; /** A function to show the remote status of an entry. */ action_t st__rm_status; /** The \ref status worker function. */ work_t st__work; /** Percentual display of progress. */ action_t st__progress; /** Uninitializer for \ref st__progress. */ action_uninit_t st__progress_uninit; /** Shows detailed information about the entry. */ int st__print_entry_info(struct estat *sts); /** Returns a string describing the \a entry_status bits of struct \a * estat. */ volatile char* st__status_string(const struct estat * const sts); /** Same as \ref st__status_string, but directly given an status. */ volatile char* st__status_string_fromint(int mask); /** Return the string interpretation of the flags like \ref RF_CHECK. */ volatile char* st__flags_string_fromint(int mask); /** Return the type string - cdev, bdev, whatever. */ char *st__type_string(mode_t mode); #endif fsvs-1.2.6/src/racallback.h0000644000202400020240000000353111213650503014464 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __RACALLBACK_H__ #define __RACALLBACK_H__ #include /** \file * The cb__record_changes() and other callback functions header file. 
*/ /** The callback table for cb__record_changes(). */ extern struct svn_ra_callbacks_t cb__cb_table; /** Initialize the callback functions. * \todo Authentication providers. */ svn_error_t *cb__init(apr_pool_t *pool); /** A change-recording editor. */ int cb__record_changes(struct estat *root, svn_revnum_t target, apr_pool_t *pool); /** Like cb__record_changes(), but allowing mixed reporting. */ int cb__record_changes_mixed(struct estat *root, svn_revnum_t target, char *other_paths[], svn_revnum_t other_revs, apr_pool_t *pool); /** This function adds a new entry below dir, setting it to * \c FS_NEW or \c FS_REPLACED. */ int cb__add_entry(struct estat *dir, const char *utf8_path, char **loc_path, const char *utf8_copy_path, svn_revnum_t copy_rev, int mode, int *has_existed, int may_create, void **new); /** Checks whether a given remote path exists. */ int cb__does_path_exist(svn_ra_session_t *session, char *path, svn_revnum_t rev, int *exists, apr_pool_t *pool); /** Removes all entries belonging to \a to_remove from the tree \a root. */ int cb__remove_from_url(struct estat *root, struct url_t *to_remove, int *was_changed); /** Remove all entries from the given URL, and mark it for deletion. */ int cb__remove_url(struct estat *root, struct url_t *to_remove); #endif fsvs-1.2.6/src/resolve.h0000644000202400020240000000163310770132650014072 0ustar marekmarek/************************************************************************ * Copyright (C) 2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __RESOLVE_H__ #define __RESOLVE_H__ /** \file * \ref resolve action header file. 
*/ #include "global.h" #include "actions.h" /** This function takes a \c NULL terminated list of filenames, and appends * it to the conflict list of the given \a sts. */ int res__mark_conflict(struct estat *sts, ...) __attribute((sentinel)); /** Removes all stored files. */ int res__remove_aux_files(struct estat *sts); /** Resolve command main function. */ work_t res__work; /** Resolve command action function. */ action_t res__action; #endif fsvs-1.2.6/src/status.c0000644000202400020240000005335211555464732013751 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include "global.h" #include "actions.h" #include "status.h" #include "helper.h" #include "direnum.h" #include "cache.h" #include "url.h" #include "cp_mv.h" #include "ignore.h" #include "options.h" #include "est_ops.h" #include "waa.h" #include "checksum.h" #include "url.h" /** \file * Functions for \ref status reporting. * */ /** \addtogroup cmds * * \section status * * \code * fsvs status [-C [-C]] [-v] [-f filter] [PATHs...] * \endcode * * This command shows the entries that have been changed locally since the * last commit. * * The most important output formats are: * - A status columns of four (or, with \c -v , six) characters. * There are either flags or a "." printed, so that it's easily parsed by * scripts -- the number of columns is only changed by \ref * glob_opt_verb. * - The size of the entry, in bytes, or \c "dir" for a directory, or \c * "dev" for a device. * - The path and name of the entry, formatted by the \ref o_opt_path * "path" option. 
* * Normally only changed entries are printed; with \c -v all are printed, * but see the \ref o_filter "filter" option for more details. * * The status column can show the following flags: * - \c 'D' and \c 'N' are used for \e deleted and \e new entries. * - \c 'd' and \c 'n' are used for entries which are to be unversioned or * added on the next commit; the characters were chosen as little * delete (only in the repository, not removed locally) and little * new (although \ref ignore "ignored"). See \ref add and \ref * unversion. \n * If such an entry does not exist, it is marked with an \c "!" in the * last column -- because it has been manually marked, and so the removal * is unexpected. * - A changed type (character device to symlink, file to directory etc.) * is given as \c 'R' (replaced), ie. as removed and newly added. * - \anchor status_possibly * If the entry has been modified, the change is shown as \c 'C'. \n * If the modification or status change timestamps (mtime, ctime) are * changed, but the size is still the same, the entry is marked as * possibly changed (a question mark \c '?' in the last column) - but see * \ref o_chcheck "change detection" for details. * - A \c 'x' signifies a conflict. * - \anchor status_meta_changed * The meta-data flag \c 'm' shows meta-data changes like properties, * modification timestamp and/or the rights (owner, group, mode); * depending on the \ref glob_opt_verb "-v/-q" command line parameters, * it may be splitted into \c 'P' (properties), \c 't' (time) and \c 'p' * (permissions). \n * If \c 'P' is shown for the non-verbose case, it means \b only property * changes, ie. the entries filesystem meta-data is unchanged. * - A \c '+' is printed for files with a copy-from history; to see the URL * of the copyfrom source, see the \ref o_verbose "verbose" option. * * * Here's a table with the characters and their positions: * \verbatim * Without -v With -v * .... ...... * NmC? NtpPC? * DPx! D x! 
* R + R + * d d * n n * \endverbatim * * Furthermore please take a look at the \ref o_status_color "stat_color" * option, and for more information about displayed data the \ref o_verbose * "verbose" option. * */ static FILE *progress_output=NULL; static int max_progress_len=0; /** Returns the visible file size. * For devices the string \c dev is printed; for directories \c dir; files * and symlinks get their actual size printed. */ char * st___visible_file_size(struct estat *sts) { static char buffer[20]; switch ( (sts->st.mode ? sts->st.mode : sts->st.mode) & S_IFMT) { case S_IFBLK: case S_IFCHR: return "dev"; case S_IFDIR: return "dir"; default: /* When in doubt, believe it's a normal file. * We have that case for sync-repos - could be fixed some time. */ case S_IFREG: case S_IFLNK: sprintf(buffer, "%llu", (t_ull) sts->st.size); break; } return buffer; } /** Meta-data status string. */ inline char * st___meta_string(int status_bits, int flags) { static char buffer[4]; int prop; prop=(status_bits & FS_PROPERTIES) | (flags & RF_PUSHPROPS); if (opt__is_verbose() > 0) { buffer[0] = status_bits & FS_META_MTIME ? 't' : '.'; buffer[1] = status_bits & (FS_META_OWNER | FS_META_GROUP | FS_META_UMODE) ? 'p' : '.'; buffer[2] = prop ? 'P' : '.'; buffer[3] = 0; } else { buffer[0] = status_bits & FS_META_CHANGED ? 'm' : prop ? 'P' : '.'; buffer[1]=0; } return buffer; } /** Roses are red, grass is green ... */ char *st___color(int status_bits) { if ((status_bits & FS_REPLACED) == FS_REMOVED) return ANSI__RED; if (status_bits & FS_NEW) return ANSI__GREEN; if (status_bits & FS_CHANGED) return ANSI__BLUE; return ""; } /** Prints the entry in readable form. * This function uses the \c OPT__VERBOSE settings. */ int st__print_status(char *path, int status_bits, int flags, char* size, struct estat *sts) { int status; char *copyfrom, *url; int copy_inherited; FILE* output=stdout; DEBUGP("VERBOSITY=%d", opt__get_int(OPT__VERBOSE)); status=0; /* Should we be quiet or _very_ quiet? 
*/ if (opt__verbosity() <= VERBOSITY_QUIET) goto ex; /* If the entry is new or deleted, got added or will be unversioned, we * know that all meta-data has changed; we show only the essential * information. */ if ((status_bits & (FS_NEW | FS_REMOVED)) || (flags & (RF_ADD | RF_UNVERSION))) status_bits &= ~(FS_META_CHANGED | FS_LIKELY | FS_CHANGED); /* For flags like RF_ADD or RF_UNVERSION, print. Don't print for * RF_CHECK. */ if (opt__is_verbose() > 0 || (status_bits & FS__CHANGE_MASK) || (flags & ~RF_CHECK)) { copyfrom=NULL; copy_inherited=0; /* Go to copied parent when RF_COPY_SUB is set, and re-construct the * entire copyfrom-URL? */ if (opt__get_int(OPT__VERBOSE) & VERBOSITY_COPYFROM) { copy_inherited= (flags & RF_COPY_SUB); if (flags & RF_COPY_BASE) { status=cm__get_source(sts, NULL, ©from, NULL, 0); BUG_ON(status == ENOENT, "Marked as copied, but no info?"); STOPIF(status, NULL); } } if (opt__get_int(OPT__VERBOSE) & VERBOSITY_TOP_URL) STOPIF( url__full_url(sts, &url), NULL); else url=NULL; /* We do this here, so that the debug output is not disturbed by the * printed status characters. */ STOPIF( hlp__format_path(sts, path, &path), NULL); /* We're no longer doing a single printf(); but setbuf() et. al. write * that the default for terminals is line buffered, and block buffered * in the redirected case, and that's exactly what we want. */ if (opt__get_int(OPT__STATUS_COLOR)) STOPIF_CODE_EPIPE( fputs(st___color(status_bits), output), NULL); if (opt__get_int(OPT__VERBOSE) & VERBOSITY_SHOWCHG) STOPIF_CODE_EPIPE( fprintf(output, "%c%s%c%c ", flags & RF_ADD ? 'n' : flags & RF_UNVERSION ? 'd' : (status_bits & FS_REPLACED) == FS_REPLACED ? 'R' : status_bits & FS_NEW ? 'N' : status_bits & FS_REMOVED ? 'D' : '.', st___meta_string(status_bits, flags), flags & RF_CONFLICT ? 'x' : status_bits & FS_CHANGED ? 'C' : '.', flags & RF___IS_COPY ? '+' : status_bits & FS_LIKELY ? '?' : /* An entry marked for unversioning or adding, * which does not exist, gets a '!' 
*/ ( ( status_bits & FS_REMOVED ) && ( flags & (RF_UNVERSION | RF_ADD) ) ) ? '!' : '.' ), NULL); if (opt__get_int(OPT__VERBOSE) & VERBOSITY_SHOWSIZE) STOPIF_CODE_EPIPE( fprintf(output, "%8s ", size), NULL); if (opt__get_int(OPT__VERBOSE) & VERBOSITY_GROUP) STOPIF_CODE_EPIPE( fprintf(output, "%-*s", ign__max_group_name_len+2, sts->match_pattern ? sts->match_pattern->group_name : "(none)"), NULL); if (opt__get_int(OPT__VERBOSE) & VERBOSITY_SHOWNAME) STOPIF_CODE_EPIPE( fputs(path, output), NULL); if (opt__get_int(OPT__STATUS_COLOR)) STOPIF_CODE_EPIPE( fputs(ANSI__NORMAL, output), NULL); /* Here the comparison of OPT__VERBOSE is already included in the check * on copyfrom above. */ if (copyfrom || copy_inherited) STOPIF_CODE_EPIPE( fprintf(output, copy_inherited ? " (inherited)" : " (copied from %s)", copyfrom), NULL); if (url) STOPIF_CODE_EPIPE( fprintf(output, " %s", url), NULL); STOPIF_CODE_EPIPE( fputs("\n", output), NULL); } ex: return status; } /** -. * */ int st__status(struct estat *sts) { int status; int e_stat, flags; char *path; int would_be_ignored; status=0; STOPIF( ops__build_path(&path, sts), NULL); /* Is this entry already done? */ BUG_ON(sts->was_output, "%s was already output ...", path); sts->was_output=1; e_stat=sts->entry_status; flags=sts->flags; /* In case the file has been given directly as an argument to \ref * status, we wouldn't see that it's new - because ops__traverse() would * have created its path. */ if (flags & RF_ISNEW) { e_stat = ( e_stat & ~FS_REPLACED) | FS_NEW; flags &= ~RF_ADD; DEBUGP("Re-create the NEW status."); if (opt__get_int(OPT__VERBOSE) & VERBOSITY_GROUP) STOPIF( ign__is_ignore(sts, &would_be_ignored), NULL); } STOPIF( st__print_status(path, e_stat, flags, st___visible_file_size(sts), sts), NULL); ex: return status; } /** -. 
* */ int st__action(struct estat *sts) { int status; status = 0; if (hlp__only_dir_mtime_changed(sts)) return status; if (opt__get_int(OPT__STOP_ON_CHANGE) && sts->entry_status && (!(sts->entry_status & FS_CHILD_CHANGED))) /* Status is a read-only operation, so that works. */ exit(1); STOPIF( st__status(sts), NULL); ex: return status; } /** -. * */ int st__rm_status(struct estat *sts) { int status; char *path; status=0; STOPIF( ops__build_path(&path, sts), NULL); STOPIF( st__print_status(path, sts->remote_status, 0, st___visible_file_size(sts), sts), NULL); ex: return status; } /** -. * */ int st__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; /* On ENOENT (no working copy committed yet) - should we take the common * denominator as base, or the current directory? */ /* We do not call with FCB__WC_OPTIONAL; a base must be established (via * "urls" or "ignore"), so we always know where we are relative to our * base directory. */ STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); status=url__load_list(NULL, 0); /* Maybe no URL have been defined yet */ if (status != ENOENT) STOPIF(status, NULL); STOPIF( ign__load_list(NULL), NULL); if (opt__get_int(OPT__DIR_SORT) && !opt__get_int(OPT__STOP_ON_CHANGE)) { action->local_callback=st__progress; action->local_uninit=st__progress_uninit; } STOPIF( waa__read_or_build_tree(root, argc, normalized, argv, NULL, 0), "No working copy data could be found."); if (opt__get_int(OPT__DIR_SORT)) { action->local_callback=st__status; STOPIF( waa__do_sorted_tree(root, ac__dispatch), NULL); } if (opt__get_int(OPT__GROUP_STATS)) STOPIF( ign__print_group_stats(stdout), NULL); ex: return status; } #define BAR_CHART_WIDTH 20 /** -. * A secondary status function for commit and update (and other functions * which run silently through the filesystem), which shows local progress * when run on a tty. 
* On larger working copies the stat()ing alone can take some time, and * we want to keep the user informed that something happens. * * Commit and update themselves print the information send to/received from * the repository. */ int st__progress(struct estat *sts) { static unsigned int counter=0; static int is_tty=0; static int last_outp; static time_t last; static time_t too_many_new=0; int status; time_t now; int print; static const char bar_chart[BAR_CHART_WIDTH+1]="###################>"; float pct; status=0; now=time(NULL); /* gcc won't let us initialize that - it's not a constant. */ if (!progress_output) progress_output=stderr; if (is_tty == 0) { is_tty= isatty(fileno(progress_output)) ? +1 : -1; DEBUGP("we're on a tty"); } if (is_tty == +1) { /* We're on a tty. Give progress reports now and then. */ counter++; /* We do give a progress report for at least every ~2000 entries done. * For slow machines (or an empty dentry cache, eg. after an OOM * situation) we check every ~50 entries if there's been * more than a second between reports, and if there was, we show, too. * We take this (complicated) route because time() takes some time, * too; and I've seen too many programs spend 50% of runtime in * gettimeofday() to see whether they should print something. */ /* Mind: for & we need powers of 2 minus 1. */ print= (counter & 0xfff) == 0 ? 1 : 0; if (!print && ((counter & 0x3f) == 0)) { now=time(NULL); /* If ntp turns the clock back, the user gets what he deserves - * output. */ if (now != last) print=1; } if (print) { /* If we're at 99% percent for too long, we only print * the entries found. */ if (counter <= approx_entry_count && now 0.96 && !too_many_new) too_many_new=now+5; } else { /* If we don't know how many entries there are (first-time commit), * or when we find that the estimate was wrong (too small), * we just write how many were processed. 
*/ print=fprintf(progress_output, "\r%8d entries done", counter); } STOPIF_CODE_ERR(print < 0, errno, "Progress status could not be written"); /* In the case of an invalid approx_entry_count we may have * to print some spaces after the string, to clean up the * previously printed bar chart. */ if (print < last_outp) fprintf(progress_output, "%*s ", last_outp-print, ""); last_outp=print; /* We store the maximum number of characters printed, to * print the "right" number of spaces later on. * This number should be constant - as long as we don't have more than * 100M entries to do. */ if (print > max_progress_len) max_progress_len=print; /* I thought briefly whether I should assign "now", and have "now" * recalculated whenever &~128 gets true (as then &~2048 must be true, * too) ... but that's not maintainer-friendly. * Although ... it would save a few microseconds :-) */ time(&last); } // usleep(171000); } ex: return status; } /** -. * Mostly needed to clear the cursor line, to avoid having part of a * progress line mixed with some other output. */ int st__progress_uninit(void) { static const char err[]="Clearing the progress space"; int status; char buff[max_progress_len+3]; status=0; if (max_progress_len>0) { /* if EOF were always -1, we could just OR the return values * together. */ buff[0]='\r'; memset(buff+1, ' ', max_progress_len); buff[1+max_progress_len]='\r'; buff[2+max_progress_len]=0; /* Maybe a fprintf(... "%*s" ) would be better? */ STOPIF_CODE_ERR( fputs(buff, progress_output) == EOF, errno, err); fflush(progress_output); } ex: return status; } struct st___bit_info { int val; char *string; int str_len; }; #define BIT_INFO(v, s) { .val=v, .string=s, .str_len=strlen(s) } /** Constructs a string from a bitmask, where one or more bits may be set. * * Must not be free()d. 
*/ #define st___string_from_bits(v, a, t) _st___string_from_bits(v, a, sizeof(a)/sizeof(a[0]), t) volatile char *_st___string_from_bits(int value, const struct st___bit_info data[], int max, char *text_for_none) { int status; static struct cache_t *cache=NULL; static const char sep[]=", "; char *string; int i; int last_len, new_len; struct cache_entry_t **cc; STOPIF( cch__new_cache(&cache, 4), NULL); STOPIF( cch__add(cache, 0, NULL, 128, &string), NULL); cc=cache->entries + cache->lru; last_len=0; if (string) *string=0; for(i=0; i (*cc)->len) { STOPIF( cch__entry_set(cc, 0, NULL, new_len+64, 1, &string), NULL); string[last_len]=0; } if (last_len) { strcpy(string + last_len, sep); last_len += strlen(sep); } strcpy(string + last_len, data[i].string); last_len=new_len; #if 0 // Too verbose. DEBUGP("match bit 0x%X on 0x%X: %s", data[i].val, value, data[i].string); #endif } } ex: /* Is that good? */ if (status) return NULL; /* If no bits are set, return "empty" */ return string && *string ? string : text_for_none; } inline volatile char* st__flags_string_fromint(int mask) { const struct st___bit_info flags[]={ BIT_INFO( RF_ADD, "add"), BIT_INFO( RF_UNVERSION, "unversion"), /* This flag is not shown, as it's always set when we get here. * So there's no information. 
*/ // BIT_INFO( RF_PRINT, "print"), BIT_INFO( RF_CHECK, "check"), BIT_INFO( RF_COPY_BASE, "copy_base"), BIT_INFO( RF_COPY_SUB, "copy_sub"), BIT_INFO( RF_CONFLICT, "conflict"), BIT_INFO( RF_PUSHPROPS, "push_props"), }; return st___string_from_bits(mask, flags, "none"); } inline volatile char* st__status_string_fromint(int mask) { const struct st___bit_info statii[]={ BIT_INFO( FS_NEW, "new"), BIT_INFO( FS_REMOVED, "removed"), BIT_INFO( FS_CHANGED, "changed"), BIT_INFO( FS_META_OWNER, "owner"), BIT_INFO( FS_META_GROUP, "group"), BIT_INFO( FS_META_MTIME, "mtime"), BIT_INFO( FS_META_UMODE, "umode"), BIT_INFO( FS_PROPERTIES, "props"), BIT_INFO( FS_CHILD_CHANGED, "child"), BIT_INFO( FS_LIKELY, "likely"), }; return st___string_from_bits(mask, statii, "unmodified"); } char *st__type_string(mode_t mode) { switch (mode & S_IFMT) { case S_IFDIR: return "directory"; case S_IFBLK: return "block-dev"; case S_IFCHR: return "char-dev"; case S_IFREG: return "file"; case S_IFLNK: return "symlink"; case S_IFSOCK: return "any-special"; case S_IFGARBAGE: return "garbage"; } return "invalid"; } inline volatile char* st__status_string(const struct estat * const sts) { return st__status_string_fromint(sts->entry_status); } int st__print_entry_info(struct estat *sts) { int status; char *path, *waa_path, *url, *copyfrom; svn_revnum_t copy_rev; status=errno=0; STOPIF( ops__build_path(&path, sts), NULL); STOPIF( url__full_url(sts, &url), NULL); copyfrom=NULL; if ((opt__get_int(OPT__VERBOSE) & VERBOSITY_COPYFROM) && (sts->flags & RF___IS_COPY)) { STOPIF( cm__get_source(sts, path, ©from, ©_rev, 0), NULL); } STOPIF_CODE_EPIPE( printf(" Type: \t%s\n", st__type_string(sts->st.mode)), NULL); if (S_ISDIR(sts->st.mode)) STOPIF_CODE_EPIPE( printf( " ChildCount:\t%u\n", sts->entry_count), NULL); STOPIF_CODE_EPIPE( printf(" URL: \t%s\n", url), NULL); STOPIF_CODE_EPIPE( printf(" Status:\t0x%X (%s)\n", sts->entry_status, st__status_string(sts)), NULL); STOPIF_CODE_EPIPE( printf(" Flags:\t0x%X (%s)\n", 
sts->flags & ~RF_PRINT, st__flags_string_fromint(sts->flags)), NULL); if (copyfrom) { STOPIF_CODE_EPIPE( printf(" Copyfrom:\trev. %llu of %s\n", (t_ull)copy_rev, copyfrom), NULL); } STOPIF_CODE_EPIPE( printf(" Dev: \t%llu\n", (t_ull)sts->st.dev), NULL); STOPIF_CODE_EPIPE( printf(" Inode: \t%llu\n", (t_ull)sts->st.ino), NULL); STOPIF_CODE_EPIPE( printf(" Mode: \t0%4o\n", sts->st.mode), NULL); STOPIF_CODE_EPIPE( printf(" UID/GID:\t%u (%s)/%u (%s)\n", sts->st.uid, hlp__get_uname(sts->st.uid, "undefined"), sts->st.gid, hlp__get_grname(sts->st.gid, "undefined") ), NULL); /* Remove the \n at the end */ STOPIF_CODE_EPIPE( printf(" MTime: \t%.24s\n", ctime( &(sts->st.mtim.tv_sec) )), NULL); STOPIF_CODE_EPIPE( printf(" CTime: \t%.24s\n", ctime( &(sts->st.ctim.tv_sec) )), NULL); STOPIF( waa__get_waa_directory(path, &waa_path, NULL, NULL, GWD_WAA), NULL); STOPIF_CODE_EPIPE( printf(" WAA-Path:\t%s\n", waa_path), NULL); if (!sts->parent) { STOPIF( waa__get_waa_directory(path, &waa_path, NULL, NULL, GWD_CONF), NULL); STOPIF_CODE_EPIPE( printf(" Conf-Path:\t%s\n", waa_path), NULL); } /* The root entry has no URL associated, and so no revision number. * Print the current revision of the highest priority URL. */ STOPIF_CODE_EPIPE( printf(" Revision:\t%li\n", sts->parent ? sts->repos_rev : urllist[0]->current_rev), NULL); if (S_ISREG(sts->st.mode)) STOPIF_CODE_EPIPE( printf(" Repos-MD5:\t%s\n", cs__md5tohex_buffered(sts->md5)), NULL); if (S_ISBLK(sts->st.mode) || S_ISCHR(sts->st.mode)) { #ifdef DEVICE_NODES_DISABLED DEVICE_NODES_DISABLED(); #else STOPIF_CODE_EPIPE( printf(" Device nr.:\t%llu:%llu\n", (t_ull)MAJOR(sts->st.rdev), (t_ull)MINOR(sts->st.rdev)), NULL); #endif } else STOPIF_CODE_EPIPE( printf(" Size: \t%llu\n", (t_ull)sts->st.size), NULL); /* Any last words? 
*/ STOPIF_CODE_EPIPE( printf("\n"), NULL); ex: return status; } fsvs-1.2.6/src/fsvs.c0000644000202400020240000011662611657263440013407 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include #include "global.h" #include "interface.h" #include "ignore.h" #include "checksum.h" #include "helper.h" #include "waa.h" #include "options.h" #include "cp_mv.h" #include "status.h" #include "url.h" #include "warnings.h" #include "options.h" #include "actions.h" #include "racallback.h" /** \file * The central parts of fsvs (main). * */ /** \defgroup add_unv_ign Adding and removing entries from versioning * * Normally all new entries are taken for versioning. * The following chapters show you how to get finer control. * * Furthermore please take a look at the \ref ignore , and \ref add * and \ref unversion commands. */ /** \defgroup dev Hints and documentation for developers * Some description of data structures, and similar. * */ /** \defgroup userdoc Documentation for users * * Here you find the basic documentations for FSVS. */ /** \defgroup cmds Commands and command line parameters * \ingroup userdoc * * fsvs is a client for subversion repositories; it is designed * for fast versioning of big directory trees. * * \section cmds_synopsis SYNOPSIS * * fsvs command [options] [args] * * * The following commands are understood by FSVS: * * \section cmds_local Local configuration and information: *
 * <table>
 * <tr><td>\ref urls<td>Define working copy base
 * directories by their URL(s)
 * <tr><td>\ref status<td>Get a list of changed entries
 * <tr><td>\ref info<td>Display detailed information about
 * single entries
 * <tr><td>\ref log<td>Fetch the log messages from the repository
 * <tr><td>\ref diff<td>Get differences between files (local and
 * remote)
 * <tr><td>\ref cpfd "copyfrom-detect"<td>Ask FSVS about probably
 * copied/moved/renamed entries; see \ref cp
 * </table>
 *
 * \section cmds_au Defining which entries to take:
*
\ref ignore and \ref rign
Define ignore patterns *
\ref unversion
Remove entries from versioning *
\ref add
Add entries that would be ignored *
\ref cp, \ref mv
Tell FSVS that entries were * copied *
* * \section cmds_rep Commands working with the repository: *
*
\ref commit
Send changed data to the repository *
\ref update
Get updates from the repository *
\ref checkout
Fetch some part of the repository, and * register it as working copy *
\ref cat
Get a file from the directory *
\ref revert and \ref uncp
Undo local changes and * entry markings *
\ref remote-status
Ask what an \ref update * would bring *
* * \section cmds_prop Property handling: *
*
\ref prop-set
Set user-defined properties *
\ref prop-get
Ask value of user-defined properties *
\ref prop-list
Get a list of user-defined properties *
* * \section cmds_rec Additional commands used for recovery and debugging: *
*
\ref export
Fetch some part of the repository *
\ref sync-repos
Drop local information about the entries, * and fetch the current list from the repository. *
* * \note Multi-url-operations are relatively new; there might be rough edges. * * * The return code is \c 0 for success, or \c 2 for an error. * \c 1 is returned if the option \ref o_stop_change is used, and * changes are found; see also \ref o_filter. * * * \section glob_opt Universal options * * * \subsection glob_opt_version -V -- show version * \c -V makes FSVS print the version and a copyright notice, and exit. * * * \subsection glob_opt_deb -d and -D -- debugging * If FSVS was compiled using \c --enable-debug you can enable printing * of debug messages (to \c STDOUT) with \c -d. * Per default all messages are printed; if you're only interested in a * subset, you can use \c -D \e start-of-function-name. * \code * fsvs -d -D waa_ status * \endcode * would call the \a status action, printing all debug messages of all WAA * functions - \c waa__init, \c waa__open, etc. * * For more details on the other debugging options \ref o_debug_output * "debug_output" and \ref o_debug_buffer "debug_buffer" please see the * options list. * * * \subsection glob_opt_rec -N, -R -- recursion * The \c -N and \c -R switches in effect just decrement/increment a * counter; the behaviour is chosen depending on that. So a command line of * -N -N -N -R -R is equivalent to -3 +2 = -1, this * results in \c -N. * * * \subsection glob_opt_verb -q, -v -- verbose/quiet * -v/-q set/clear verbosity flags, and so give more/less * output. * * Please see \ref o_verbose "the verbose option" for more details. * * * \subsection glob_opt_chksum -C -- checksum * \c -C chooses to use more change detection checks; please see \ref * o_chcheck "the change_check option" for more details. * * * \subsection glob_opt_filter -f -- filter entries * This parameter allows to do a bit of filtering of entries, or, for some * operations, modification of the work done on given entries. 
* * It requires a specification at the end, which can be any combination of * \c any, \c text, \c new, \c deleted (or \c removed), \c meta, \c mtime, \c group, \c mode, * \c changed or \c owner; \c default or \c def use the default value. * * By giving eg. the value \c text, with a \ref status action only entries * that are new or changed are shown; with \c mtime,group only entries * whose group or modification time has changed are printed. * * \note Please see \ref o_chcheck for some more information. * * \note If an entry gets replaced with an entry of a different type (eg. a * directory gets replaced by a file), that counts as \c deleted \b and \c * new. * * If you use \c -v, it's used as a \c any internally. * * If you use the string \c none, it resets the bitmask to \b no entries * shown; then you can built a new mask. * So \c owner,none,any,none,delete would show deleted entries. * If the value after all commandline parsing is \c none, it is reset to * the default. * * * \subsection glob_opt_warnings -W warning=action -- set warnings * * Here you can define the behaviour for certain situations that should not * normally happen, but which you might encounter. * * The general format here is \e specification = \e action, where \e * specification is a string matching the start of at least one of the * defined situations, and \e action is one of these: * - \e once to print only a single warning, * - \e always to print a warning message \b every time, * - \e stop to abort the program, * - \e ignore to simply ignore this situation, or * - \e count to just count the number of occurrences. * * If \e specification matches more than one situation, all of them are * set; eg. for \e meta=ignore all of \e meta-mtime, \e meta-user etc. are * ignored. 
* * If at least a single warning that is \b not ignored is encountered * during the program run, a list of warnings along with the number of * messages it would have printed with the setting \e always is displayed, * to inform the user of possible problems. * * The following situations can be handled with this: * *
\e meta-mtime, \e meta-user, \e meta-group, \e meta-umask * These warnings are issued if a meta-data property that was fetched * from the repository couldn't be parsed. This can only happen if some * other program or a user changes properties on entries.
* In this case you can use \c -Wmeta=always or \c -Wmeta=count, until the * repository is clean again. * *
\e no-urllist * This warning is issued if a \ref info action is executed, but no URLs * have been defined yet. * *
\e charset-invalid * If the function \c nl_langinfo(3) couldn't return the name of the current * character encoding, a default of UTF-8 is used. * You might need that for a minimal system installation, eg. on recovery. * *
\e chmod-eperm, \e chown-eperm * If you update a working copy as normal user, and get to update a file * which has another owner but which you may modify, you'll get errors * because neither the user, group, nor mode can be set. \n * This way you can make the errors non-fatal. * *
\e chmod-other, \e chown-other * If you get another error than \c EPERM in the situation above, you might * find these useful. * *
\e mixed-rev-wc * If you specify some revision number on a \ref revert, it will complain * that mixed-revision working copies are not allowed. \n * While you cannot enable mixed-revision working copies (I'm working on * that) you can avoid being told every time. * *
\e propname-reserved * It is normally not allowed to set a property with the \ref prop-set * action with a name matching some reserved prefixes. * *
\anchor warn_ign_abs_not_base \e ignpat-wcbase * This warning is issued if an \ref ignpat_shell_abs "absolute ignore * pattern" does not match the working copy base directory. \n * See \ref ignpat_shell_abs "absolute shell patterns" for more details. * *
\e diff-status * GNU diff has defined that it returns an exit code 2 in case of an error; * sadly it returns that also for binary files, so that a simply fsvs * diff some-binary-file text-file would abort without printing the * diff for the second file. \n * Because of this FSVS currently ignores the exit status of diff per * default, but this can be changed by setting this option to eg. \e stop. * *
* * Also an environment variable FSVS_WARNINGS is used and parsed; it is * simply a whitespace-separated list of option specifications. * * * \subsection glob_opt_urls -u URLname[@revision[:revision]] -- select URLs * * Some commands can be reduced to a subset of defined URLs; * the \ref update command is a example. * * If you have more than a single URL in use for your working copy, \c * update normally updates \b all entries from \b all URLs. By using * this parameter you can tell FSVS to update only the specified URLs. * * The parameter can be used repeatedly; the value can have multiple URLs, * separated by whitespace or one of \c ",;". * * \code * fsvs up -u base_install,boot@32 -u gcc * \endcode * * This would get \c HEAD of \c base_install and \c gcc, and set the target * revision of the \c boot URL for this command at 32. * * * \subsection glob_options -o [name[=value]] -- other options * This is used for setting some seldom used option, for which default can * be set in a configuration file (to be implemented, currently only * command-line). * * For a list of these please see \ref options. * * * \section Signals * * If you have a running FSVS, and you want to change its verbosity, you can send the process either * \c SIGUSR1 (to make it more verbose) or \c SIGUSR2 (more quiet). * */ /** -. * */ char parm_dump[]="dump", parm_test[]="test", parm_load[]="load"; int debuglevel=0, /** -. We start with recursive by default. */ opt_recursive=1; svn_revnum_t target_revision; svn_revnum_t opt_target_revision=SVN_INVALID_REVNUM; svn_revnum_t opt_target_revision2=SVN_INVALID_REVNUM; int opt_target_revisions_given=0; char *opt_commitmsg, *opt_debugprefix, *opt_commitmsgfile; /** -. * Is there some better way? And I don't want to hear about using C++ * templates and generating each function twice - once with output and once * without! * Maybe with some call that encapsulates this functionality, and uses some * stack? 
Although we can simply increment/decrement this value. */
int make_STOP_silent=0;
/** Remember how the program was called. */
static char *program_name;

/** -. */
char *start_path=NULL;
/** -. */
int start_path_len=0;

#ifdef HAVE_LOCALES
char *local_codeset;
#endif

apr_pool_t *global_pool;

struct url_t *current_url;

/* For Solaris, which doesn't have one ... */
//char **environ=NULL;
#ifdef NEED_ENVIRON_EXTERN
extern
#endif
char**environ;


/** Opens the debug output file or pipe, as specified.
 *
 * If a debug buffer is given, this is filled first; and only in case of a
 * buffer flush the given file or pipe is opened, to receive the buffer
 * contents.
 *
 * A leading '|' in the configured debug_output means a pipe (popen());
 * anything else is opened as a regular file. On failure the output falls
 * back to stdout and a debug message records errno.
 *
 * This function cannot return errors. */
void _DEBUGP_open_output(FILE **output, int *was_popened)
{
	const char *fn;
	FILE *tmp;

	/* Defaults: stdout, not a pipe. */
	*output=stdout;
	*was_popened=0;

	fn=opt__get_string(OPT__DEBUG_OUTPUT);
	if (fn)
	{
		*was_popened= (fn[0] == '|');
		if (*was_popened)
			tmp=popen(fn+1, "w");
		else
			tmp=fopen(fn, "w");

		if (tmp)
			*output=tmp;
		else
			DEBUGP("'%s' cannot be opened: %d=%s",
					opt__get_string(OPT__DEBUG_OUTPUT),
					errno, strerror(errno));
	}
}

/** This constant is used to determine when to rotate the debug output
 * buffer. */
#define MAX_DEBUG_LINE_LEN (1024)

/** -.
 * Never called directly, used only via the macro DEBUGP().
 *
 * Writes a timestamped debug line to the configured output; optionally
 * buffers output in a wrap-around fmemopen() memory stream (debug_buffer
 * option) that is only flushed to the real output on error.
 *
 * For uninitializing in the use case \c debug_buffer the \c line value is
 * misused to show whether an error occured. A NULL \a file means
 * "uninitialize/flush". */
void _DEBUGP(const char *file, int line, const char *func, char *format, ...)
{
	static struct timeval tv;
	static struct timezone tz;
	struct tm *tm;
	va_list va;
	static FILE *debug_out=NULL;
	static int was_popened=0;
	int ms;
	const char *fn;
	static char *buffer_start=NULL;
	static int did_wrap=0;
	FILE *real_out;
	long mem_pos;

	/* Uninit? */
	if (!file)
	{
		if (line && opt__get_int(OPT__DEBUG_BUFFER) && debug_out)
		{
			/* Error in program, do output. */
			_DEBUGP_open_output(&real_out, &was_popened);

			mem_pos=ftell(debug_out);
			if (mem_pos>=0 && did_wrap)
			{
				buffer_start[mem_pos]=0;
				/* Look for the start of a line. */
				/* NOTE(review): strchr() starts at the NUL just written at
				 * mem_pos, so it can never find a '\n' and the older (wrapped)
				 * half of the buffer is never printed -- looks like it should
				 * search from mem_pos+1; verify against upstream. */
				fn=strchr(buffer_start+mem_pos,'\n');
				if (fn)
					fputs(fn+1, real_out);
			}
			fputs(buffer_start, real_out);

			/* This is just the mem stream */
			fclose(debug_out);
			/* The "real" stream might be a pipe. */
			debug_out=real_out;
		}

		/* Error checking makes not much sense ... */
		if (debug_out)
		{
			if (was_popened)
				pclose(debug_out);
			else if (debug_out != stdout)
				fclose(debug_out);
			debug_out=NULL;
		}
		return;
	}

	if (!debuglevel) return;

	/* look if matching prefix */
	if (opt_debugprefix &&
			strncmp(opt_debugprefix, func, strlen(opt_debugprefix)))
		return;

	if (!debug_out)
	{
		/* Default to STDOUT. */
		debug_out=stdout;
#ifdef ENABLE_DEBUGBUFFER
		if (opt__get_int(OPT__DEBUG_BUFFER))
		{
			/* Capture into a memory buffer instead of writing directly. */
			buffer_start=malloc(opt__get_int(OPT__DEBUG_BUFFER));
			if (buffer_start)
				debug_out=fmemopen(buffer_start,
						opt__get_int(OPT__DEBUG_BUFFER), "w+");
			if (buffer_start && debug_out)
			{
				DEBUGP("using a buffer of %d bytes.",
						opt__get_int(OPT__DEBUG_BUFFER));
			}
			else
			{
				/* Allocation or fmemopen() failed: disable buffering. */
				opt__set_int(OPT__DEBUG_BUFFER, PRIO_MUSTHAVE, 0);
				debug_out=stdout;
				DEBUGP("cannot use memory buffer for debug");
			}
		}
		else
		{
			_DEBUGP_open_output(&debug_out, &was_popened);
		}
#else
		_DEBUGP_open_output(&debug_out, &was_popened);
#endif
	}

	gettimeofday(&tv, &tz);
	tm=localtime(&tv.tv_sec);

	/* Just round down, else we'd have to increment the other fields for
	 * >= 999500 us. */
	ms=tv.tv_usec/1000;

#ifdef ENABLE_DEBUGBUFFER
	if (opt__get_int(OPT__DEBUG_BUFFER))
	{
		/* Check whether we should rotate. */
		mem_pos=ftell(debug_out);
		if (mem_pos+MAX_DEBUG_LINE_LEN >= opt__get_int(OPT__DEBUG_BUFFER))
		{
			/* What can possibly go wrong ;-/ */
			fseek(debug_out, 0, SEEK_SET);
			did_wrap++;
		}
	}
#endif
	fprintf(debug_out, "%02d:%02d:%02d.%03d %s[%s:%d] ",
			tm->tm_hour, tm->tm_min, tm->tm_sec, ms,
			func, file, line);

	va_start(va, format);
	vfprintf(debug_out, format, va);
	fputc('\n', debug_out);
	fflush(debug_out);
}

/** -.
* It checks the given status code, and (depending on the command line flag * \ref glob_opt_verb "-v") prints only the first error or the whole call * stack. * If \ref debuglevel is set, prints some more details - time, file and * line. * Never called directly; only via some macros. * * In case the first character of the \a format is a "!", it's a * user error - here we normally print only the message, without the error * code line. The full details are available via \c -d and \c -v. * * \c -EPIPE is handled specially, in that it is passed up, but no message * is printed. */ int _STOP(const char *file, int line, const char *function, int errl, const char *format, ...) { static int already_stopping=0; static int error_number; int is_usererror; struct timeval tv; struct timezone tz; struct tm *tm; va_list va; FILE *stop_out=stderr; char errormsg[256]; if (make_STOP_silent) return errl; if (errl==-EPIPE) return errl; is_usererror= format && *format == '!'; if (is_usererror) format++; /* With verbose all lines are printed; else only the first non-empty. */ if ( (already_stopping || !format) && !(opt__get_int(OPT__VERBOSE) & VERBOSITY_STACKTRACE)) return error_number; if (! (already_stopping++)) { /* flush STDOUT and others */ fflush(NULL); if (is_usererror) { va_start(va, format); vfprintf(stop_out, format, va); if (!(debuglevel || opt__is_verbose()>0)) goto eol; } fputs("\n\nAn error occurred", stop_out); if (debuglevel || opt__is_verbose()>0) { gettimeofday(&tv, &tz); tm=localtime(&tv.tv_sec); fprintf(stop_out, " at %02d:%02d:%02d.%03d", tm->tm_hour, tm->tm_min, tm->tm_sec, (int)(tv.tv_usec+500)/1000); } errormsg[0]=0; svn_strerror (errl, errormsg, sizeof(errormsg)); fprintf(stop_out, ": %s (%d)\n", errormsg[0] ? 
errormsg : strerror(abs(errl)), errl); } /* Stacktrace */ fputs(" in ", stop_out); fputs(function, stop_out); if (debuglevel) fprintf(stop_out, " [%s:%d]", file, line); if (format) { fputs(": ", stop_out); va_start(va, format); vfprintf(stop_out, format, va); } eol: fputc('\n', stop_out); fflush(stop_out); error_number=errl; already_stopping=1; return errl; } #define _STRINGIFY(x) #x #define STRINGIFY(x) " " #x "=" _STRINGIFY(x) /** For keyword expansion - the version string. */ const char* Version(FILE *output) { static const char Id[] ="$Id: fsvs.c 2457 2011-11-11 18:19:12Z pmarek $"; fprintf(output, "FSVS (licensed under the GPLv3), (C) by Ph. Marek;" " version " FSVS_VERSION "\n"); if (opt__is_verbose()>0) { fprintf(output, "compiled on " __DATE__ " " __TIME__ ", with options:\n\t" #ifdef HAVE_VALGRIND STRINGIFY(HAVE_VALGRIND) #endif #ifdef HAVE_VALGRIND_VALGRIND_H STRINGIFY(HAVE_VALGRIND_VALGRIND_H) #endif #ifdef ENABLE_DEBUG STRINGIFY(ENABLE_DEBUG) #endif #ifdef ENABLE_GCOV STRINGIFY(ENABLE_GCOV) #endif #ifdef ENABLE_RELEASE STRINGIFY(ENABLE_RELEASE) #endif #ifdef HAVE_LOCALES STRINGIFY(HAVE_LOCALES) #endif #ifdef HAVE_UINT32_T STRINGIFY(HAVE_UINT32_T) #endif #ifdef AC_CV_C_UINT32_T STRINGIFY(AC_CV_C_UINT32_T) #endif #ifdef HAVE_LINUX_TYPES_H STRINGIFY(HAVE_LINUX_TYPES_H) #endif #ifdef HAVE_LINUX_UNISTD_H STRINGIFY(HAVE_LINUX_UNISTD_H) #endif #ifdef HAVE_DIRFD STRINGIFY(HAVE_DIRFD) #endif #ifdef HAVE_STRUCT_STAT_ST_MTIM STRINGIFY(HAVE_STRUCT_STAT_ST_MTIM) #endif #ifdef CHROOTER_JAIL STRINGIFY(CHROOTER_JAIL) #endif #ifdef HAVE_COMPARISON_FN_T STRINGIFY(HAVE_COMPARISON_FN_T) #endif #ifdef HAVE_O_DIRECTORY STRINGIFY(HAVE_O_DIRECTORY) #endif #ifdef O_DIRECTORY STRINGIFY(O_DIRECTORY) #endif #ifdef HAVE_LINUX_KDEV_T_H STRINGIFY(HAVE_LINUX_KDEV_T_H) #endif #ifdef ENABLE_DEV_FAKE STRINGIFY(ENABLE_DEV_FAKE) #endif #ifdef DEVICE_NODES_DISABLED STRINGIFY(DEVICE_NODES_DISABLED) #endif #ifdef HAVE_STRSEP STRINGIFY(HAVE_STRSEP) #endif #ifdef HAVE_LUTIMES 
STRINGIFY(HAVE_LUTIMES) #endif #ifdef HAVE_LCHOWN STRINGIFY(HAVE_LCHOWN) #endif #ifdef WAA_WC_MD5_CHARS STRINGIFY(WAA_WC_MD5_CHARS) #endif #ifdef HAVE_FMEMOPEN STRINGIFY(HAVE_FMEMOPEN) #endif #ifdef ENABLE_DEBUGBUFFER STRINGIFY(ENABLE_DEBUGBUFFER) #endif STRINGIFY(NAME_MAX) "\n"); } return Id; } /** \addtogroup cmds * * \section help * * \code * help [command] * \endcode * * This command shows general or specific \ref help (for the given * command). A similar function is available by using \c -h or \c -? after * a command. * */ /** -. * Prints help for the given action. * */ int ac__Usage(struct estat *root UNUSED, int argc UNUSED, char *argv[]) { int status; int i, hpos, len; char const* const*names; status=0; Version(stdout); /* Show help for a specific command? */ if (argv && *argv) { STOPIF( act__find_action_by_name(*argv, &action), NULL); printf("\n" "Help for command \"%s\".\n", action->name[0]); names=action->name+1; if (*names) { printf("Aliases: "); while (*names) { printf("%s%s", names[0], names[1] ? ", " : "\n"); names++; } } puts(""); puts(action->help_text); } else { /* Print generic help text: list of commands, parameters. */ printf( "\n" "Known commands:\n" "\n "); hpos=2; for(i=0; i= 75) { printf("\n "); hpos=2; } printf("%s%s", action_list[i].name[0], i+1 == action_list_count ? "\n" : ", "); hpos += 2 + len; } puts( "\n" "Parameters:\n" "\n" "-v increase verbosity\n" "-q decrease verbosity (quiet)\n" "\n" "-C checksum possibly changed files;\n" " if given twice checksum *all* files.\n" "\n" "-V show version\n" "\n" "Environment variables:\n" "\n" "$FSVS_CONF defines the location of the FSVS Configuration area\n" " Default is " DEFAULT_CONF_PATH ", but any writeable directory is allowed.\n" "$FSVS_WAA defines the location of the Working copy Administrative Area\n" " Default is " DEFAULT_WAA_PATH ", but any writeable directory is allowed.\n" ); } ex: exit(status); /* Never done */ return 0; } /** USR1 increases FSVS' verbosity. 
 */
void sigUSR1(int num)
{
	/* First step up to normal verbosity, then raise the debug level
	 * (capped at 3). */
	if (opt__verbosity() < VERBOSITY_DEFAULT)
		opt__set_int(OPT__VERBOSE, PRIO_MUSTHAVE, VERBOSITY_DEFAULT);
	else if (debuglevel < 3)
	{
		debuglevel++;
		DEBUGP("more debugging via SIGUSR1");
	}
}

/** USR2 decreases FSVS' verbosity. */
void sigUSR2(int num)
{
	/* Mirror of sigUSR1: lower the debug level first, then verbosity. */
	if (debuglevel)
	{
		DEBUGP("less debugging via SIGUSR2");
		debuglevel--;
	}
	else if (opt__verbosity() >= VERBOSITY_DEFAULT)
		opt__set_int(OPT__VERBOSE, PRIO_MUSTHAVE, VERBOSITY_QUIET);
}

/** Handler for SIGPIPE.
 * We give the running action a single chance to catch an \c EPIPE, to
 * clean up on open files and similar; if it doesn't take this chance, the
 * next \c SIGPIPE kills FSVS. */
void sigPipe(int num)
{
	DEBUGP("got SIGPIPE");
	/* Restore the default disposition, so a second SIGPIPE is fatal. */
	signal(SIGPIPE, SIG_DFL);
}

/** Signal handler for debug binaries.
 * If the \c configure run included \c --enable-debug, we intercept
 * \c SIGSEGV and try to start \c gdb.
 *
 * We use a pipe to stop the parent process; debugging within gdb normally
 * starts with a \c bt (backtrace), followed by up 3 to skip over
 * the signal handler and its fallout. */
#ifdef ENABLE_DEBUG
/// FSVS GCOV MARK: sigDebug should not be executed
void sigDebug(int num)
{
	char ppid_str[20];
	int pid;
	int pipes[2];

	/* if already tried to debug, dump core on next try. */
	signal(SIGSEGV, SIG_DFL);

	/* Try to spew the debug buffer. */
	_DEBUGP(NULL, EBUSY, NULL, NULL);

	/* We use a pipe here for stopping/continuing the active process.
	 *
	 * The child tries to read from the pipe. That blocks.
	 * The parent tries to start gdb.
	 * - If the exec() returns with an error, we simply close
	 *   the pipe, and the child re-runs into its SEGV.
	 * - If the exec() works, the child will be debugged.
	 *   When gdb exits, the pipe end is closed, so the child
	 *   will no longer be blocked. */
	pipes[0]=pipes[1]=-1;
	if ( pipe(pipes) == -1) goto ex;

	pid=fork();
	if (pid == -1) return;

	if (pid)
	{
		/* Parent tries to start gdb for child.
		 * We already have the correct pid. */
		close(pipes[0]);

		sprintf(ppid_str, "%d", pid);
		execlp("gdb", "gdb", program_name, ppid_str, NULL);

		/* exec failed: close the write end so the child unblocks. */
		close(pipes[1]);
		exit(1);
	}
	else
	{
		/* Child (the comment said "Parent" -- that was wrong; fork()
		 * returns 0 in the child).
		 * Either gdb attaches, or the child interrupts the read with
		 * a signal. */
		close(pipes[1]);
		pipes[1]=-1;
		read(pipes[0], &pid, 1);
	}

ex:
	if (pipes[0] != -1) close(pipes[0]);
	if (pipes[1] != -1) close(pipes[1]);
}

/** This function is used for the component tests.
 * When FSVS is run with "-d" as parameter, a call to fileno() is
 * guaranteed to happen here; a single "up" gets into this stack frame, and
 * allows easy setting/quering of some values. */
void *_do_component_tests(int a)
{
	/* How not to have them optimized out? */
	static int int_array[10];
	static void *voidp_array[10];
	static char *charp_array_1[10];
	static char *charp_array_2[10];
	static char **charpp;
	static char buffer[1024];
	static struct estat *estat_array[10];

	int_array[0]=fileno(stdin);
	voidp_array[0]=stdin+fileno(stdout);
	buffer[0]=fileno(stderr);
	charpp=charp_array_2+4;

	switch(a)
	{
		case 4: return int_array;
		case 9: return voidp_array;
		case 6: return buffer;
		case 2: return charp_array_1;
		case 3: return estat_array;
		case 7: return charpp;
		case 8: return charp_array_2;
	}
	return NULL;
}
#endif

/** The main function.
 *
 * It does the following things (not in that order):
 * - Initializes the various parts
 *   - APR (apr_initialize()),
 *   - WAA (waa__init()),
 *   - RA (svn_ra_initialize()),
 *   - Callback functions (cb__init()) ...
 *   - Local charset (\c LC_CTYPE)
 * - Processes the command line. In glibc the options are reordered to the
 *   front; on BSD systems this is not done, so there's an extra loop to do
 *   that manually.
 *   We want all options processed, and then the paths (or other parameters)
 *   in a clean list.
 * - And calls the main action.
 *
 *

How the parameters get mungled - example

* * On entry we have eg. this: * \dot * digraph { * rankdir=LR; * node [shape=rectangle, fontsize=10]; * * parm [shape=record, * label="{ { <0>fsvs | <1>update | <2>-u | <3>baseinstall | <4>/bin }}"] * * list [shape=record, * label="{ args | { <0>0 | <1>1 | <2>2 | <3>3 | <4>4 | NULL } }" ]; * * list:0:e -> parm:0:w; * list:1:e -> parm:1:w; * list:2:e -> parm:2:w; * list:3:e -> parm:3:w; * list:4:e -> parm:4:w; * * N1 [label="NULL", shape=plaintext]; * N2 [label="NULL", shape=plaintext]; * url__parm_list -> N1; * * program_name -> N2; * } * \enddot * * After command line parsing we have: * \dot * digraph { * rankdir=LR; * node [shape=rectangle, fontsize=10]; * * parm [shape=record, * label="{ { <0>fsvs | <1>update | <2>-u | <3>baseinstall | <4>/bin }}"] * * list [shape=record, * label="{ args | { <0>0 | <1>1 | NULL | <3>3 | <4>4 | NULL }}" ]; * * list:0:e -> parm:1:w; * list:1:e -> parm:4:w; * list:3:e -> parm:3:w; * list:4:e -> parm:4:w; * * program_name -> parm:0:w; * * ulist [shape=record, * label="{ url__parm_list | { <0>0 | NULL } }" ]; * ulist:0:e -> parm:3:w; * } * \enddot * * *

Argumentation for parsing the urllist

* * I'd have liked to keep the \ref url__parm_list in the original \a args * as well; but we cannot easily let it start at the end, and putting it * just after the non-parameter arguments * - might run out of space before some argument (because two extra \c NULL * pointers are needed, and only a single one is provided on startup), * - and we'd have to move the pointers around every time we find a * non-option argument. * * Consider the case [fsvs, update, /bin/, -uURLa, -uURLb, /lib, * NULL]. * That would be transformed to * -# [update, NULL, /bin/, -uURLa, -uURLb, /lib, NULL] * -# [update, /bin/, NULL, -uURLa, -uURLb, /lib, NULL] * -# And now we'd have to do [update, /bin/, NULL, -uURLa, NULL, * -uURLb, /lib, NULL]; this is too long. * We could reuse the \c NULL at the end ... but that's not that fine, * either -- the \ref url__parm_list wouldn't be terminated. * * So we go the simple route - allocate an array of pointers, and store * them there. * * */ int main(int argc, char *args[], char *env[]) { struct estat root = { }; int status, help; char *cmd; svn_error_t *status_svn; int eo_args, i; void *mem_start, *mem_end; help=0; eo_args=1; environ=env; program_name=args[0]; #ifdef ENABLE_DEBUG /* If STDOUT and STDIN are on a terminal, we could possibly be * debugged. Depends on "configure --enable-debug". */ if (isatty(STDIN_FILENO) && isatty(STDOUT_FILENO)) signal(SIGSEGV, sigDebug); /* Very early debugging */ cmd=getenv(FSVS_DEBUG_ENV); if (cmd) debuglevel = atoi(cmd); #endif signal(SIGPIPE, sigPipe); signal(SIGUSR1, sigUSR1); signal(SIGUSR2, sigUSR2); mem_start=sbrk(0); #ifdef HAVE_LOCALES /* Set the locale from the environment variables, so that we get the * correct codeset told. * Do that while still in the chroot jail. */ cmd=setlocale(LC_ALL, ""); DEBUGP("LC_ALL gives %s", cmd); /* The second call is in case that the above fails. * Sometimes eg. LC_PAPER is set to an invalid value; then the first * call fails, but the seconds succeeds. 
* See also the fsvs dev@ mailing list (April 2006), where a post * to dev@subversion is referenced */ cmd=setlocale(LC_CTYPE, ""); DEBUGP("LC_CTYPE gives %s", cmd); local_codeset=nl_langinfo(CODESET); if (!local_codeset) { STOPIF( wa__warn(WRN__CHARSET_INVALID, EINVAL, "Could not retrieve the current character set - assuming UTF-8."), "nl_langinfo(CODESET) failed - check locale configuration."); } else { DEBUGP("codeset found to be %s", local_codeset); if (strcmp(local_codeset, "UTF-8")==0) /* man page says "This pointer MAY point to a static buffer ..." * so no freeing. */ local_codeset=NULL; } if (!local_codeset) DEBUGP("codeset: using identity"); #else DEBUGP("build without locales"); #endif /* Are we running in a chroot jail? Try to preload libraries, and escape. * * Originally there was an "#ifdef CHROOTER_JAIL" around this line. * But if this is compiled in unconditionally a precompiled binary of * your favourite distribution should work, too! (So there could be a * script that fetches all needed libraries out of eg. debian-testing and * prepares a chroot for you. Any volunteers ;-? * * As this function keeps quiet if \b no needed environment variable is * set it's just a small bit of additional work. */ STOPIF( hlp__chrooter(), NULL); /* Load options from environment variables. */ STOPIF( opt__load_env(environ), NULL); STOPIF( waa__save_cwd(&start_path, &start_path_len, 0), NULL); if (!isatty(STDOUT_FILENO)) opt__set_int( OPT__STATUS_COLOR, PRIO_PRE_CMDLINE, 0); /* direct initialization doesn't work because * of the anonymous structures */ root.repos_rev=0; root.name=root.strings="."; root.st.size=0; root.st.mode=S_IFDIR | 0700; root.entry_count=0; /* The problem is that the root entry is never done explicitly; so we * have to hard-code that here; but it is the default for all entries * anyway. 
*/ root.do_filter_allows=1; root.do_filter_allows_done=1; while (1) { /* The GNU version of getopt re-orders the parameters and looks * after non-options too; the BSD versions do not. * So we have to look ourselves whether there's a -- or * end-of-arguments. * And we reorder to the front of the array, so that the various * parts can parse their data. */ status=getopt(argc, args, "+a:VhdvCm:F:D:qf:r:W:NRo:u:?"); if (status == -1) { DEBUGP("no argument at optind=%d of %d",optind, argc); /* End of array or -- found? */ if (optind == argc) break; /* Note that this is safe - the program name is always at * front (optind starts with 1), so it's never negative */ if (strcmp("--", args[optind-1])==0) { /* Copy the rest of the arguments and stop processing */ while (optind < argc) args[eo_args++] = args[optind++]; break; } /* Normal argument (without "-") found, put to front. */ args[eo_args++]=args[optind++]; continue; } switch (status) { case '?': case 'h': default: help=1; break; case 'W': STOPIF( wa__set_warn_option(optarg, PRIO_CMDLINE), "Warning option '%s' is invalid", optarg); break; case 'C': i = hlp__rightmost_0_bit(opt__get_int(OPT__CHANGECHECK)); opt__set_int(OPT__CHANGECHECK, PRIO_CMDLINE, opt__get_int(OPT__CHANGECHECK) | i); break; case 'o': STOPIF( opt__parse( optarg, NULL, PRIO_CMDLINE, 0), "!Cannot parse option string '%s'.", optarg); break; case 'f': STOPIF( opt__parse_option(OPT__FILTER, PRIO_CMDLINE, optarg), NULL); break; case 'u': /* Some functions want URLs \b and some filename parameter (eg. * update, to define the wc base), and we have to separate them. * * As update will take an arbitrary number of URLs and filenames, * there's no easy way to tell them apart; and we have to do that * _before_ we know the wc base, so we cannot just try to lstat() * them; and we know the URLs (and their names) only _after- we * know the wc root. So we would have to guess here. 
*/ STOPIF( url__store_url_name(optarg), NULL); break; /* Maybe we should warn if -R or -N are used for commands that * don't use them? Or, better yet, use them everywhere (where * applicable). */ case 'R': opt_recursive++; break; case 'N': opt_recursive--; break; case 'F': if (opt_commitmsg) ac__Usage_this(); opt_commitmsgfile=optarg; break; case 'm': if (opt_commitmsgfile) ac__Usage_this(); opt_commitmsg=optarg; break; case 'r': /* strchrnul is GNU only - won't work on BSD */ cmd=strchr(optarg, ':'); if (cmd) *(cmd++)=0; STOPIF( hlp__parse_rev(optarg, NULL, &opt_target_revision), NULL); opt_target_revisions_given=1; /* Don't try to parse further if user said "-r 21:" */ if (cmd && *cmd) { STOPIF( hlp__parse_rev(cmd, NULL, &opt_target_revision2), NULL); opt_target_revisions_given=2; } break; #if ENABLE_RELEASE case 'D': case 'd': fprintf(stderr, "This image was compiled as a release " "(without debugging support).\n" "-d and -D are not available.\n\n"); exit(1); #else case 'D': opt_debugprefix=optarg; if (!debuglevel) debuglevel++; /* Yes, that could be merged with the next lines ... :*/ break; case 'd': /* Debugging wanted. * If given twice, any debugbuffer and debug_output settings are * overridden, to get clear, find debug outputs directly to the * console. */ if (debuglevel == 1) { /* Close any redirection or memory buffer that might already be * used. */ _DEBUGP(NULL, 0, NULL, NULL); /* Manual override. */ opt__set_string(OPT__DEBUG_OUTPUT, PRIO_MUSTHAVE, NULL); opt__set_int(OPT__DEBUG_BUFFER, PRIO_MUSTHAVE, 0); /* Now we're going directly. */ DEBUGP("Debugging set to unfiltered console"); } debuglevel++; break; #endif case 'q': /* -q gives a default level, or, if already there, goes one level * down. 
* If we would like to remove one bit after another from the * bitmask, we could use * i=opt__get_int(OPT__VERBOSE); * if (i < VERBOSITY_DEFAULT) i=0; * else i &= ~hlp__rightmost_0_bit(~i); * opt__set_int(OPT__VERBOSE, PRIO_CMDLINE, i); * (similar to the 'v' case) */ opt__set_int(OPT__VERBOSE, PRIO_CMDLINE, opt__verbosity() <= VERBOSITY_QUIET ? VERBOSITY_VERYQUIET : VERBOSITY_QUIET); break; case 'v': /* A bit more details. */ i=opt__get_int(OPT__VERBOSE); if (i == VERBOSITY_QUIET) i=VERBOSITY_DEFAULT; else i |= hlp__rightmost_0_bit(i); opt__set_int(OPT__VERBOSE, PRIO_CMDLINE, i); /* If not overridden by the commandline explicitly */ opt__set_int(OPT__FILTER, PRIO_PRE_CMDLINE, FILTER__ALL); break; case 'V': Version(stdout); exit(0); } } /* Now limit the number of arguments to the ones really left */ argc=eo_args; /* Set delimiter */ args[argc]=NULL; /* Say we're starting with index 1, to not pass the program name. */ optind=1; /* Special case: debug buffer means capture, but don't print normally. */ if (opt__get_int(OPT__DEBUG_BUFFER) && opt__get_prio(OPT__DEBUG_BUFFER)==PRIO_CMDLINE && !debuglevel) { debuglevel++; DEBUGP("debug capturing started by the debug_buffer option."); } /* first non-argument is action */ if (args[optind]) { cmd=args[optind]; optind++; STOPIF( act__find_action_by_name(cmd, &action), NULL); if (help) ac__Usage_this(); } else { if (help) ac__Usage_dflt(); action=action_list+0; } DEBUGP("optind=%d per_sts=%d action=%s rec=%d filter=%s verb=0x%x", optind, (int)sizeof(root), action->name[0], opt_recursive, st__status_string_fromint( opt__get_int(OPT__FILTER)), opt__verbosity()); for(eo_args=1; eo_argswork(&root, argc-optind, args+optind), "action %s failed", action->name[0]); /* Remove copyfrom records in the database, if any to do. */ STOPIF( cm__get_source(NULL, NULL, NULL, NULL, status), NULL); /* Maybe we should try that even if we failed? * Would make sense in that the warnings might be helpful in determining * the cause of a problem. 
* But the error report would scroll away, so we don't do that. */ STOPIF( wa__summary(), NULL); STOPIF( url__close_sessions(), NULL); ex: mem_end=sbrk(0); DEBUGP("memory stats: %p to %p, %llu KB", mem_start, mem_end, (t_ull)(mem_end-mem_start)/1024); if (status == -EPIPE) { DEBUGP("got EPIPE, ignoring."); status=0; } _DEBUGP(NULL, status, NULL, NULL); if (status) return 2; return 0; } /* vim: set cinoptions=*200 fo-=a : */ fsvs-1.2.6/src/racallback.c0000644000202400020240000005676212467104255014507 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * Callback functions, and cb__record_changes() editor. * */ #include #include #include #include #include #include #include #include "global.h" #include "helper.h" #include "update.h" #include "est_ops.h" #include "checksum.h" #include "status.h" #include "cache.h" #include "url.h" #include "racallback.h" svn_error_t *cb__init(apr_pool_t *pool) { int status; svn_error_t *status_svn; apr_hash_t *cfg_hash; svn_config_t *cfg; char *cfg_usr_path; cfg_usr_path = NULL; STOPIF( hlp__get_svn_config(&cfg_hash), NULL); cfg = apr_hash_get(cfg_hash, SVN_CONFIG_CATEGORY_CONFIG, APR_HASH_KEY_STRING); /* make sure that folders for storing authentications credentials are created */ STOPIF_SVNERR( svn_config_ensure, (cfg_usr_path, pool)); /* Set up Authentication stuff. */ STOPIF_SVNERR( svn_cmdline_setup_auth_baton, (&cb__cb_table.auth_baton, !(isatty(STDIN_FILENO) && isatty(STDOUT_FILENO)), opt__get_int(OPT__AUTHOR) ? opt__get_string(OPT__AUTHOR) : NULL, opt__get_int(OPT__PASSWD) ? 
opt__get_string(OPT__PASSWD) : NULL, cfg_usr_path, 0, /* no_auth_cache */ cfg, NULL, /* cancel function */ NULL, /* cancel baton */ pool) ); BUG_ON(!cb__cb_table.auth_baton); ex: RETURN_SVNERR(status); } /*---------------------------------------------------------------------------- * RA-layer callback functions *--------------------------------------------------------------------------*/ /// FSVS GCOV MARK: cb__open_tmp should not be executed /** This function has to be defined, but gets called only with * http-URLs. */ svn_error_t *cb__open_tmp (apr_file_t **fp, void *callback_baton, apr_pool_t *pool) { int status; STOPIF( waa__get_tmp_name( NULL, NULL, fp, pool), NULL); ex: RETURN_SVNERR(status); } struct svn_ra_callbacks_t cb__cb_table= { .open_tmp_file = cb__open_tmp, .auth_baton = NULL, .get_wc_prop = NULL, .set_wc_prop=NULL, .push_wc_prop=NULL, .invalidate_wc_props=NULL, }; /*---------------------------------------------------------------------------- * \defgroup changerec Change-Recorder * An editor which simply remembers which entries are changed. * @{ *--------------------------------------------------------------------------*/ svn_revnum_t cb___dest_rev; /** A txdelta consumer which ignores the data. */ svn_error_t *cb__txdelta_discard(svn_txdelta_window_t *window UNUSED, void *baton UNUSED) { return NULL; } /** If \a may_create is \c 0, \c ENOENT may be returned (ie. was not * found). * * If \a mode doesn't include some permission bits, like \c 0700 or \c * 0600, a default value is chosen. * * If it didn't exist, or if this is a higher priority URL, the parents get * FS_CHILD_CHANGED set. * * \a path gets set (if not \c NULL) to \a utf8_path in local encoding. 
*/ int cb__add_entry(struct estat *dir, const char *utf8_path, char **loc_path, const char *utf8_copy_path, svn_revnum_t copy_rev, int mode, int *has_existed, int may_create, void **new) { int status; struct estat *sts; char *filename; char* path; char* copy_path; int overwrite; struct sstat_t st; overwrite=0; STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); if (loc_path) *loc_path=path; STOPIF( hlp__utf82local(utf8_copy_path, ©_path, -1), NULL ); STOPIF_CODE_ERR(copy_path, EINVAL, "don't know how to handle copy_path %s@%ld", copy_path, copy_rev); /* The path should be done by open_directory descending. * We need only the file name. */ filename = ops__get_filename(path); STOPIF( ops__find_entry_byname(dir, filename, &sts, 0), "cannot lookup entry %s", path); DEBUGP("entry %s, mode 0%03o; %sfound, may %screate", path, mode, sts ? "" : "not ", may_create ? "" : "not "); if (sts) { if (has_existed) *has_existed=EEXIST; if (!url__current_has_precedence(sts->url)) goto no_change; /* This file already exists, or an update from another URL just * brought it in. * * The caller knows whether we should overwrite it silently. */ if (sts->remote_status & FS_REMOVED) { STOPIF( ops__make_shadow_entry(sts, SHADOWED_BY_REMOTE), NULL); overwrite=1; } } else { STOPIF_CODE_ERR(!may_create, ENOENT, NULL); STOPIF( ops__allocate(1, &sts, NULL), NULL); memset(sts, 0, sizeof(*sts)); /* To avoid the memory allocator overhead we would have to do our own * memory management here - eg. using dir->string. * But that would have to be tuned for performance - we get here often. * TODO. * */ STOPIF( hlp__strdup(&sts->name, filename), NULL); sts->remote_status = FS_NEW; overwrite=1; /* Put into tree. */ STOPIF( ops__new_entries(dir, 1, &sts), NULL); /* Directory got changed. 
*/ dir->remote_status |= FS_CHANGED; if (has_existed) *has_existed=0; } if (overwrite) { sts->parent=dir; /* This memset above implicitly clears all other references to the copy * data - entry_count, by_inode, by_name, strings. * But we need the copy itself. */ sts->entry_count=0; sts->by_inode=NULL; sts->by_name=NULL; sts->strings=NULL; sts->decoder=NULL; sts->has_orig_md5=0; memset(& sts->md5, 0, sizeof(sts->md5)); memset(& sts->st, 0, sizeof(sts->st)); /* Some permission bits must be set; suid/sgid/sticky are not enough. * Directories need an "x" bit, too. * */ if (!(mode & 0777)) mode |= S_ISDIR(mode) ? 0700 : 0600; /* Until we know better */ sts->st.mode = mode; /* Default is current time. */ time( & sts->st.mtim.tv_sec ); /* To avoid EPERM on chmod() etc. */ sts->st.uid=getuid(); sts->st.gid=getgid(); } sts->url=current_url; ops__mark_parent_cc(sts, remote_status); /* If it's a new entry or not, we set the current type. */ sts->new_rev_mode_packed = mode ? MODE_T_to_PACKED(mode) : sts->old_rev_mode_packed; if (sts->local_mode_packed == S_IFUNDEF) { /* We want to know which type it is locally. * We trust the filename we get from subversion. */ status=hlp__lstat(path, &st); if (status == ENOENT) { /* sts->local_mode_packed is still 0(invalid), because of the * memset() above. */ } else if (!status || status == -ENOENT) { /* That works for normal and garbage entries. */ sts->local_mode_packed=MODE_T_to_PACKED(st.mode); } else { /* We have to ignore errors here, as we might add eg. * 'sub1/sub2/entry', but locally 'sub1' is a file - and then we'd get * ENOTDIR. */ } /* No error to be delivered. */ status=0; } DEBUGP("%s is locally a %s", path, st__type_string(PACKED_to_MODE_T(sts->local_mode_packed))); no_change: /* Even if this entry has lower priority, we have to have a baton for it. * It may be a directory, and subentries might be visible. 
*/ *new = sts; ex: return status; } inline int cb___store_prop(struct estat *sts, const char *utf8_name, const svn_string_t *value, apr_pool_t *pool) { int status; int user_prop; apr_pool_t *u_p_pool; char *copy; #ifdef DEBUG static long u_p_count=0, u_p_bytes=0; #endif status=0; if (!url__current_has_precedence(sts->url)) goto ex; user_prop=0; STOPIF( up__parse_prop(sts, utf8_name, value, &user_prop, pool), NULL); ops__mark_parent_cc(sts, remote_status); DEBUGP("have name=%s; user? %d", utf8_name, user_prop); if (action->keep_user_prop && user_prop) { if (!sts->user_prop) { /* The root entry has no associated URL, so it has no pool. * Normally there shouldn'd be any user-properties, though. */ STOPIF( apr_pool_create(&u_p_pool, sts->url ? sts->url->pool : global_pool), NULL); sts->user_prop=apr_hash_make(u_p_pool); apr_hash_set(sts->user_prop, "", 0, u_p_pool); } else u_p_pool=apr_hash_get(sts->user_prop, "", 0); /* apr_hash_set() only stores an address; we have to take care to not * loose the length and data, because the pool they're in might be * invalid after closing this entry. 
*/ copy=apr_palloc(u_p_pool, strlen(utf8_name)+1); strcpy(copy, utf8_name); apr_hash_set(sts->user_prop, copy, APR_HASH_KEY_STRING, svn_string_dup(value, u_p_pool) ); #ifdef ENABLE_DEBUG u_p_count++; u_p_bytes += value->len + sizeof(char*) + sizeof(*value); DEBUGP("%lu user-props stored, with %lu bytes.", u_p_count, u_p_bytes); #endif } ex: return status; } svn_error_t *cb___set_target_revision(void *edit_baton, svn_revnum_t rev, apr_pool_t *pool) { int status; struct estat *root UNUSED=edit_baton; status=0; DEBUGP("setting revision to %llu", (t_ull)rev); cb___dest_rev=rev; RETURN_SVNERR(status); } svn_error_t *cb___open_root(void *edit_baton, svn_revnum_t base_revision, apr_pool_t *dir_pool UNUSED, void **root_baton) { struct estat *sts=edit_baton; *root_baton=sts; return SVN_NO_ERROR; } svn_error_t *cb___delete_entry(const char *utf8_path, svn_revnum_t revision UNUSED, void *parent_baton, apr_pool_t *pool) { int status; struct estat *dir=parent_baton; struct estat *sts; char* path; int chg; STOPIF( hlp__utf82local(utf8_path, &path, -1), NULL ); STOPIF( ops__find_entry_byname(dir, path, &sts, 0), NULL); if (sts) { DEBUGP("deleting entry %s", path); ops__mark_parent_cc(sts, remote_status); STOPIF( cb__remove_from_url(sts, current_url, &chg), NULL); } else { DEBUGP("entry %s not found!", path); /** \todo conflict? */ } ex: RETURN_SVNERR(status); } svn_error_t *cb___add_directory(const char *utf8_path, void *parent_baton, const char *utf8_copy_path, svn_revnum_t copy_rev, apr_pool_t *dir_pool, void **child_baton) { struct estat *dir=parent_baton; struct estat *sts; int status; int has_existed; STOPIF( cb__add_entry(dir, utf8_path, NULL, utf8_copy_path, copy_rev, S_IFDIR, &has_existed, 1, child_baton), NULL ); sts=*child_baton; if (!has_existed) { /* Initialize the directory-specific data. * If this was a file before, it may have old values in the * shared storage space. 
*/ sts->entry_count=0; sts->by_inode = sts->by_name = NULL; sts->strings = NULL; sts->other_revs=sts->to_be_sorted=0; } ex: RETURN_SVNERR(status); } svn_error_t *cb___open_directory(const char *utf8_path, void *parent_baton, svn_revnum_t base_revision UNUSED, apr_pool_t *dir_pool, void **child_baton) { struct estat *dir=parent_baton; int status; /** \todo conflict - removed locally? added */ STOPIF( cb__add_entry(dir, utf8_path, NULL, NULL, 0, S_IFDIR, NULL, 0, child_baton), NULL); ex: RETURN_SVNERR(status); } svn_error_t *cb___change_dir_prop(void *dir_baton, const char *utf8_name, const svn_string_t *value, apr_pool_t *pool) { int status; /* We do this additional call to get a meaningful backtrace. */ STOPIF( cb___store_prop(dir_baton, utf8_name, value, pool), NULL); ex: RETURN_SVNERR(status); } int cb___close(struct estat *sts) { int status; status=0; sts->repos_rev = cb___dest_rev; if (action->repos_feedback) STOPIF( action->repos_feedback(sts), NULL); ex: return status; } svn_error_t *cb___close_directory( void *dir_baton, apr_pool_t *pool) { struct estat *sts=dir_baton; int status; /* Release some memory; that was likely needed by cb__add_entry(), but is no * longer. */ IF_FREE(sts->by_name); STOPIF( cb___close(sts), NULL); ex: RETURN_SVNERR(status); } /// FSVS GCOV MARK: cb___absent_directory should not be executed svn_error_t *cb___absent_directory(const char *utf8_path, void *parent_baton, apr_pool_t *pool) { struct estat *dir UNUSED =parent_baton; DEBUGP("in %s", __PRETTY_FUNCTION__); return SVN_NO_ERROR; } svn_error_t *cb___add_file(const char *utf8_path, void *parent_baton, const char *utf8_copy_path, svn_revnum_t copy_rev, apr_pool_t *file_pool, void **file_baton) { struct estat *dir=parent_baton; struct estat *sts; int status; /* Unless we get the svn:special property, we can assume that it's a * regular file. 
*/ STOPIF( cb__add_entry(dir, utf8_path, NULL, utf8_copy_path, copy_rev, S_IFREG, NULL, 1, file_baton), NULL); sts=*file_baton; ex: RETURN_SVNERR(status); } svn_error_t *cb___open_file(const char *utf8_path, void *parent_baton, svn_revnum_t base_revision, apr_pool_t *file_pool, void **file_baton) { struct estat *dir=parent_baton; struct estat *sts; int status; int was_there; /* Do we get an prop-del for "svn:special", if the entry reverts to being * a file? * * We don't get "svn:special" for an entry that's returned at the same * revision as we reported it, so we wouldn't know that it's eg. a * symlink. * * Keep the same type, unless we're being told otherwise. */ STOPIF( cb__add_entry(dir, utf8_path, NULL, NULL, 0, 0, &was_there, 0, file_baton), NULL); sts=(struct estat*)*file_baton; /* Get the old value, so that we know what we had even if we don't get it * reported again. */ if (was_there) STOPIF( up__fetch_decoder(sts), NULL); sts->decoder_is_correct=1; ex: RETURN_SVNERR(status); } svn_error_t *cb___apply_textdelta(void *file_baton, const char *base_checksum UNUSED, apr_pool_t *pool UNUSED, svn_txdelta_window_handler_t *handler, void **handler_baton) { struct estat *sts UNUSED=file_baton; int status; status=0; if (url__current_has_precedence(sts->url)) ops__mark_changed_parentcc(sts, remote_status); *handler = cb__txdelta_discard; *handler_baton=sts; RETURN_SVNERR(status); } svn_error_t *cb___change_file_prop(void *file_baton, const char *utf8_name, const svn_string_t *value, apr_pool_t *pool) { int status; /* We do this additional call to get a meaningful backtrace. 
*/ STOPIF( cb___store_prop(file_baton, utf8_name, value, pool), NULL); ex: RETURN_SVNERR(status); } svn_error_t *cb___close_file(void *file_baton, const char *text_checksum, apr_pool_t *pool) { struct estat *sts=file_baton; int status; STOPIF( cb___close(sts), NULL); if (!S_ISDIR(sts->st.mode)) { if (sts->has_orig_md5 || sts->decoder) DEBUGP("Has an original MD5, %s not used", text_checksum); else if (text_checksum) STOPIF( cs__char2md5(text_checksum, NULL, sts->md5 ), NULL); } ex: RETURN_SVNERR(status); } /// FSVS GCOV MARK: cb___absent_file should not be executed svn_error_t *cb___absent_file(const char *utf8_path, void *parent_baton, apr_pool_t *pool) { struct estat *dir UNUSED=parent_baton; DEBUGP("in %s", __PRETTY_FUNCTION__); return SVN_NO_ERROR; } svn_error_t *cb___close_edit(void *edit_baton, apr_pool_t *pool UNUSED) { int status; struct estat *root UNUSED=edit_baton; status=0; /* For sync-repos the root was printed with a close_directory call, and * others print it in rev__do_changed(). 
*/ RETURN_SVNERR(status); } /// FSVS GCOV MARK: cb___abort_edit should not be executed svn_error_t *cb___abort_edit(void *edit_baton, apr_pool_t *pool UNUSED) { struct estat *sts UNUSED=edit_baton; return SVN_NO_ERROR; } const svn_delta_editor_t cb___change_recorder = { .set_target_revision = cb___set_target_revision, .open_root = cb___open_root, .delete_entry = cb___delete_entry, .add_directory = cb___add_directory, .open_directory = cb___open_directory, .change_dir_prop = cb___change_dir_prop, .close_directory = cb___close_directory, .absent_directory = cb___absent_directory, .add_file = cb___add_file, .open_file = cb___open_file, .apply_textdelta = cb___apply_textdelta, .change_file_prop = cb___change_file_prop, .close_file = cb___close_file, .absent_file = cb___absent_file, .close_edit = cb___close_edit, .abort_edit = cb___abort_edit, }; /** @} */ int cb___report_path_rev(struct estat *dir, const svn_ra_reporter2_t *reporter, void *report_baton, apr_pool_t *pool) { int status, i; struct estat *sts; svn_error_t *status_svn; char *fn; status=0; for(i=0; ientry_count; i++) { sts=dir->by_inode[i]; STOPIF( ops__build_path(&fn, sts), NULL ); /* We have to cut the "./" in front. */ /* We report the directories' revision too. */ /* As we're doing the children of a directory, there must always * be a parent. */ /* \todo: the parent might be from another URL. What should we do? */ if ( sts->repos_rev != sts->parent->repos_rev) { DEBUGP("reporting %s at %llu", fn, (t_ull)sts->repos_rev); STOPIF_SVNERR( reporter->set_path, (report_baton, fn+2, sts->repos_rev, 0, "", pool)); } if (S_ISDIR(sts->st.mode) && sts->other_revs) { STOPIF( cb___report_path_rev(sts, reporter, report_baton, pool), NULL); } } ex: return status; } /** Helper function for cb__remove_from_url(). * * Returns the highest-priority URL that's used by an entry below \a sts * and which has a lower priority than \a to_remove in \a hp; this must be * initialized to \c NULL before calling. 
*/ int cb___remover(struct estat *sts, struct url_t *to_remove, struct url_t **hp, int *has_changes) { int status; struct estat **list; struct url_t *hp_url; int child_changes; status=0; DEBUGP("clean tree %s url %s", sts->name, to_remove->name); if (ops__has_children(sts)) { hp_url=NULL; child_changes=0; list=sts->by_inode; while (*list) { STOPIF( cb___remover(*list, to_remove, &hp_url, &child_changes), NULL); list++; } if (sts->parent && hp_url) { /* It's an error if any child has a higher priority URL than the * parent, unless this gets removed now. */ BUG_ON(sts->url != to_remove && url__sorter(hp_url, sts->url) < 0); sts->url=hp_url; } } if (!sts->parent) { } else { DEBUGP("entry %s has url %s", sts->name, sts->url->name); if (sts->url == to_remove) { DEBUGP("really removing"); sts->remote_status=FS_REMOVED; ops__mark_changed_parentcc(sts, remote_status); *has_changes=1; if (action->repos_feedback) STOPIF( action->repos_feedback(sts), NULL); } else { if (!*hp) *hp=sts->url; else if (url__sorter(sts->url, *hp) < 0) *hp=sts->url; DEBUGP("New hp %s", (*hp)->name); } } if (child_changes) sts->remote_status |= FS_CHILD_CHANGED; ex: return status; } /** -. * While recursion we look for the highest priority URL in the children * (within each level); if there is one, we mark the directory as belonging * to that URL. * * Will be easier with mixed-WC operation; currently it's not correct if * there are overlayed non-directory entries. * */ int cb__remove_from_url(struct estat *root, struct url_t *to_remove, int *was_changed) { struct url_t *nevermind; int status; *was_changed=0; STOPIF( cb___remover(root, to_remove, &nevermind, was_changed), NULL); to_remove->current_rev=0; ex: return status; } /** -. */ int cb__remove_url(struct estat *root, struct url_t *to_remove) { int status; struct url_t *nevermind; int vvoid; STOPIF( cb___remover(root, to_remove, &nevermind, &vvoid), NULL); to_remove->current_rev=0; url__must_write_defs=1; ex: return status; } /** -. 
* Just a proxy; calls cb__record_changes_mixed() with the \a root, \a target * and \a pool, and default values for the rest. */ int cb__record_changes(struct estat *root, svn_revnum_t target, apr_pool_t *pool) { int status; STOPIF( cb__record_changes_mixed(root, target, NULL, 0, pool), NULL); ex: return status; } /** -. * Calls the svn libraries and records which entries would be changed * on this update on \c current_url. * \param root The root entry of this wc tree * \param target The target revision. \c SVN_INVALID_REVNUM is not valid. * \param other_paths A \c NULL-terminated list of paths that are sent to * the svn_ra_reporter2_t::set_path(). * \param other_revs The revision to be sent for \a other_paths. * \param pool An APR-pool. * * When a non-directory entry gets replaced by a directory, its * MD5 is lost (because the directory is initialized to * \c entry_count=0 , \c by_inode=by_name=NULL ); that should not matter, * since we have modification flags in \c entry_status . * * If a non-directory gets replaced by a directory, \c entry_count and * \c by_inode are kept - we need them for up__rmdir() to remove * known child entries. * * Please note that it's not possible to run \e invisible entries (that are * not seen because some higher priority URL overlays them) to run as \c * baton==NULL (although that would save quite a bit of * url__current_has_precedence() calls), because it's possible that some * file in a directory below can be seen. * * \a other_paths is a \c NULL -terminated list of pathnames (which may * have the \c "./" in front, ie. the \e normalized paths) that are to be * reported at revision \a other_revs. * * If \a other_paths is \c NULL, or doesn't include an "." entry, * the WC root is reported to be at \c current_url->current_rev or, if this * is \c 0, to be at \a target, but empty. 
* */ int cb__record_changes_mixed(struct estat *root, svn_revnum_t target, char *other_paths[], svn_revnum_t other_revs, apr_pool_t *pool) { int status; svn_error_t *status_svn; void *report_baton; const svn_ra_reporter2_t *reporter; int sent_wcroot; char *cur, **op; status=0; cb___dest_rev=target; STOPIF_SVNERR( svn_ra_do_status, (current_url->session, &reporter, &report_baton, "", target, TRUE, &cb___change_recorder, root, pool) ); sent_wcroot=0; cur=NULL; op=NULL; if (other_paths) { op=other_paths; while ( (cur=*op) ) { if (cur[0] == '.' && cur[1] == 0) break; op++; } } /* If this is a checkout, we need to set the base directory at HEAD, but * empty. We cannot use the base at revision 0, because it probably didn't * exist there. */ if (cur) STOPIF_SVNERR( reporter->set_path, (report_baton, "", other_revs, FALSE, NULL, pool)); else if (current_url->current_rev == 0) STOPIF_SVNERR( reporter->set_path, (report_baton, "", target, TRUE, NULL, pool)); else STOPIF_SVNERR( reporter->set_path, (report_baton, "", current_url->current_rev, FALSE, NULL, pool)); if (other_paths) { /* The root entry must be the first to be reported (because of * subversion/libsvn_repos/reporter.c). * So we have to loop through the list - in case the user does * "fsvs diff file ." * or something like that. */ while ( (cur=*other_paths) ) { /* cur loops through the entries, but *op is still set. */ if (op != other_paths) { DEBUGP("reporting %s@%llu", cur, (t_ull)other_revs); if (cur[0] == '.' && cur[1] == PATH_SEPARATOR) cur+=2; STOPIF_SVNERR( reporter->set_path, (report_baton, cur, other_revs, FALSE, NULL, pool)); } other_paths++; } } DEBUGP("Getting changes from %llu to %llu", (t_ull)current_url->current_rev, (t_ull)target); #if 0 STOPIF( cb___report_path_rev( root, reporter, report_baton, pool), NULL); #endif STOPIF_SVNERR( reporter->finish_report, (report_baton, global_pool)); current_url->current_rev=cb___dest_rev; ex: return status; } /** -. 
* We need a valid revision number, \c SVN_INVALID_REVNUM (for \c HEAD) * isn't. */ int cb__does_path_exist(svn_ra_session_t *session, char *path, svn_revnum_t rev, int *exists, apr_pool_t *pool) { int status; svn_dirent_t *dirent; svn_error_t *status_svn; status=0; STOPIF_SVNERR( svn_ra_stat, (session, path, rev, &dirent, pool)); *exists = dirent != NULL; ex: return status; } fsvs-1.2.6/src/resolve.c0000644000202400020240000001100011104767264014061 0ustar marekmarek/************************************************************************ * Copyright (C) 2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include "global.h" #include "est_ops.h" #include "helper.h" #include "url.h" #include "status.h" #include "resolve.h" #include "waa.h" #include "hash_ops.h" #include "actions.h" /** \file * The \ref resolve command source file. * * */ /** \addtogroup cmds * * \section resolve * * \code * fsvs resolve PATH [PATH...] * \endcode * * When FSVS tries to update local files which have been changed, a * conflict might occur. (For various ways of handling these please see the * \ref o_conflict "conflict" option.) * * This command lets you mark such conflicts as resolved. */ /** -. * * The conflict flag \b must be set by this function, so that it knows * whether it has to purge any (wrongly) pre-existing \ref cflct file or to * just append. * */ int res__mark_conflict(struct estat *sts, ...) { int status; char *filename; va_list va; int filehdl; int len; struct iovec io[2] = { { 0 }, { .iov_base="\n", .iov_len=1 } }; status=0; va_start(va, sts); filehdl=-1; STOPIF( ops__build_path(&filename, sts), NULL); STOPIF( waa__open_byext(filename, WAA__CONFLICT_EXT, (sts->flags & RF_CONFLICT) ? 
WAA__APPEND : WAA__WRITE, & filehdl), NULL ); while ( (filename =va_arg(va, char*)) ) { len=strlen(filename); io[0].iov_base=filename; /* Take the \0 */ io[0].iov_len=len+1; STOPIF_CODE_ERR( writev(filehdl, io, sizeof(io)/sizeof(io[0])) != len+1+1, errno, "Writing the conflict list for %s", filename); } sts->flags |= RF_CONFLICT; ex: if (filehdl != -1) { len=waa__close(filehdl, status); filehdl=-1; STOPIF_CODE_ERR( !status && len==-1, errno, "Closing the conflict list for %s", filename); } return status; } /** -. * */ int res__action(struct estat *sts) { int status; status=0; if (sts->flags & RF_ISNEW) { /* We're not going recursively, so there's no need to process * sub-entries. */ sts->to_be_ignored=1; } else { if ( sts->flags & RF_CONFLICT ) STOPIF( res__remove_aux_files(sts), NULL); STOPIF( st__status(sts), NULL); } ex: return status; } /** -. * */ int res__remove_aux_files(struct estat *sts) { int status; char *filename, *to_remove; int filehdl; int len; char *mapped; struct sstat_t st; status=0; filehdl=-1; mapped=MAP_FAILED; STOPIF( ops__build_path(&filename, sts), NULL); STOPIF( waa__open_byext(filename, WAA__CONFLICT_EXT, WAA__READ, &filehdl), NULL ); STOPIF( hlp__fstat( filehdl, &st), NULL); mapped=mmap(NULL, st.size, PROT_READ, MAP_SHARED, filehdl, 0); STOPIF_CODE_ERR( mapped==MAP_FAILED, errno, "Can't map handle %d", filehdl); to_remove=mapped; while ( to_remove - mapped != st.size ) { BUG_ON(to_remove - mapped > st.size); if (unlink(to_remove) == -1) STOPIF_CODE_ERR( errno != ENOENT, errno, "Cannot remove conflict file \"%s\" (from \"%s\")", to_remove, filename); to_remove += strlen(to_remove)+1; if (*to_remove == '\n') to_remove++; } sts->flags &= ~RF_CONFLICT; STOPIF( waa__delete_byext(filename, WAA__CONFLICT_EXT, 0), NULL); ex: if (filehdl != -1) { len=waa__close(filehdl, status); filehdl=-1; STOPIF_CODE_ERR( !status && len==-1, errno, "Closing the conflict list for %s", filename); } if (mapped != MAP_FAILED) STOPIF_CODE_ERR( munmap(mapped, st.size) 
== -1, errno, "Cannot munmap()"); return status; } /** -. * */ int res__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; status=0; /* Don't recurse. */ opt_recursive=-1; STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); if (argc == 0) ac__Usage_this(); STOPIF( url__load_nonempty_list(NULL, 0), NULL); /* Maybe we should add a flag saying that we don't want unknown entries, * like it can easily happen with "fsvs resolve *". * But then we'd get an error, and this is not so user-friendly like just * ignoring these entries in res__action(). */ status=waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1); if (status == -ENOENT) STOPIF(status, "!No data about current entries is available."); STOPIF(status, NULL); STOPIF( waa__output_tree(root), NULL); ex: return status; } fsvs-1.2.6/src/remote.h0000644000202400020240000000113310756467655013725 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __REMOTE_H__ #define __REMOTE_H__ /** \file * \ref remote-status header file. * * Currently empty, as it's the same as an update - without getting the * files' contents and changing local data. */ #endif fsvs-1.2.6/src/commit.h0000644000202400020240000000146410756467655013731 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
************************************************************************/ #ifndef __COMMIT_H__ #define __COMMIT_H__ #include #include "actions.h" /** \file * \ref commit action header file. */ /** Mark entries' parents as to-be-traversed. */ action_t ci__action; /* Main commit function. */ work_t ci__work; /** Sets the given revision \a rev recursive on all entries correlating to * \a current_url. */ int ci__set_revision(struct estat *this, svn_revnum_t rev); #endif fsvs-1.2.6/src/remote.c0000644000202400020240000000160611264677022013707 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * \ref remote-status command. * */ /** \addtogroup cmds * * \section remote-status * * \code * fsvs remote-status PATH [-r rev] * \endcode * * This command looks into the repository and tells you which files would * get changed on an \ref update - it's a dry-run for \ref update . * * Per default it compares to \c HEAD, but you can choose another * revision with the \c -r parameter. * * Please see the \ref update "update" documentation for details regarding * multi-URL usage. * */ fsvs-1.2.6/src/ignore.h0000644000202400020240000000340111157411270013667 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __IGNORE_H__ #define __IGNORE_H__ /** \file * \ref ignore patterns header file. 
*/ #include "global.h" #include "actions.h" /** \name Where a new pattern should be inserted. */ /** @{ */ /** At the front. */ #define PATTERN_POSITION_START (0) /** Behind all other patterns (appended). */ #define PATTERN_POSITION_END (-1) /** @} */ extern int ign__max_group_name_len; /** Group structure. * Needed by commit, too. */ struct grouping_t { char *group_name; apr_hash_t *auto_props; struct url_t *url; int is_ignore:1; int is_take:1; }; /** Ignore command main function. */ work_t ign__work; /** Rel-ignore command main function. */ work_t ign__rign; /** Adds a list of new ignore patterns to the internal list. */ int ign__new_pattern(unsigned count, char *pattern[], char *ends, int user_pattern, int position); /** Tells whether the given entry is to be ignored. */ int ign__is_ignore(struct estat *sts, int *is_ignored); /** Loads the ignore list from the WAA. */ int ign__load_list(char *dir); /** Print the grouping statistics. */ int ign__print_group_stats(FILE *output); enum { FORCE_IGNORE=0, ALLOW_GROUPS, }; /** List of bits for pattern definitons. * @{ */ #define HAVE_DIR 1 #define HAVE_CASE 2 #define HAVE_GROUP 4 #define HAVE_MODE 8 #define HAVE_PATTERN 16 #define HAVE_PATTERN_SUBST 32 /** @} */ /** For the help text. */ #define hlp_ignore hlp_groups #endif fsvs-1.2.6/src/commit.c0000644000202400020240000010756312043532166013710 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * \ref commit action. * * This is a bit hairy in that the order in which we process files (sorted * by inode, not in the directory structure) is not allowed * for a subversion editor. 
* * We have to read the complete tree, get the changes and store what we * want to do, and send these changes in a second run. * * * \section commit_2_revs Committing two revisions at once * Handling identical files; using hardlinks; creating two revisions on * commit. * * There are some use-cases where we'd like to store the data only a single * time in the repository, so that multiple files are seen as identical: * - hardlinks should be stored as hardlink; but subversion doesn't allow * something like that currently. Using some property pointing to the * "original" file would be some way; but for compatibility with other * subversion clients the data would have to be here, too. \n * Using copy-from would mess up the history of the file. * - Renames of changed files. Subversion doesn't accept copy-from links to * new files; we'd have to create two revisions: one with the data, and * the other with copyfrom information (or the other way around). * */ /** \addtogroup cmds * * \section commit * * \code * fsvs commit [-m "message"|-F filename] [-v] [-C [-C]] [PATH [PATH ...]] * \endcode * * Commits (parts of) the current state of the working copy into the * repository. * * * \subsection Example * * The working copy is \c /etc , and it is set up and committed already. \n * Then \c /etc/hosts and \c /etc/inittab got modified. Since these are * non-related changes, you'd like them to be in separate commits. * * So you simply run these commands: * \code * fsvs commit -m "Added some host" /etc/hosts * fsvs commit -m "Tweaked default runlevel" /etc/inittab * \endcode * * If the current directory is \c /etc you could even drop the \c /etc/ in * front, and use just the filenames. * * Please see \ref status for explanations on \c -v and \c -C . \n * For advanced backup usage see also \ref FSVS_PROP_COMMIT_PIPE "the * commit-pipe property". 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "global.h" #include "status.h" #include "checksum.h" #include "waa.h" #include "cache.h" #include "est_ops.h" #include "props.h" #include "options.h" #include "ignore.h" #include "cp_mv.h" #include "racallback.h" #include "url.h" #include "helper.h" /** Typedef needed for \a ci___send_user_props(). See there. */ typedef svn_error_t *(*change_any_prop_t) (void *baton, const char *name, const svn_string_t *value, apr_pool_t *pool); /** Counts the entries committed on the current URL. */ unsigned committed_entries; /** Remembers the to-be-made path in the repository, in UTF-8. */ char *missing_path_utf8; /** The precalculated length. */ int missing_path_utf8_len; /** -. * */ int ci__set_revision(struct estat *this, svn_revnum_t rev) { int i; /* should be benchmarked. * perhaps use better locality by doing levels at once. */ if (this->url == current_url) this->repos_rev=rev; if (S_ISDIR(this->st.mode)) for(i=0; ientry_count; i++) ci__set_revision(this->by_inode[i], rev); return 0; } /** Callback for successfull commits. * * This is the only place that gets the new revision number * told. * * \c svn_ra.h does not tell whether these strings are really UTF8. I think * they must be ASCII, except if someone uses non-ASCII-user names ... * which nobody does. */ svn_error_t * ci__callback ( svn_revnum_t new_revision, const char *utf8_date, const char *utf8_author, void *baton) { struct estat *root UNUSED=baton; int status; status=0; if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("committed revision\t%ld on %s as %s\n", new_revision, utf8_date, utf8_author); /* recursively set the new revision */ // STOPIF( ci__set_revision(root, new_revision), NULL); current_url->current_rev = new_revision; //ex: RETURN_SVNERR(status); } /** -. * * This callback is called by input_tree and build_tree. 
*/ int ci__action(struct estat *sts) { int status; char *path; STOPIF( ops__build_path(&path, sts), NULL); STOPIF_CODE_ERR( sts->flags & RF_CONFLICT, EBUSY, "!The entry \"%s\" is still marked as conflict.", path); if (sts->entry_status || (sts->flags & RF___COMMIT_MASK) ) ops__mark_parent_cc(sts, entry_status); STOPIF( st__progress(sts), NULL); ex: return status; } /** Removes the flags saying that this entry was copied, recursively. * * Does stop on new copy-bases. * * Is needed because a simple "cp -a" wouldn't even go down into * the child-entries - there's nothing to do there! */ void ci___unset_copyflags(struct estat *root) { struct estat **sts; /* Delete the RF_ADD and RF_COPY_BASE flag, but set the FS_NEW status * instead. */ root->flags &= ~(RF_ADD | RF_COPY_BASE | RF_COPY_SUB); /* Set the current url for this entry. */ root->url=current_url; if (ops__has_children(root)) { sts=root->by_inode; while (*sts) { if (! ( (*sts)->flags & RF_COPY_BASE) ) { ci___unset_copyflags(*sts); } sts++; } } } #define TEST_FOR_OUT_OF_DATE(_sts, _s_er, ...) \ do { if (_s_er) { \ if (_s_er->apr_err == SVN_ERR_FS_TXN_OUT_OF_DATE) \ { \ char *filename; \ if (ops__build_path(&filename, _sts)) \ filename="(internal error)"; \ STOPIF( EBUSY, \ "!The entry \"%s\" is out-of-date;\n" \ "Please update your working copy.", \ filename); \ goto ex; \ } \ STOPIF( EBUSY, __VA_ARGS__); \ } } while (0) /* Convenience function; checks for \c FSVS_PROP_COMMIT_PIPE. * By putting that here we can avoid sending most of the parameters. */ inline int send_a_prop(void *baton, int store_encoder, struct estat *sts, change_any_prop_t function, char *key, svn_string_t *value, apr_pool_t *pool) { int status; svn_error_t *status_svn; status=0; /* We could tell the parent whether we need this property value, to avoid * copying and freeing; but it's no performance problem, I think. 
*/ if (store_encoder && strcmp(key, propval_commitpipe) == 0) { if (value) STOPIF( hlp__strdup( &sts->decoder, value->data), NULL); else sts->decoder=NULL; } status_svn=function(baton, key, value, pool); TEST_FOR_OUT_OF_DATE(sts, status_svn, "send user props"); ex: return status; } /** Send the user-defined properties. * * The property table is left cleaned up, ie. any deletions that were * ordered by the user have been done -- no properties with \c * prp__prop_will_be_removed() will be here. * * If \a store_encoder is set, \c sts->decoder gets set from the value of * the commit-pipe. * * \c auto-props from groupings are sent, too. * */ int ci___send_user_props(void *baton, struct estat *sts, change_any_prop_t function, int store_encoder, apr_pool_t *pool) { int status; datum key, value; hash_t db; svn_string_t *str; db=NULL; /* First do auto-props. */ STOPIF( ops__apply_group(sts, &db, pool), NULL); /* Do user-defined properties. * Could return ENOENT if none. */ if (db) { status=prp__first(db, &key); while (status==0) { STOPIF( prp__fetch(db, key, &value), NULL); if (hlp__is_special_property_name(key.dptr)) { DEBUGP("ignoring %s - should not have been taken?", key.dptr); } else if (prp__prop_will_be_removed(value)) { DEBUGP("removing property %s", key.dptr); STOPIF( send_a_prop(baton, store_encoder, sts, function, key.dptr, NULL, pool), NULL); STOPIF( hsh__register_delete(db, key), NULL); } else { DEBUGP("sending property %s=(%d)%.*s", key.dptr, value.dsize, value.dsize, value.dptr); str=svn_string_ncreate(value.dptr, value.dsize-1, pool); STOPIF( send_a_prop(baton, store_encoder, sts, function, key.dptr, str, pool), NULL); } status=prp__next( db, &key, &key); } /* Anything but ENOENT spells trouble. */ if (status != ENOENT) STOPIF(status, NULL); status=0; } /* A hsh__close() does the garbage collection. */ STOPIF( hsh__close(db, status), NULL); ex: return status; } /** Send the meta-data-properties for \a baton. 
* * We hope that group/user names are ASCII; the names of "our" properties * are known, and contain no characters above \\x80. * * We get the \a function passed, because subversion has different property * setters for files and directories. * * If \a props is not \c NULL, we return the properties' handle. */ svn_error_t *ci___set_props(void *baton, struct estat *sts, change_any_prop_t function, apr_pool_t *pool) { const char *ccp; svn_string_t *str; int status; svn_error_t *status_svn; status=0; /* The unix-mode property is not sent for a symlink, as there's no * lchmod(). */ if (!S_ISLNK(sts->st.mode)) { /* mode */ str=svn_string_createf (pool, "0%03o", (int)(sts->st.mode & 07777)); status_svn=function(baton, propname_umode, str, pool); if (status_svn) goto error; } /* owner */ str=svn_string_createf (pool, "%lu %s", (unsigned long)sts->st.uid, hlp__get_uname(sts->st.uid, "") ); status_svn=function(baton, propname_owner, str, pool); if (status_svn) goto error; /* group */ str=svn_string_createf (pool, "%lu %s", (unsigned long)sts->st.gid, hlp__get_grname(sts->st.gid, "") ); status_svn=function(baton, propname_group, str, pool); if (status_svn) goto error; /* mtime. Extra const char * needed. */ ccp=(char *)svn_time_to_cstring ( apr_time_make( sts->st.mtim.tv_sec, sts->st.mtim.tv_nsec/1000), pool); str=svn_string_create(ccp, pool); status_svn=function(baton, propname_mtime, str, pool); if (status_svn) goto error; ex: RETURN_SVNERR(status); error: TEST_FOR_OUT_OF_DATE(sts, status_svn, "set meta-data"); goto ex; } /** Commit function for non-directory entries. * * Here we handle devices, symlinks and files. * * The given \a baton is already for the item; we got it from \a add_file * or \a open_file. * We just have to put data in it. 
* */ svn_error_t *ci__nondir(const svn_delta_editor_t *editor, struct estat *sts, void *baton, apr_pool_t *pool) { svn_txdelta_window_handler_t delta_handler; void *delta_baton; svn_error_t *status_svn; svn_stream_t *s_stream; char *cp; char *filename; int status; svn_string_t *stg; apr_file_t *a_stream; svn_stringbuf_t *str; struct encoder_t *encoder; int transfer_text, has_manber; str=NULL; a_stream=NULL; s_stream=NULL; encoder=NULL; STOPIF( ops__build_path(&filename, sts), NULL); /* The only "real" information symlinks have is the target * they point to. We don't set properties which won't get used on * update anyway - that saves a tiny bit of space. * What we need to send (for symlinks) are the user-defined properties. * */ /* Should we possibly send the properties only if changed? Would not make * much difference, bandwidth-wise. */ /* if ((sts->flags & RF_PUSHPROPS) || (sts->entry_status & (FS_META_CHANGED | FS_NEW)) ) */ STOPIF( ci___send_user_props(baton, sts, editor->change_file_prop, 1, pool), NULL); STOPIF_SVNERR( ci___set_props, (baton, sts, editor->change_file_prop, pool) ); /* By now we should know if our file really changed. */ BUG_ON( sts->entry_status & FS_LIKELY ); /* However, sending fulltext only if it really changed DOES make * a difference if you do not have a gigabit pipe to your * server. ;) * The RF_ADD was replaced by FS_NEW above. */ DEBUGP("%s: status %s; flags %s", sts->name, st__status_string(sts), st__flags_string_fromint(sts->flags)); transfer_text= sts->entry_status & (FS_CHANGED | FS_NEW | FS_REMOVED); /* In case the file is identical to the original copy source, we need * not send the data to the server. * BUT we have to store the correct MD5 locally; as the source file may * have changed, we re-calculate it - that has the additional advantage * that the manber-hashes get written, for faster comparision next time. 
* * I thought about using cs__compare_file() in the local check sequence * to build a new file; but if anything goes wrong later, the file would * be overwritten with the wrong data. * That's true if something goes wrong here, too. * * Another idea would be to build the new manber file with another name, * and only rename if it actually was committed ... but there's a race, * too. And we couldn't abort the check on the first changed bytes, and * we'd need doubly the space, ... * * TODO: run the whole fsvs commit process against an unionfs, and use * that for local transactions. */ if (!transfer_text && !(sts->flags & RF___IS_COPY)) { DEBUGP("hasn't changed, and no copy."); } else { has_manber=0; switch (sts->st.mode & S_IFMT) { case S_IFLNK: STOPIF( ops__link_to_string(sts, filename, &cp), NULL); STOPIF( hlp__local2utf8(cp, &cp, -1), NULL); /* It is not defined whether svn_stringbuf_create copies the string, * takes the character pointer into the pool, or whatever. * Knowing people wanted. */ str=svn_stringbuf_create(cp, pool); break; case S_IFBLK: case S_IFCHR: /* See above */ /* We only put ASCII in this string */ str=svn_stringbuf_create( ops__dev_to_filedata(sts), pool); break; case S_IFREG: STOPIF( apr_file_open(&a_stream, filename, APR_READ, 0, pool), "open file \"%s\" for reading", filename); s_stream=svn_stream_from_aprfile (a_stream, pool); /* We need the local manber hashes and MD5s to detect changes; * the remote values would be needed for delta transfers. */ has_manber= (sts->st.size >= CS__MIN_FILE_SIZE); if (has_manber) STOPIF( cs__new_manber_filter(sts, s_stream, &s_stream, pool), NULL ); /* That's needed only for actually putting the data in the * repository - for local re-calculating it isn't. */ if (transfer_text && sts->decoder) { /* The user-defined properties have already been sent, so the * propval_commitpipe would already be cleared; we don't need to * check for prp__prop_will_be_removed(). 
*/ STOPIF( hlp__encode_filter(s_stream, sts->decoder, 0, filename, &s_stream, &encoder, pool), NULL ); encoder->output_md5= &(sts->md5); IF_FREE(sts->decoder); } break; default: BUG("invalid/unknown file type 0%o", sts->st.mode); } /* for special nodes */ if (str) s_stream=svn_stream_from_stringbuf (str, pool); BUG_ON(!s_stream); if (transfer_text) { DEBUGP("really sending ..."); STOPIF_SVNERR( editor->apply_textdelta, (baton, NULL, // checksum of old file, pool, &delta_handler, &delta_baton)); /* If we're transferring the data, we always get an MD5 here. We can * take the local value, if it had to be encoded. */ STOPIF_SVNERR( svn_txdelta_send_stream, (s_stream, delta_handler, delta_baton, sts->md5, pool) ); DEBUGP("after sending encoder=%p", encoder); } else { DEBUGP("doing local MD5."); /* For a non-changed entry, simply pass the data through the MD5 (and, * depending on filesize, the manber filter). * If the manber filter already does the MD5, we don't need it a second * time. */ STOPIF( hlp__stream_md5(s_stream, has_manber ? NULL : sts->md5), NULL); } STOPIF_SVNERR( svn_stream_close, (s_stream) ); /* If it's a special entry (device/symlink), set the special flag. */ if (str) { stg=svn_string_create(propval_special, pool); STOPIF_SVNERR( editor->change_file_prop, (baton, propname_special, stg, pool) ); } /* If the entry was encoded, send the original MD5 as well. */ if (encoder) { cp=cs__md5tohex_buffered(sts->md5); DEBUGP("Sending original MD5 as %s", cp); stg=svn_string_create(cp, pool); STOPIF_SVNERR( editor->change_file_prop, (baton, propname_origmd5, stg, pool) ); } } STOPIF( cs__set_file_committed(sts), NULL); ex: if (a_stream) { /* As this file was opened read only, we can dismiss any errors. * We could give them only if everything else worked ... */ apr_file_close(a_stream); } RETURN_SVNERR(status); } /** Commit function for directories. 
* */ svn_error_t *ci__directory(const svn_delta_editor_t *editor, struct estat *dir, void *dir_baton, apr_pool_t *pool) { void *baton; int status; struct estat *sts; apr_pool_t *subpool; int i, exists_now; char *filename; char *utf8_filename, *tmp; svn_error_t *status_svn; char *src_path; svn_revnum_t src_rev; struct sstat_t stat; struct cache_entry_t *utf8fn_plus_missing; int utf8fn_len; status=0; utf8fn_plus_missing=NULL; subpool=NULL; DEBUGP("commit_dir with baton %p", dir_baton); for(i=0; ientry_count; i++) { sts=dir->by_inode[i]; /* The flags are stored persistently; we have to check whether this * entry shall be committed. */ if ( (sts->flags & RF___COMMIT_MASK) && sts->do_this_entry) { /* Did we change properties since last commit? Then we have something * to do. */ if (sts->flags & RF_PUSHPROPS) sts->entry_status |= FS_PROPERTIES; } else if (sts->entry_status) { /* The entry_status is set depending on the do_this_entry already; * if it's not 0, it's got to be committed. */ /* Maybe a child needs attention (with FS_CHILD_CHANGED), so we have * to recurse. */ } else /* Completely ignore item if nothing to be done. 
*/ continue; /* clear an old pool */ if (subpool) apr_pool_destroy(subpool); /* get a fresh pool */ STOPIF( apr_pool_create_ex(&subpool, pool, NULL, NULL), "no pool"); STOPIF( ops__build_path(&filename, sts), NULL); /* As the path needs to be canonical we strip the ./ in front, and * possibly have to prepend some path (see option mkdir_base) */ STOPIF( hlp__local2utf8(filename+2, &utf8_filename, -1), NULL ); if (missing_path_utf8) { utf8fn_len=strlen(utf8_filename); STOPIF( cch__entry_set(&utf8fn_plus_missing, 0, NULL, missing_path_utf8_len + 1 + utf8fn_len + 1, 0, &tmp), NULL); strcpy(tmp, missing_path_utf8); tmp[missing_path_utf8_len]='/'; strcpy(tmp + missing_path_utf8_len +1, utf8_filename); utf8_filename=tmp; } DEBUGP("%s: action (%s), updated mode 0%o, flags %X, filter %d", filename, st__status_string(sts), sts->st.mode, sts->flags, ops__allowed_by_filter(sts)); if (ops__allowed_by_filter(sts)) STOPIF( st__status(sts), NULL); exists_now= !(sts->flags & RF_UNVERSION) && ( (sts->entry_status & (FS_NEW | FS_CHANGED | FS_META_CHANGED)) || (sts->flags & (RF_ADD | RF_PUSHPROPS | RF_COPY_BASE)) ); if ( (sts->flags & RF_UNVERSION) || (sts->entry_status & FS_REMOVED) ) { DEBUGP("deleting %s", sts->name); /* that's easy :-) */ STOPIF_SVNERR( editor->delete_entry, (utf8_filename, SVN_INVALID_REVNUM, dir_baton, subpool) ); committed_entries++; if (!exists_now) { DEBUGP("%s=%d doesn't exist anymore", sts->name, i); /* remove from data structures */ STOPIF( ops__delete_entry(dir, NULL, i, UNKNOWN_INDEX), NULL); STOPIF( waa__delete_byext(filename, WAA__FILE_MD5s_EXT, 1), NULL); STOPIF( waa__delete_byext(filename, WAA__PROP_EXT, 1), NULL); i--; continue; } } /* If there something to do - get a baton. * Else we're finished with this one. */ if (!exists_now && !(sts->entry_status & FS_CHILD_CHANGED)) continue; /* If we would send some data, verify the state of the entry. * Maybe it's a temporary file, which is already deleted. 
* As we'll access this entry in a few moments, the additional lookup * doesn't hurt much. * * (Although I'd be a bit happier if I found a way to do that better ... * currently I split by new/existing entry, and then by * directory/everything else. * Maybe I should change that logic to *only* split by entry type. * But then I'd still have to check for directories ...) * * So "Just Do It" (tm). */ /* access() would possibly be a bit lighter, but doesn't work * for broken symlinks. */ /* TODO: Could we use FS_REMOVED here?? */ if (hlp__lstat(filename, &stat)) { /* If an entry doesn't exist, but *should*, as it's marked RF_ADD, * we fail (currently). * Could be a warning with a default action of STOP. */ STOPIF_CODE_ERR( sts->flags & RF_ADD, ENOENT, "Entry %s should be added, but doesn't exist.", filename); DEBUGP("%s doesn't exist, ignoring (%d)", filename, errno); continue; } /* In case this entry is a directory that's only done because of its * children we shouldn't change its known data - we'd silently change * eg. the mtime. */ if (sts->do_this_entry && ops__allowed_by_filter(sts)) { sts->st=stat; DEBUGP("set st for %s", sts->name); } /* We need a baton. */ baton=NULL; /* If this entry has the RF_ADD or RF_COPY_BASE flag set, or is FS_NEW, * it is new (as far as subversion is concerned). * If this is an implicitly copied entry, subversion already knows * about it, so use open_* instead of add_*. */ if ((sts->flags & (RF_ADD | RF_COPY_BASE) ) || (sts->entry_status & FS_NEW) ) { /* New entry, fetch handle via add_* below. */ } else { status_svn= (S_ISDIR(sts->st.mode) ? editor->open_directory : editor->open_file) ( utf8_filename, dir_baton, current_url->current_rev, subpool, &baton); DEBUGP("opening %s with base %llu", filename, (t_ull)current_url->current_rev); status_svn= (S_ISDIR(sts->st.mode) ? 
editor->open_directory : editor->open_file) ( utf8_filename, dir_baton, current_url->current_rev, subpool, &baton); TEST_FOR_OUT_OF_DATE(sts, status_svn, "%s(%s) returns %d", S_ISDIR(sts->st.mode) ? "open_directory" : "open_file", filename, status_svn->apr_err); DEBUGP("baton for mod %s %p (parent %p)", sts->name, baton, dir_baton); } if (!baton) { DEBUGP("new %s (parent %p)", sts->name, dir_baton); /* Maybe that test should be folded into cm__get_source -- that would * save the assignments in the else-branch. * But we'd have to check for ENOENT again - it's not allowed if * RF_COPY_BASE is set, but possible if this flag is not set. So we'd * not actually get much. */ if (sts->flags & RF_COPY_BASE) { status=cm__get_source(sts, filename, &src_path, &src_rev, 1); BUG_ON(status == ENOENT, "copy but not copied?"); STOPIF(status, NULL); } else { /* Set values to "not copied". */ src_path=NULL; src_rev=SVN_INVALID_REVNUM; } /* TODO: src_sts->entry_status newly added? Then remember for second * commit! * */ DEBUGP("adding %s with %s:%ld", filename, src_path, src_rev); /** \name STOPIF_SVNERR_INDIR */ status_svn = (S_ISDIR(sts->st.mode) ? editor->add_directory : editor->add_file) (utf8_filename, dir_baton, src_path, src_rev, subpool, &baton); TEST_FOR_OUT_OF_DATE(sts, status_svn, "%s(%s, source=\"%s\"@%s) returns %d", S_ISDIR(sts->st.mode) ? "add_directory" : "add_file", filename, src_path, hlp__rev_to_string(src_rev), status_svn->apr_err); DEBUGP("baton for new %s %p (parent %p)", sts->name, baton, dir_baton); /* Copied entries need their information later in ci__nondir(). */ if (!(sts->flags & RF_COPY_BASE)) { sts->flags &= ~RF_ADD; sts->entry_status |= FS_NEW | FS_META_CHANGED; } } committed_entries++; DEBUGP("doing changes, flags=%X", sts->flags); /* Now we have a baton. Do changes. 
*/ if (S_ISDIR(sts->st.mode)) { STOPIF_SVNERR( ci__directory, (editor, sts, baton, subpool) ); STOPIF_SVNERR( editor->close_directory, (baton, subpool) ); } else { STOPIF_SVNERR( ci__nondir, (editor, sts, baton, subpool) ); STOPIF_SVNERR( editor->close_file, (baton, NULL, subpool) ); } /* If it's copy base, we need to clean up all flags below; else we * just remove an (ev. set) add-flag. * We cannot do that earlier, because eg. ci__nondir() needs this * information. */ if (sts->flags & RF_COPY_BASE) ci___unset_copyflags(sts); /* Now this paths exists in this URL. */ if (url__current_has_precedence(sts->url)) { DEBUGP("setting URL of %s", filename); sts->url=current_url; sts->repos_rev = SET_REVNUM; } } /* When a directory has been committed (with all changes), * we can drop the check flag. * If we only do parts of the child list, we must set it, so that we know * to check for newer entries on the next status. (The directory * structure must possibly be built in the repository, so we have to do * each layer, and after a commit we take the current timestamp -- so we * wouldn't see changes that happened before the partly commit.) */ if (! (dir->do_this_entry && ops__allowed_by_filter(dir)) ) dir->flags |= RF_CHECK; else dir->flags &= ~RF_CHECK; /* That this entry belongs to this URL has already been set by the * parent loop. */ /* Given this example: * $ mkdir -p dir/sub/subsub * $ touch dir/sub/subsub/file * $ fsvs ci dir/sub/subsub * * Now "sub" gets committed because of its children; as having a * directory *without* meta-data in the repository is worse than having * valid data set, we push the meta-data properties for *new* * directories, and otherwise if they should be done and got something * changed. */ /* Regarding the "dir->parent" check: If we try to send properties for * the root directory, we get "out of date" ... even if nothing changed. * So don't do that now, until we know a way to make that work. 
* * Problem case: user creates an empty directory in the repository "svn * mkdir url:///", then sets this directory as base, and we try to commit - * "it's empty, after all". * Needing an update is not nice - but maybe what we'll have to do. */ if ((dir->do_this_entry && ops__allowed_by_filter(dir) && dir->parent && /* Are there properties to push? */ (dir->entry_status & (FS_META_CHANGED | FS_PROPERTIES))) || (dir->entry_status & FS_NEW)) { STOPIF_SVNERR( ci___set_props, (dir_baton, dir, editor->change_dir_prop, pool) ); STOPIF( ci___send_user_props(dir_baton, dir, editor->change_dir_prop, 0, pool), NULL); } ex: if (subpool) apr_pool_destroy(subpool); RETURN_SVNERR(status); } /** Start an editor, to get a commit message. * * We look for \c $EDITOR and \c $VISUAL -- to fall back on good ol' vi. */ int ci__getmsg(char **filename) { char *editor_cmd, *cp; int l,status; apr_file_t *af; status=0; STOPIF( waa__get_tmp_name( NULL, filename, &af, global_pool), NULL); /* we close the file, as an editor might delete the file and * write a new. */ STOPIF( apr_file_close(af), "close commit message file"); editor_cmd=getenv("EDITOR"); if (!editor_cmd) editor_cmd=getenv("VISUAL"); if (!editor_cmd) editor_cmd="vi"; l=strlen(editor_cmd) + 1 + strlen(opt_commitmsgfile) + 1; STOPIF( hlp__strmnalloc(l, &cp, editor_cmd, " ", opt_commitmsgfile, NULL), NULL); l=system(cp); STOPIF_CODE_ERR(l == -1, errno, "fork() failed"); STOPIF_CODE_ERR(l, WEXITSTATUS(l), "spawned editor exited with %d, signal %d", WEXITSTATUS(l), WIFSIGNALED(l) ? WTERMSIG(l) : 0); status=0; ex: return status; } /** Creates base directories from \c missing_path_utf8, if necessary, and * calls \c ci__directory(). * * \a current_missing points into \c missing_path_utf8_len, to the current * path spec; \a editor, \a root and \a dir_baton are as in * ci__directory(). 
* * As the number of directories created this way is normally 0, and for * typical non-zero use I'd believe about 3 or 4 levels (maximum), we don't * use an extra recursion pool here. */ svn_error_t *ci___base_dirs(char *current_missing, const svn_delta_editor_t *editor, struct estat *root, void *dir_baton) { int status; svn_error_t *status_svn; char *delim; void *child_baton; status=0; if (current_missing && *current_missing) { /* Create one level of the hierarchy. */ delim=strchr(current_missing, '/'); if (delim) { *delim=0; delim++; /* There must not be a "/" at the end, or two slashes. */ BUG_ON(!*delim || *delim=='/'); } DEBUGP("adding %s", missing_path_utf8); STOPIF_SVNERR( editor->add_directory, (missing_path_utf8, dir_baton, NULL, SVN_INVALID_REVNUM, current_url->pool, &child_baton)); if (delim) delim[-1]='/'; STOPIF_SVNERR( ci___base_dirs, (delim, editor, root, child_baton)); STOPIF_SVNERR( editor->close_directory, (child_baton, current_url->pool)); } else STOPIF_SVNERR( ci__directory, (editor, root, dir_baton, current_url->pool)); ex: RETURN_SVNERR(status); } /** The main commit function. * * It does as much setup as possible before traversing the tree - to find * errors (no network, etc.) as soon as possible. * * The message file gets opened here to verify its existence, * and to get a handle to it. If we're doing \c chdir()s later we don't * mind; the open handle let's us read when we need it. And the contents * are cached only as long as necessary. 
*/ int ci__work(struct estat *root, int argc, char *argv[]) { int status; svn_error_t *status_svn; const svn_delta_editor_t *editor; void *edit_baton; void *root_baton; struct stat st; int commitmsg_fh, commitmsg_is_temp; char *utf8_commit_msg; char **normalized; const char *url_name; time_t delay_start; char *missing_dirs; status=0; status_svn=NULL; edit_baton=NULL; editor=NULL; /* This cannot be used uninitialized, but gcc doesn't know */ commitmsg_fh=-1; opt__set_int(OPT__CHANGECHECK, PRIO_MUSTHAVE, opt__get_int(OPT__CHANGECHECK) | CHCHECK_DIRS | CHCHECK_FILE); /* This must be done before opening the file. */ commitmsg_is_temp=!opt_commitmsg && !opt_commitmsgfile; if (commitmsg_is_temp) STOPIF( ci__getmsg(&opt_commitmsgfile), NULL); /* If there's a message file, open it here. (Bug out early, if * necessary). * * This must be done before waa__find_common_base(), as this does a * chdir() and would make relative paths invalid. */ if (opt_commitmsgfile) { commitmsg_fh=open(opt_commitmsgfile, O_RDONLY); STOPIF_CODE_ERR( commitmsg_fh<0, errno, "cannot open file %s", opt_commitmsgfile); } STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); /* Check if there's an URL defined before asking for a message */ STOPIF( url__load_nonempty_list(NULL, 0), NULL); if (urllist_count==1) current_url=urllist[0]; else { url_name=opt__get_string(OPT__COMMIT_TO); STOPIF_CODE_ERR( !url_name || !*url_name, EINVAL, "!Which URL would you like to commit to?\n" "Please choose one (config option \"commit_to\")."); STOPIF( url__find_by_name(url_name, ¤t_url), "!No URL named \"%s\" could be found.", url_name); } STOPIF_CODE_ERR( current_url->is_readonly, EROFS, "!Cannot commit to \"%s\",\n" "because it is marked read-only.", current_url->url); STOPIF(ign__load_list(NULL), NULL); STOPIF( url__open_session(NULL, &missing_dirs), NULL); /* Warn early. 
*/ if (missing_dirs) STOPIF_CODE_ERR( opt__get_int(OPT__MKDIR_BASE) == OPT__NO, ENOENT, "!The given URL \"%s\" does not exist (yet).\n" "The missing directories \"%s\" could possibly be created, if\n" "you enable the \"mkdir_base\" option (with \"-o mkdir_base=yes\").", current_url->url, missing_dirs); opt__set_int( OPT__CHANGECHECK, PRIO_MUSTHAVE, opt__get_int(OPT__CHANGECHECK) | CHCHECK_DIRS | CHCHECK_FILE); /* This is the first step that needs some wall time - descending * through the directories, reading inodes */ STOPIF( waa__read_or_build_tree(root, argc, normalized, argv, NULL, 0), NULL); if (opt_commitmsgfile) { STOPIF_CODE_ERR( fstat(commitmsg_fh, &st) == -1, errno, "cannot estimate size of %s", opt_commitmsgfile); if (st.st_size == 0) { /* We're not using some mapped memory. */ DEBUGP("empty file"); opt_commitmsg=""; } else { DEBUGP("file is %llu bytes", (t_ull)st.st_size); opt_commitmsg=mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, commitmsg_fh, 0); STOPIF_CODE_ERR(!opt_commitmsg, errno, "mmap commit message (%s, %llu bytes)", opt_commitmsgfile, (t_ull)st.st_size); } close(commitmsg_fh); } if (!*opt_commitmsg) { STOPIF_CODE_ERR( opt__get_int(OPT__EMPTY_MESSAGE)==OPT__NO, EINVAL, "!Empty commit messages are defined as invalid, " "see \"empty_message\" option."); } STOPIF( hlp__local2utf8(opt_commitmsg, &utf8_commit_msg, -1), "Conversion of the commit message to utf8 failed"); if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("Committing to %s\n", current_url->url); STOPIF_SVNERR( svn_ra_get_commit_editor, (current_url->session, &editor, &edit_baton, utf8_commit_msg, ci__callback, root, NULL, // apr_hash_t *lock_tokens, FALSE, // svn_boolean_t keep_locks, global_pool) ); if (opt_commitmsgfile && st.st_size != 0) STOPIF_CODE_ERR( munmap(opt_commitmsg, st.st_size) == -1, errno, "munmap()"); if (commitmsg_is_temp) STOPIF_CODE_ERR( unlink(opt_commitmsgfile) == -1, errno, "Cannot remove temporary message file %s", opt_commitmsgfile); /* The whole URL is at the 
same revision - per definition. */ STOPIF_SVNERR( editor->open_root, (edit_baton, current_url->current_rev, global_pool, &root_baton) ); /* Only children are updated, not the root. Do that here. */ if (ops__allowed_by_filter(root)) STOPIF( hlp__lstat( root->name, &root->st), NULL); committed_entries=0; if (missing_dirs) { STOPIF( hlp__local2utf8( missing_dirs, &missing_dirs, -1), NULL); /* As we're doing a lot of local->utf8 conversions we have to copy the * result. */ missing_path_utf8_len=strlen(missing_dirs); STOPIF( hlp__strnalloc(missing_path_utf8_len+1, &missing_path_utf8, missing_dirs), NULL); } /* This is the second step that takes time. */ STOPIF_SVNERR( ci___base_dirs, (missing_path_utf8, editor, root, root_baton)); /* If an error occurred, abort the commit. */ if (!status) { if (opt__get_int(OPT__EMPTY_COMMIT)==OPT__NO && committed_entries==0) { if (opt__verbosity() > VERBOSITY_VERYQUIET) printf("Avoiding empty commit as requested.\n"); goto abort_commit; } STOPIF_SVNERR( editor->close_edit, (edit_baton, global_pool) ); edit_baton=NULL; delay_start=time(NULL); /* Has to write new file, if commit succeeded. */ if (!status) { /* We possibly have to use some generation counter: * - write the URLs to a temporary file, * - write the entries, * - rename the temporary file. * Although, if we're cut off anywhere, we're not consistent with the * data. * Just use unionfs - that's easier. */ STOPIF( waa__output_tree(root), NULL); STOPIF( url__output_list(), NULL); } /* We do the delay here ... here we've got a chance that the second * wrap has already happened because of the IO above. */ STOPIF( hlp__delay(delay_start, DELAY_COMMIT), NULL); } ex: STOP_HANDLE_SVNERR(status_svn); ex2: if (status && edit_baton) { abort_commit: /* If there has already something bad happened, it probably * makes no sense checking the error code. 
*/ editor->abort_edit(edit_baton, global_pool); } return status; } fsvs-1.2.6/src/ignore.c0000644000202400020240000016764412043532166013711 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include "global.h" #include "interface.h" #include "waa.h" #include "est_ops.h" #include "helper.h" #include "warnings.h" #include "direnum.h" #include "ignore.h" #include "url.h" /** \file * \ref groups and \ref ignore command and functions. * */ /* \note Due to restriction in C-comment syntax the above * cases have to separate \c * and \c / to avoid breaking * the code. \c * and \c / would belong together. * * As a fix I wrote /§* and *§/, which get changed by a perl * script after generation. */ /** * \addtogroup cmds * \section groups * \anchor ignore * * \code * fsvs groups dump|load * fsvs groups [prepend|append|at=n] group-definition [group-def ...] * fsvs ignore [prepend|append|at=n] pattern [pattern ...] * fsvs groups test [-v|-q] [pattern ...] * \endcode * * This command adds patterns to the end of the pattern list, or, with \c * prepend, puts them at the beginning of the list. * With \c at=x the patterns are inserted at the position \c x , * counting from 0. * * The difference between \c groups and \c ignore is that \c groups \b * requires a group name, whereas the latter just assumes the default group * \c ignore. * * For the specification please see the related * \ref groups_format "documentation" . * * fsvs dump prints the patterns to \c STDOUT . 
If there are * special characters like \c CR or \c LF embedded in the pattern * without encoding (like \c \\r or \c \\n), the * output will be garbled. * * The patterns may include \c * and \c ? as wildcards in one directory * level, or \c ** for arbitrary strings. * * These patterns are only matched against new (not yet known) files; * entries that are already versioned are not invalidated. \n * If the given path matches a new directory, entries below aren't found, * either; but if this directory or entries below are already versioned, * the pattern doesn't work, as the match is restricted to the directory. * * So: * \code * fsvs ignore ./tmp * \endcode * ignores the directory \c tmp; but if it has already been committed, * existing entries would have to be unmarked with \ref unversion * "fsvs unversion". * Normally it's better to use * \code * fsvs ignore ./tmp/§** * \endcode * as that takes the directory itself (which might be needed after restore * as a mount point anyway), but ignore \b all entries below. \n * Currently this has the drawback that mtime changes will be reported and * committed; this is not the case if the whole directory is ignored. * * * Examples: * \code * fsvs group group:unreadable,mode:4:0 * fsvs group 'group:secrets,/etc/§*shadow' * * fsvs ignore /proc * fsvs ignore /dev/pts * fsvs ignore './var/log/§*-*' * fsvs ignore './§**~' * fsvs ignore './§**§/§*.bak' * fsvs ignore prepend 'take,./§**.txt' * fsvs ignore append 'take,./§**.svg' * fsvs ignore at=1 './§**.tmp' * * fsvs group dump * fsvs group dump -v * * echo "./§**.doc" | fsvs ignore load * # Replaces the whole list * \endcode * * \note Please take care that your wildcard patterns are not expanded * by the shell! * * * \subsection groups_test Testing patterns * * To see more easily what different patterns do you can use the \c test * subcommand. The following combinations are available:
    *
  • fsvs groups test \e pattern\n * Tests \b only the given pattern against all new entries in your working * copy, and prints the matching paths. The pattern is not stored in the * pattern list. *
  • fsvs groups test\n * Uses the already defined patterns on the new entries, and prints the * group name, a tab, and the path.\n * With \c -v you can see the matching pattern in the middle column, too. *
* * By using \c -q you can avoid getting the whole list; this makes sense if * you use the \ref o_group_stats "group_stats" option at the same time. */ /** * \addtogroup cmds * \section rign * * \code * fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...] * fsvs ri [prepend|append|at=n] path-spec [path-spec ...] * \endcode * * If you keep the same repository data at more than one working copy on * the same machine, it will be stored in different paths - and that makes * absolute ignore patterns infeasible. But relative ignore patterns are * anchored at the beginning of the WC root - which is a bit tiring to type * if you're deep in your WC hierarchy and want to ignore some files. * * To make that easier you can use the \c rel-ignore (abbreviated as \c ri) * command; this converts all given path-specifications (which may include * wildcards as per the shell pattern specification above) to WC-relative * values before storing them. * * Example for \c /etc as working copy root: * \code * fsvs rel-ignore '/etc/X11/xorg.conf.*' * * cd /etc/X11 * fsvs rel-ignore 'xorg.conf.*' * \endcode * Both commands would store the pattern "./X11/xorg.conf.*". * * \note This works only for \ref ign_shell "shell patterns". * * For more details about ignoring files please see the \ref ignore command * and \ref groups_format. */ /** * \defgroup ignpat_dev Developers' reference * \ingroup add_unv * * Internal structure, and some explanations. * * The ignore lists are first loaded into a global array. * Then they should be distributed onto the directory structure; * all applicable patterns get referenced by a directory. * * \todo Currently all patterns get tested against all new entries. This * does not seem to be a performance problem. 
* * Eg this directory tree * \code * root * +-- dirA * +-- dirB * +-- dirB1 * \endcode * with these ignore patterns * \code * *.tmp * **~ * dirA/tmp*.lst * dirB/§**§/§*.o * \endcode * would result in * \code * root: *.tmp, **~ * +-- dirA **~, tmp*.lst * +-- dirB **§/§*.o * +-- dirB1 **§/§*.o * \endcode * * Ignore patterns apply only to \b new entries, ie. entries already * known stay known. * * That's why we need an "add" command: * \code * $ fsvs ignore '/proc/§*' * $ fsvs add /proc/stat * \endcode * would version \c /proc/stat , but nothing else from \c /proc . * * A negative ignore-list is named \e take list. * * The storage format is * \code * header: number of entries * %u\n * pattern\0\n * pattern\0\n * \endcode * * Whitespace are not allowed at the start of a pattern; use ./§* * or something similar. * * As low-level library pcre is used, the given shell-patterns are * translated from the shell-like syntax into PCREs. * \code * * -> [^/]* * ** -> .* * ? -> . * . -> \. * \endcode * All other \c \\W are escaped. * **/ // * This part answers the "why" and "how" for ignoring entries. /** \defgroup groups_spec Using grouping patterns * \ingroup userdoc * * * Patterns are used to define groups for new entries; a group can be used * to ignore the given entries, or to automatically set properties when the * entry is taken on the entry list. * * So the auto-props are assigned when the entry gets put on the internal * list; that happens for the \ref add, \ref prop-set or \ref prop-del, and * of course \ref commit commands. \n * To override the auto-props of some new entry just use the property * commands. * * * \section ign_overview Overview * * When \c FSVS walks through your working copy it tries to find \b new * (ie. not yet versioned) entries. Every \b new entry gets tested against * the defined grouping patterns (in the given order!); if a pattern * matches, the corresponding group is assigned to the entry, and no * further matching is done. 
* * See also \ref howto_entry_statii "entry statii". * * \subsection ign_g_ignore Predefined group 1: "ignore" * * If an entry gets a group named \c "ignore" assigned, it will not be * considered for versioning. * * This is the only \b really special group name. * * \subsection ign_g_take Predefined group 2: "take" * * This group mostly specifies that no further matching is to be done, so * that later \ref ign_g_ignore "ignore" patterns are not tested. * * Basically the \c "take" group is an ordinary group like all others; it * is just predefined, and available with a * \ref ign_mod_t "short-hand notation". * * * \section ignpat_why Why should I ignore files? * * Ignore patterns are used to ignore certain directory * entries, where versioning makes no sense. If you're * versioning the complete installation of a machine, you wouldn't care to * store the contents of \c /proc (see man 5 proc), or possibly * because of security reasons you don't want \c /etc/shadow , \c * /etc/sshd/ssh_host_*key , and/or other password- or key-containing * files. * * Ignore patterns allow you to define which directory entries (files, * subdirectories, devices, symlinks etc.) should be taken, respectively * ignored. * * * \section ignpat_why_groups Why should I assign groups? * * The grouping patterns can be compared with the \c auto-props feature of * subversion; it allows automatically defining properties for new entries, * or ignoring them, depending on various criteria. * * For example you might want to use encryption for the files in your * users' \c .ssh directory, to secure them against unauthorized access in * the repository, and completely ignore the private key files: * * Grouping patterns: * \code * group:ignore,/home/§*§/.ssh/id* * group:encrypt,/home/§*§/.ssh/§** * \endcode * And the \c $FSVS_CONF/groups/encrypt file would have a definition for * the fsvs:commit-pipe (see the \ref s_p_n "special properties"). 
* * * \section ignpat_groupdef Syntax of group files * * A group definition file looks like this:
    *
  • Whitespace on the beginning and the end of the line is ignored. *
  • Empty lines, and lines with the first non-whitespace character being * \c '#' (comments) are ignored. *
  • It can have \b either the keywords \c ignore or \c take; if neither * is specified, the group \c ignore has \c ignore as default (surprise, * surprise!), and all others use \c take. *
  • An arbitrary (small) number of lines with the syntax\n * auto-prop property-name property-value can be * given; \e property-name may not include whitespace, as there's no * parsing of any quote characters yet. *
* * An example: * \code * # This is a comment * # This is another * * auto-props fsvs:commit-pipe gpg -er admin@my.net * * # End of definition * \endcode * * * \section groups_format Specification of groups and patterns * * While an ignore pattern just needs the pattern itself (in one of the * formats below), there are some modifiers that can be additionally * specified: * \code * [group:{name},][dir-only,][insens|nocase,][take,][mode:A:C,]pattern * \endcode * These are listed in the section \ref ign_mod below. * * * These kinds of ignore patterns are available: * * \section ign_shell Shell-like patterns * * These must start with ./, just like a base-directory-relative * path. * \c ? , \c * as well as character classes \c [a-z] have their usual * meaning, and \c ** is a wildcard for directory levels. * * You can use a backslash \c \\ outside of character classes to match * some common special characters literally, eg. \c \\* within a pattern * will match a literal asterisk character within a file or directory name. * Within character classes all characters except \c ] are treated * literally. If a literal \c ] should be included in a character class, * it can be placed as the first character or also be escaped using a * backslash. * * Example for \c / as the base-directory * \code * ./[oa]pt * ./sys * ./proc/§* * ./home/§**~ * \endcode * * This would ignore files and directories called \c apt or \c opt in the * root directory (and files below, in the case of a directory), the * directory \c /sys and everything below, the contents of \c /proc * (but take the directory itself, so that upon restore it gets created * as a mountpoint), and all entries matching \c *~ in and below * \c /home . * * \note The patterns are anchored at the beginning and the end. So a * pattern ./sys will match \b only a file or directory named \c * sys. 
If you want to exclude a directories' files, but not the directory * itself, use something like ./dir/§* or ./dir/§** * * If you're deep within your working copy and you'd like to ignore some * files with a WC-relative ignore pattern, you might like to use the * \ref rign "rel-ignore" command. * * * \subsection ignpat_shell_abs Absolute shell patterns * * There is another way to specify shell patterns - using absolute paths. * \n * The syntax is similar to normal shell patterns; but instead of the * ./ prefix the full path, starting with \c /, is used. * * \code * /etc/§**.dpkg-old * /etc/§**.dpkg-bak * /§**.bak * /§**~ * \endcode * * The advantage of using full paths is that a later \c dump and \c load in * another working copy (eg. when moving from versioning \c /etc to \c /) * does simply work; the patterns don't have to be modified. * * Internally this simply tries to remove the working copy base directory * at the start of the patterns (on loading); then they are processed as * usual. * * If a pattern does \b not match the wc base, and neither has the * wild-wildcard prefix /§**, a \ref warn_ign_abs_not_base * "warning" is issued. * * * * \section ignpat_pcre PCRE-patterns * * PCRE stands for Perl Compatible Regular Expressions; you can read about * them with man pcre (if the manpages are installed), and/or * perldoc perlre (if perldoc is installed). \n * If both fail for you, just google it. * * These patterns have the form PCRE:{pattern}, with \c PCRE in * uppercase. * * An example: * \code * PCRE:./home/.*~ * \endcode * This one achieves exactly the same as ./home/§**~ . * * Another example: * \code * PCRE:./home/[a-s] * \endcode * * This would match \c /home/anthony , \c /home/guest , \c /home/somebody * and so on, but would not match \c /home/theodore . * * One more: * \code * PCRE:./.*(\.(tmp|bak|sik|old|dpkg-\w+)|~)$ * \endcode * * Note that the pathnames start with \c ./ , just like above, and that the * patterns are anchored at the beginning. 
To additionally anchor at the * end you could use a $ at the end. * * * \section ign_dev Ignoring all files on a device * * Another form to discern what is needed and what not is possible with * DEVICE:[<|<=|>|>=]major[:minor]. * * This takes advantage of the major and minor device numbers of inodes * (see man 1 stat and man 2 stat). * * The rule is as follows: * - Directories have their parent matched against the given string * - All other entries have their own device matched. * * This is because mount-points (ie. directories where other * filesystems get attached) show the device of the mounted device, but * should be versioned (as they are needed after restore); all entries (and * all binding mounts) below should not. * * The possible options \c <= or \c >= define a less-or-equal-than * respective bigger-or-equal-than relationship, to ignore a set of device * classes. * * Examples: * \code * tDEVICE:3 * ./§* * \endcode * This patterns would define that all filesystems on IDE-devices (with * major number 3) are \e taken , and all other files are ignored. * * \code * DEVICE:0 * \endcode * This would ignore all filesystems with major number 0 - in linux these * are the \e virtual filesystems ( \c proc , \c sysfs , \c devpts , etc.; * see \c /proc/filesystems , the lines with \c nodev ). * * Mind NFS and smb-mounts, check if you're using \e md , \e lvm and/or * \e device-mapper ! * * * Note: The values are parsed with \c strtoul() , so you can use decimal, * hexadecimal (by prepending \c "0x", like \c "0x102") and octal (\c "0", * like \c "0777") notation. * * * \section ign_inode Ignoring a single file, by inode * * At last, another form to ignore entries is to specify them via the * device they are on and their inode: * \code * INODE:major:minor:inode * \endcode * This can be used if a file can be hardlinked to many places, but only * one copy should be stored. Then one path can be marked as to \e take , * and other instances can get ignored. 
* * \note That's probably a bad example. There should be a better mechanism * for handling hardlinks, but that needs some help from subversion. * * * \section ign_mod Modifiers * * All of these patterns can have one or more of these modifiers \b before * them, with (currently) optional \c "," as separators; not all * combinations make sense. * * For patterns with the \c m (mode match) or \c d (dironly) modifiers the * filename pattern gets optional; so you don't have to give an all-match * wildcard pattern (./§**) for these cases. * * * \subsection ign_mod_t "take": Take pattern * This modifier is just a short-hand for assigning the group \ref * ign_g_take "take". * * \subsection ign_mod_ignore "ignore": Ignore pattern * This modifier is just a short-hand for assigning the group * \ref ign_g_ignore "ignore". * * \subsection ign_mod_i "insens" or "nocase": Case insensitive * With this modifier you can force the match to be case-insensitive; this * can be useful if other machines use eg. \c samba to access files, and * you cannot be sure about them leaving \c ".BAK" or \c ".bak" behind. * * \subsection ign_mod_d "dironly": Match only directories * This is useful if you have a directory tree in which only certain files * should be taken; see below. * * \subsection ign_mod_m "mode": Match entries' mode * This expects a specification of two octal values in the form * m:and_value:compare_value, like m:04:00; * the bits set in \c and_value get isolated from the entries' mode, and * compared against \c compare_value. * * As an example: the file has mode \c 0750; a specification of
    *
  • m:0700:0700 matches, *
  • m:0700:0500 doesn't; and *
  • m:0007:0000 matches, but *
  • m:0007:0007 doesn't.
* * A real-world example: m:0007:0000 would match all entries that * have \b no right bits set for \e "others", and could be used to exclude * private files (like \c /etc/shadow). (Alternatively, the \e others-read * bit could be used: m:0004:0000. * * FSVS will reject invalid specifications, ie. when bits in \c * compare_value are set that are cleared in \c and_value: these patterns * can never match. \n * An example would be m:0700:0007. * * * \subsection ign_mod_examples Examples * * \code * take,dironly,./var/vmail/§** * take,./var/vmail/§**§/.*.sieve * ./var/vmail/§** * \endcode * This would take all \c ".*.sieve" files (or directories) below * \c /var/vmail, in all depths, and all directories there; but no other * files. * * If your files are at a certain depth, and you don't want all other * directories taken, too, you can specify that exactly: * \code * take,dironly,./var/vmail/§* * take,dironly,./var/vmail/§*§/§* * take,./var/vmail/§*§/§*§/.*.sieve * ./var/vmail/§** * \endcode * * \code * mode:04:0 * take,./etc/ * ./§** * \endcode * This would take all files from \c /etc, but ignoring the files that are * not world-readable (\c other-read bit cleared); this way only "public" * files would get taken. * */ /** \section dev_groups Groups * \ingroup dev * * Some thoughts about groups in FSVS. * * Groups have to be considered as follows:
    *
  • On commit the auto-props must be used *
  • if an entry was added manually, they should apply as usual *
  • unless they're overridden by \c prop-set or \c prop-del *
* * The easiest way seems to be to write the properties in the filesystem * when the entries are being stored in the entry list, ie. at \c add, \c * prop-set, \c prop-del or \c commit time. \n * The simplest way to do that would be in \ref waa__output_tree() - we see * that an entry is newly allocated, and push all (not already set) * properties there. \n * But that wouldn't work with the \c prop-del command, as an automatically * assigned property wouldn't get removed. * * So there's the function ops__apply_group(), which is called in the * appropriate places. * */ /** All groups, addressed by name. */ apr_hash_t *ign___groups=NULL; /** The length of the longest group name, used for formatting the status * output. * This is initialized to 6, because "ignore" at least takes that much * space - and "(none)" too. */ int ign__max_group_name_len=6; /* They are only pointers */ #define RESERVE_IGNORE_ENTRIES (4) /** Header definition - currently only number of entries. */ static const char ign_header_str[] = "%u", ign__group_take[]="take", ign__group_ign[]="ignore"; const char ign___parm_delimiter=','; /** For how many grouping patterns memory is allocated. */ int max_ignore_entries=0; /** How many grouping patterns are actually used. */ int used_ignore_entries=0; /** Allocated array of grouping patterns. */ static struct ignore_t *ignore_list=NULL; /** Place where the patterns are mmap()ed. */ static char *memory; /** The various strings that define the pattern types. * @{ */ static const char pcre_prefix[]="PCRE:", dev_prefix[]="DEVICE:", inode_prefix[]="INODE:", norm_prefix[]= { '.', PATH_SEPARATOR, 0 }, /* The second PATH_SEPARATOR is not needed. */ wildcard_prefix[]= { PATH_SEPARATOR, '*', '*', 0 }, /* Should that be "//" to make a clearer difference? */ abs_shell_prefix[]= { PATH_SEPARATOR, 0 }; /** @} */ /** Processes a character class in shell ignore patterns. 
* */ int ign___translate_bracketed_expr(char *end_of_buffer, char **src, char **dest) { int status = 0; int pos_in_bracket_expr = -1; // zero-based, -1 == outside int backslashed = 0; STOPIF(**src != '[', "invalid argument, **src does not point to " "start of bracket expression"); do { if (backslashed) { /* Escaped mode; blindly copy the next character. */ *((*dest)++) = *((*src)++); backslashed = 0; /* pos_in_bracket_expr has already been increased. */ } else if ( pos_in_bracket_expr == 0 && (**src == '!' || **src == '^') ) { *((*dest)++) = '^'; ++(*src); /* "!" or "^" at the start of a bracket expression (negation of the * bracket expression/character class) do not count as a regular * content element, so pos_in_bracket_expr is left alone. */ } else { if (**src == ']' && pos_in_bracket_expr > 0) { /* Bracket expression ends. Set "end of expression" marker and fall through to copy the closing bracket. */ pos_in_bracket_expr = -1; } else { /* Now we're at the next character position. */ ++pos_in_bracket_expr; } /* Enter escaped mode? */ backslashed = (**src == '\\'); *((*dest)++) = *((*src)++); } /* end_of_buffer points at character after the allocated destination * buffer space -- *end_of_buffer is invalid/undefined. * Here we just have to be careful to not overwrite the stack - the * real length check is in ign__compile_pattern(). */ STOPIF_CODE_ERR( end_of_buffer - *dest < 5, ENOSPC, "not enough space in buffer"); } while(**src && pos_in_bracket_expr >= 0); ex: return status; } /** Compiles the given pattern for use with \c PCRE. 
* */ int ign__compile_pattern(struct ignore_t *ignore) { const char *err; int offset; int len; char *buffer; char *src, *dest; int status; int backslashed; status=0; if (ignore->type == PT_PCRE) dest=ignore->compare_string; else if (ignore->type == PT_SHELL || ignore->type == PT_SHELL_ABS) { /* translate shell-like syntax into pcre */ len=strlen(ignore->compare_string)*5+16; STOPIF( hlp__alloc( &buffer, len), NULL); dest=buffer; src=ignore->compare_string; if (ignore->type == PT_SHELL_ABS) { /* Strip the wc-path away, and put a . in front. */ /* The pattern must * - match all characters of the wc path, or * - start with a wild-wildcard ('/ **') - it's valid everywhere. * * If it only has a single wildcard it's not allowed - it would have * different meanings depending on the wc base: * pattern: / * /dir/ * matches: /etc/x/dir/ for wc base /etc * or /x/dir " " " / * So we don't allow that. */ if (strncmp(src, wc_path, wc_path_len) == 0) { /* Special case for wc base = / */ src += 1+ (wc_path_len == 1 ? 0 : wc_path_len); } else if (strncmp(src, wildcard_prefix, strlen(wildcard_prefix)) == 0) { /* Has wildcard at start ... just consume the PATH_SEPARATOR, as * that's included in the norm_prefix. */ src++; } else STOPIF( wa__warn(WRN__IGNPAT_WCBASE, EINVAL, "The absolute shell pattern\n" " \"%s\"\n" "does neither have the working copy base path\n" " \"%s\"\n" "nor a wildcard path (like \"%s\") at the beginning;\n" "maybe you want a wc-relative pattern, " "starting with \"%s\"?", src, wc_path, wildcard_prefix, norm_prefix), NULL); /* Before: /etc/X11/? /etc/X11/? * wc_path: /etc / * After: ./X11/? ./etc/X11/? * */ /* As norm_prefix is const, the compile should remove the strlen() by * the value. 
*/ strncpy(dest, norm_prefix, strlen(norm_prefix)); dest+=strlen(norm_prefix); } backslashed = 0; do { if (backslashed) { // escaped mode *(dest++) = *(src++); backslashed = 0; } else { switch(*src) { case '*': if (src[1] == '*') { if (dest[-1] == PATH_SEPARATOR && src[2] == PATH_SEPARATOR) { /* Case 1: "/§**§/xxx"; this gets transformed to * "/(.*§/)?", so that *no* directory level is possible, too. */ *(dest++) = '('; *(dest++) = '.'; *(dest++) = '*'; *(dest++) = PATH_SEPARATOR; *(dest++) = ')'; *(dest++) = '?'; /* Eat the two "*"s, and the PATH_SEPARATOR. */ src+=3; } else { /* Case 2: "/ ** xxx", without a PATH_SEPARATOR after the * "**". */ *(dest++) = '.'; *(dest++) = '*'; while (*src == '*') src++; } } else { /* one directory level */ *(dest++) = '['; *(dest++) = '^'; *(dest++) = PATH_SEPARATOR; *(dest++) = ']'; *(dest++) = '*'; src++; } break; case '?': *(dest++) = '.'; src++; break; case '[': // processed bracket expression and advanced src and dest pointers STOPIF(ign___translate_bracketed_expr(buffer + len, &src, &dest), "processing a bracket expression failed"); break; case '0' ... '9': case 'a' ... 'z': case 'A' ... 'Z': /* Note that here it's not a PATH_SEPARATOR, but the simple * character -- on Windows there'd be a \, which would trash the * regular expression! Although we'd have some of these problems on * Windows ...*/ case '/': case '-': *(dest++) = *(src++); break; case '\\': backslashed = 1; // enter escaped mode *(dest++) = *(src++); break; /* . and all other special characters { ( ] ) } + # " \ $ * get escaped. */ case '.': default: *(dest++) = '\\'; *(dest++) = *(src++); break; } } /* Ensure that there is sufficient space in the buffer to process the * next character. A "*" might create up to 5 characters in dest, the * directory matching patterns appended last will add up to five, and * we have a terminating '\0'. * Plus add a few. 
*/ STOPIF_CODE_ERR( buffer+len - dest < 6+5+1+6, ENOSPC, "not enough space in buffer"); } while (*src); if (src != ignore->compare_string) { *(dest++) = '$'; // anchor regexp /* src has moved at least one char, so it's safe to reference [-1] */ if(src[-1] == PATH_SEPARATOR) { /* Ok, the glob pattern ends in a PATH_SEPARATOR, so our special * "ignore directory" handling kicks in. This results in "($|/)" at * the end. */ dest[-2] = '('; *(dest++) = '|'; *(dest++) = PATH_SEPARATOR; *(dest++) = ')'; } } *dest=0; /* return unused space */ STOPIF( hlp__realloc( &buffer, dest-buffer+2), NULL); ignore->compare_string=buffer; dest=buffer; } else /* pattern type */ { BUG("unknown pattern type %d", ignore->type); /* this one's for gcc */ dest=NULL; } DEBUGP("compiled \"%s\"", ignore->pattern); DEBUGP(" into \"%s\"", ignore->compare_string); /* compile */ ignore->compiled = pcre_compile(dest, PCRE_DOTALL | PCRE_NO_AUTO_CAPTURE | PCRE_UNGREEDY | PCRE_ANCHORED | (ignore->is_icase ? PCRE_CASELESS : 0), &err, &offset, NULL); STOPIF_CODE_ERR( !ignore->compiled, EINVAL, "pattern \"%s\" (from \"%s\") not valid; error %s at offset %d.", dest, ignore->pattern, err, offset); /* Patterns are used often - so it should be okay to study them. * Although it may not help much? * Performance testing! */ ignore->extra = pcre_study(ignore->compiled, 0, &err); STOPIF_CODE_ERR( err, EINVAL, "pattern \"%s\" not studied; error %s.", ignore->pattern, err); ex: return status; } static int data_seen; int have_now(struct ignore_t *ignore, int cur, char *err) { int status; status=0; STOPIF_CODE_ERR(data_seen & cur, EINVAL, "!The pattern \"%s\" includes more than a single %s specification.", ignore->pattern, err); data_seen |= cur; ex: return status; } /** Does all necessary steps to use the given \c ignore_t structure. 
* */ int ign___init_pattern_into(char *pattern, char *end, struct ignore_t *ignore) { int status, stop; int and_value, cmp_value, speclen; char *cp, *eo_word, *param, *eo_parm; int pattern_len; status=0; pattern_len=strlen(pattern); cp=pattern+pattern_len; if (!end || end>cp) end=cp; /* go over \n and other white space. These are not allowed * at the beginning of a pattern. */ while (isspace(*pattern)) { pattern++; STOPIF_CODE_ERR( pattern>=end, EINVAL, "pattern has no pattern"); } data_seen=0; /* gcc reports "used unitialized" - it doesn't see that the loop gets * terminated in the case speclen==0. */ eo_parm=NULL; /* This are the defaults: */ memset(ignore, 0, sizeof(*ignore)); ignore->pattern = pattern; while (*pattern) { eo_word=pattern; while (isalpha(*eo_word)) eo_word++; speclen=eo_word-pattern; /* The used codes are (sorted by first character): * Ignore types Other flags * device, dironly, * group, * inode, ignore, * insens (=nocase), * mode, * nocase, * pcre, * take, * "./" * "/" * * The order below reflects the relative importance for shortened * strings. */ /* For shell patterns we need not look for parameters; and a comparison * with 0 characters makes no sense anyway. */ if (speclen == 0) goto shell_pattern; if (*eo_word == ':') { /* Look for the end of this specification. */ param= eo_word + 1; } else param=NULL; eo_parm=strchr(eo_word, ign___parm_delimiter); if (!eo_parm) eo_parm=eo_word+strlen(eo_word); /* Now eo_parm points to the first non-parameter character - either ',' * or \0. 
*/ if (strncmp(ign__group_take, pattern, speclen)==0) { STOPIF( have_now(ignore, HAVE_GROUP, "group"), NULL); ignore->group_name=ign__group_take; } else if (strncmp(ign__group_ign, pattern, speclen)==0) { STOPIF( have_now(ignore, HAVE_GROUP, "group"), NULL); ignore->group_name=ign__group_ign; } else if (strncmp("group:", pattern, speclen)==0) { STOPIF( have_now(ignore, HAVE_GROUP, "group"), NULL); STOPIF_CODE_ERR( !param || eo_parm==param, EINVAL, "!Missing group name in pattern \"%s\".", ignore->pattern); speclen=eo_parm-param; STOPIF( hlp__strnalloc( speclen, (char**)&ignore->group_name, param), NULL); if (speclen > ign__max_group_name_len) ign__max_group_name_len=speclen; /* Test for valid characters. */ while (param != eo_parm) { STOPIF_CODE_ERR( !isalnum(*param), EINVAL, "!The group name may (currently) " "only use alphanumeric characters;\n" "so \"%s\" is invalid.", ignore->pattern); param++; } } else if (strncmp("dironly", pattern, speclen)==0) { ignore->dir_only=1; STOPIF( have_now(ignore, HAVE_DIR, "dironly"), NULL); data_seen |= HAVE_PATTERN_SUBST; } else if (strncmp("nocase", pattern, speclen)==0 || strncmp("insens", pattern, speclen)==0) { ignore->is_icase=1; STOPIF( have_now(ignore, HAVE_CASE, "case ignore"), NULL); } else if (strncmp("mode:", pattern, speclen)==0) { STOPIF( have_now(ignore, HAVE_MODE, "mode"), NULL); STOPIF_CODE_ERR( !param, EINVAL, "!Invalid mode specification in \"%s\".", ignore->pattern); STOPIF_CODE_ERR( sscanf(param, "%o:%o%n", &and_value, &cmp_value, &stop) != 2, EINVAL, "!Ignore pattern \"%s\" has a bad mode specification;\n" "the expected syntax is \"mode::\".", ignore->pattern); STOPIF_CODE_ERR( param+stop != eo_parm, EINVAL, "!Garbage after mode specification in \"%s\".", ignore->pattern); STOPIF_CODE_ERR( and_value>07777 || cmp_value>07777 || (cmp_value & ~and_value), EINVAL, "!Mode matching specification in \"%s\" has invalid numbers.", ignore->pattern); ignore->mode_match_and=and_value; ignore->mode_match_cmp=cmp_value; 
data_seen |= HAVE_PATTERN_SUBST; stop=0; } /* The following branches cause the loop to terminate, as there's no * delimiter character defined *within* patterns. * (Eg. a PCRE can use *any* character). */ else if (strncmp(dev_prefix, pattern, strlen(dev_prefix)) == 0) { ignore->type=PT_DEVICE; ignore->compare_string = pattern; ignore->compare = PAT_DEV__UNSPECIFIED; pattern+=strlen(dev_prefix); stop=0; while (!stop) { switch (*pattern) { case '<': ignore->compare |= PAT_DEV__LESS; break; case '=': ignore->compare |= PAT_DEV__EQUAL; break; case '>': ignore->compare |= PAT_DEV__GREATER; break; default: stop=1; break; } if (!stop) pattern++; } if (ignore->compare == PAT_DEV__UNSPECIFIED) ignore->compare = PAT_DEV__EQUAL; ignore->major=strtoul(pattern, &cp, 0); DEBUGP("device pattern: major=%d, left=%s", ignore->major, cp); STOPIF_CODE_ERR( cp == pattern, EINVAL, "!No major number found in \"%s\"", ignore->pattern); /* we expect a : here */ if (*cp) { STOPIF_CODE_ERR( *(cp++) != ':', EINVAL, "!Expected ':' between major and minor number in %s", ignore->pattern); pattern=cp; ignore->minor=strtoul(pattern, &cp, 0); STOPIF_CODE_ERR( cp == pattern, EINVAL, "!No minor number in \"%s\"", ignore->pattern); STOPIF_CODE_ERR( *cp, EINVAL, "!Garbage after minor number in \"%s\"", ignore->pattern); ignore->has_minor=1; } else { ignore->minor=PAT_DEV__UNSPECIFIED; ignore->has_minor=0; } status=0; data_seen |= HAVE_PATTERN; } else if (strncmp(inode_prefix, pattern, strlen(inode_prefix)) == 0) { #ifdef DEVICE_NODES_DISABLED DEVICE_NODES_DISABLED(); #else int mj, mn; ignore->type=PT_INODE; ignore->compare_string = pattern; pattern+=strlen(inode_prefix); mj=strtoul(pattern, &cp, 0); STOPIF_CODE_ERR( cp == pattern || *(cp++) != ':', EINVAL, "!No major number in %s?", ignore->pattern); pattern=cp; mn=strtoul(pattern, &cp, 0); STOPIF_CODE_ERR( cp == pattern || *(cp++) != ':', EINVAL, "!No minor number in %s?", ignore->pattern); ignore->dev=MKDEV(mj, mn); pattern=cp; 
ignore->inode=strtoull(pattern, &cp, 0); STOPIF_CODE_ERR( cp == pattern || *cp!= 0, EINVAL, "!Garbage after inode in %s?", ignore->pattern); #endif status=0; data_seen |= HAVE_PATTERN; } else { shell_pattern: if (strncmp(pattern, norm_prefix, strlen(norm_prefix)) == 0) { ignore->type=PT_SHELL; DEBUGP("shell pattern matching"); /* DON'T pattern+=strlen(norm_prefix) - it's needed for matching ! */ } else if (strncmp(pattern, abs_shell_prefix, strlen(abs_shell_prefix)) == 0) { ignore->type=PT_SHELL_ABS; DEBUGP("absolute shell pattern matching"); } else if (strncmp(pcre_prefix, pattern, strlen(pcre_prefix)) == 0) { ignore->type=PT_PCRE; pattern += strlen(pcre_prefix); DEBUGP("pcre matching"); } else STOPIF_CODE_ERR(1, EINVAL, "!Expected a shell pattern, starting with \"%s\" or \"%s\"!", norm_prefix, abs_shell_prefix); STOPIF_CODE_ERR( strlen(pattern)<3, EINVAL, "!Pattern \"%s\" too short!", ignore->pattern); ignore->compare_string = pattern; status=ign__compile_pattern(ignore); STOPIF(status, "compile returned an error"); data_seen |= HAVE_PATTERN; } /* If we got what we want ... */ if (data_seen & HAVE_PATTERN) break; /* Else do the next part of the string. */ pattern=eo_parm; /* Go beyond the delimiter. */ while (*pattern == ign___parm_delimiter) pattern++; DEBUGP("now at %d == %p; end=%p", *pattern, pattern, end); STOPIF_CODE_ERR( pattern>end || (pattern == end && *end!=0), EINVAL, "pattern not \\0-terminated"); } /* Don't know if it makes *really* sense to allow a dironly pattern * without pattern - but there's no reason to deny it outright. */ STOPIF_CODE_ERR(!(data_seen & (HAVE_PATTERN | HAVE_PATTERN_SUBST)), EINVAL, "!Pattern \"%s\" ends prematurely", ignore->pattern); /* If we're in the "ignore" command, and *no* group was given, assign a * default. * If an empty string was given, put always an error. */ /* COMPATIBILITY MODE FOR 1.1.18: always put a groupname there, if * necessary. * Needed so that the old ignore patterns can be read from the ignore * lists. 
*/ // if (!ignore->group_name && (action->i_val & HAVE_GROUP)) if (!ignore->group_name) { ignore->group_name=ign__group_ign; eo_word="group:"; /* gcc optimizes that nicely. */ STOPIF( hlp__strmnalloc( strlen(eo_word) + strlen(ign__group_ign) + 1 + pattern_len + 1, &ignore->pattern, eo_word, ign__group_ign, ",", ignore->pattern, NULL), NULL); } STOPIF_CODE_ERR( !ignore->group_name || !*ignore->group_name, EINVAL, "!No group name given in \"%s\".", ignore->pattern); DEBUGP("pattern: %scase, group \"%s\", %s, mode&0%o==0%o", ignore->is_icase ? "I" : "", ignore->group_name, ignore->dir_only ? "dironly" : "all entries", ignore->mode_match_and, ignore->mode_match_cmp); if (!*pattern) { /* Degenerate case of shell pattern without pattern; allowed in certain * cases. */ ignore->type=PT_SHELL; } ex: return status; } /** -. * */ int ign__load_list(char *dir) { int status, fh, l; struct stat st; char *cp,*cp2; int count; fh=-1; status=waa__open_byext(dir, WAA__IGNORE_EXT, WAA__READ, &fh); if (status == ENOENT) { DEBUGP("no ignore list found"); status=0; goto ex; } else STOPIF_CODE_ERR(status, status, "reading ignore list"); STOPIF_CODE_ERR( fstat(fh, &st), errno, NULL); memory=mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fh, 0); /* If there's an error, return it. * Always close the file. Check close() return code afterwards. */ status=errno; l=close(fh); STOPIF_CODE_ERR( memory == MAP_FAILED, status, "mmap failed"); STOPIF_CODE_ERR( l, errno, "close() failed"); /* make header \0 terminated */ cp=memchr(memory, '\n', st.st_size); if (!cp) { /* This means no entries. * Maybe we should check? 
*/
	/* (Continuation of ign__load_list(); the function header is in the
	 * previous chunk.)  Reaching this point means the mmap'ed ignore file
	 * contained no '\n', i.e. no header line and no patterns; that is
	 * treated as an empty (valid) list. */
	DEBUGP("Grouping list header is invalid.");
	status=0;
	goto ex;
}

/* Parse the pattern count out of the '\0'-terminated header line. */
status=sscanf(memory, ign_header_str, &count);
STOPIF_CODE_ERR( status != 1, EINVAL,
		"grouping header is invalid");
cp++;
/* Pre-reserve list slots for all patterns in a single allocation
 * (pattern==NULL means "reserve only", see ign__new_pattern()). */
STOPIF( ign__new_pattern(count, NULL, NULL, 0, 0), NULL );

/* fill the list */
cp2=memory+st.st_size;
/* NOTE(review): the following statement is textually damaged in this copy
 * of the file — an angle-bracketed span of the loop (condition, body)
 * was lost, apparently by markup stripping.  Kept verbatim rather than
 * guessed; presumably it iterated l over the count patterns, advancing cp
 * through the mapped file, and broke once cp reached cp2.
 * TODO: restore from a pristine fsvs-1.2.6 tarball. */
for(l=0; l= cp2) break; }
/* Both defect cases below are only logged, not treated as errors. */
if (l != count)
	DEBUGP("Ignore-list defect - header count (%u) bigger than actual number"
			"of patterns (%u)", count, l);
if (cp >= cp2)
	DEBUGP("Ignore-list defect - garbage after counted patterns");
l=used_ignore_entries;
status=0;

ex:
/* to make sure no bad things happen */
if (status)
	used_ignore_entries=0;
return status;
}


/** Compares the given \c sstat_t \a st with the \b device ignore pattern
 * \a ign.
 * Does the less-than, greater-than and/or equal comparison.
 * Returns a negative value if the entry's device is smaller than the
 * pattern's, 0 on equality (or when only the major number is compared),
 * and a positive value if it is bigger; magnitude 2 marks a major-number
 * difference, magnitude 1 a minor-number difference. */
inline int ign___compare_dev(struct sstat_t *st, struct ignore_t *ign)
{
#ifdef DEVICE_NODES_DISABLED
	DEVICE_NODES_DISABLED();
#else
	int mj, mn;

	mj=(int)MAJOR(st->dev);
	mn=(int)MINOR(st->dev);
	/* Major number decides first ... */
	if (mj > ign->major) return +2;
	if (mj < ign->major) return -2;
	/* ... and the minor number only matters if the pattern gave one. */
	if (!ign->has_minor) return 0;
	if (mn > ign->minor) return +1;
	if (mn < ign->minor) return -1;
#endif
	return 0;
}


/** Looks the group of \a ign up in the global group hash, creating the
 * hash and/or an empty grouping_t on first use, and stores the result in
 * \a *result as well as in ign->group_def. */
int ign___new_group(struct ignore_t *ign, struct grouping_t **result)
{
	int status;
	int gn_len;
	struct grouping_t *group;

	status=0;
	DEBUGP("making group %s", ign->group_name);
	gn_len=strlen(ign->group_name);

	/* The hash itself is created lazily, on the first group ever seen. */
	if (ign___groups)
		group=apr_hash_get(ign___groups, ign->group_name, gn_len);
	else
	{
		ign___groups=apr_hash_make(global_pool);
		group=NULL;
	}

	if (group)
	{
		/* Already loaded by another pattern. */
	}
	else
	{
		/* hlp__calloc() gives a zeroed structure, so all flags start off. */
		STOPIF( hlp__calloc(&group, 1, sizeof(*group)), NULL);
		apr_hash_set(ign___groups, ign->group_name, gn_len, group);
	}

	*result=group;
	ign->group_def=group;

ex:
	return status;
}

/** Loads the grouping definitions, and stores them via a \ref grouping_t.
**/
/* Reads the on-disk group definition file for ign->group_name (first the
 * WC-specific one below the WAA conf dir, then the common one under
 * /etc/fsvs/groups/), parses "take" / "ignore" / "auto-prop" lines, and
 * fills the grouping_t registered via ign___new_group().
 * The built-in groups "take" and "ignore" work without a file and fall
 * back to sensible defaults.
 * Returns 0 on success, or an error propagated via STOPIF.
 *
 * Fix (review): the two "&copy" tokens in this function had been mangled
 * into the HTML entity "©" by a lossy conversion of this source; they are
 * restored to the address of the local buffer "copy", which is clearly the
 * intended argument (it is allocated here and then used by fopen(),
 * DEBUGP() and IF_FREE() below). */
int ign___load_group(struct ignore_t *ign)
{
	int status;
	struct grouping_t *group;
	char *copy, *fn, *eos, *conf_start, *input;
	FILE *g_in;
	int is_ok, gn_len;
	static const char ps[]= { PATH_SEPARATOR, 0 };
	char *cause;
	svn_string_t *str;

	BUG_ON(ign->group_def, "already loaded");

	status=0;
	copy=NULL;
	g_in=NULL;
	gn_len=strlen(ign->group_name);
	STOPIF( ign___new_group(ign, &group), NULL);

	/* Initialize default values: the built-in group names are allowed to
	 * have no definition file at all. */
	if (strcmp(ign->group_name, ign__group_take) == 0)
		is_ok=1;
	else if (strcmp(ign->group_name, ign__group_ign) == 0)
		is_ok=2;
	else
		is_ok=0;

	/* waa__open() could be used for the WC-specific path; but we couldn't
	 * easily go back to the common directory.
	 * So we just compute the path, and move the specific parts for the
	 * second try. */
	STOPIF( waa__get_waa_directory( wc_path, &fn, &eos, &conf_start,
				GWD_CONF), NULL);

	/* We have to use a new allocation, because the group name can be
	 * (nearly) arbitrarily long. */
	STOPIF( hlp__strmnalloc(waa_tmp_path_len +
				strlen(CONFIGDIR_GROUP) + 1 + gn_len + 1,
				&copy,
				fn, CONFIGDIR_GROUP, ps, ign->group_name, NULL), NULL);

	DEBUGP("try specific group: %s", copy);
	g_in=fopen(copy, "rt");
	if (!g_in)
	{
		/* ENOENT just means "try the common location"; anything else is a
		 * real error. */
		STOPIF_CODE_ERR(errno != ENOENT, errno,
				"!Cannot read group definition \"%s\"", copy);

		/* This range is overlapping:
		 *   /etc/fsvs/XXXXXXXXXXXX...XXXXXX/groups/
		 *   ^fn       ^conf_start           ^eos
		 * gets
		 *   /etc/fsvs/groups/
		 **/
		memmove(copy + (conf_start-fn), copy + (eos-fn),
				strlen(CONFIGDIR_GROUP) + 1 + gn_len + 1); /* ==strlen(eos)+1 */
		DEBUGP("try for common: %s", copy);
		g_in=fopen(copy, "rt");
		STOPIF_CODE_ERR(!g_in && errno != ENOENT, errno,
				"!Cannot read group definition \"%s\"", copy);
	}
	DEBUGP("Got filename %s", copy);

	if (!g_in)
	{
		/* Only the built-in groups may work without a definition file. */
		STOPIF_CODE_ERR(!is_ok, ENOENT,
				"!Group definition for \"%s\" not found;\n"
				"used in pattern \"%s\".",
				ign->group_name, ign->pattern);
		/* Else it's a default name, and we can just use the defaults. */
		goto defaults;
	}

	/* Reset the line counter, then read the file line by line. */
	hlp__string_from_filep(NULL, NULL, NULL, SFF_RESET_LINENUM);
	while (1)
	{
		status=hlp__string_from_filep(g_in, &input, NULL,
				SFF_WHITESPACE | SFF_COMMENT);
		if (status == EOF) break;
		STOPIF(status, "reading group file %s", copy);

		conf_start=input;
		DEBUGP("parsing %s", conf_start);

		/* Split keyword from (optional) rest of line. */
		eos=hlp__get_word(conf_start, &conf_start);
		if (*eos)
			*(eos++)=0;
		else
			eos=NULL;

		if (strcmp(conf_start, "take") == 0)
		{
			group->is_take=1;
			continue;
		}
		else if (strcmp(conf_start, "ignore") == 0)
		{
			group->is_ignore=1;
			continue;
		}
		else if (strcmp(conf_start, "auto-prop") == 0)
		{
			/* "auto-prop NAME VALUE": store VALUE under NAME in the group's
			 * property hash.  Each failure mode gets its own cause text for
			 * the shared "invalid" error message. */
			cause="no property name";
			if (!eos) goto invalid;

			cause="no whitespace after name";
			if (sscanf(eos, "%s%n", input, &gn_len) != 1) goto invalid;

			eos=hlp__skip_ws(eos+gn_len);
			DEBUGP("Got property name=%s, value=%s", input, eos);

			cause="no property value";
			if (!*input || !*eos) goto invalid;

			if (!group->auto_props)
				group->auto_props=apr_hash_make(global_pool);

			STOPIF( hlp__strdup( &fn, eos), NULL);
			gn_len=strlen(input);
			STOPIF( hlp__strnalloc( gn_len, &eos, input), NULL);

			/* We could just store the (char*), too; but
			 * prp__set_from_aprhash() takes the values to be of the kind
			 * svn_string_t. */
			str=svn_string_create(fn, global_pool);
			apr_hash_set(group->auto_props, eos, gn_len, str);
		}
		else
		{
			cause="invalid keyword";
invalid:
			STOPIF( EINVAL,
					"!Cannot parse line #%d in file \"%s\" (%s).",
					hlp__string_from_filep(NULL, NULL, NULL, SFF_GET_LINENUM),
					copy, cause);
		}
	}

defaults:
	status=0;
	STOPIF_CODE_ERR( group->is_ignore && group->is_take, EINVAL,
			"Either \"take\" or \"ignore\" must be given, in \"%s\".", copy);

	/* Neither flag given: built-in "ignore" defaults to ignoring,
	 * everything else (including built-in "take") defaults to taking. */
	if (!group->is_ignore && !group->is_take)
	{
		if (is_ok == 2)
			group->is_ignore=1;
		else
			group->is_take=1;
	}

	DEBUGP("group has %sauto-props; ign=%d, take=%d, url=%s",
			group->auto_props ? "" : "no ",
			group->is_ignore, group->is_take,
			group->url ? group->url->url : "(default)");

ex:
	IF_FREE(copy);
	if (g_in) fclose(g_in);
	return status;
}

/** -.
 *
 * Searches this entry for a take/ignore pattern.
* * If a parent directory has an ignore entry which might be valid * for this directory (like **§/§*~), it is mentioned in this * directory, too - in case of something like dir/a*§/b*§/§* * a path level value is given. * * As we need to preserve the _order_ of the ignore/take statements, * we cannot easily optimize. * is_ignored is set to +1 if ignored, 0 if unknown, and -1 if * on a take-list (overriding later ignore list). * * \a sts must already have the correct estat::st.mode bits set. */ int ign__is_ignore(struct estat *sts, int *is_ignored) { struct estat *dir; int status, namelen UNUSED, len, i, path_len UNUSED; char *path UNUSED, *cp; struct ignore_t **ign_list UNUSED; struct ignore_t *ign; struct sstat_t *st; struct estat sts_cmp; *is_ignored=0; status=0; dir=sts->parent; /* root directory won't be ignored */ if (!dir) goto ex; if (sts->to_be_ignored) { *is_ignored=1; goto ex; } /* TODO - see ign__set_ignorelist() */ /* currently all entries are checked against the full ignore list - * not good performance-wise! */ STOPIF( ops__build_path(&cp, sts), NULL); DEBUGP("testing %s for being ignored", cp); len=strlen(cp); for(i=0; igroup_def) STOPIF( ign___load_group(ign), NULL); ign->stats_tested++; if (ign->type == PT_SHELL || ign->type == PT_PCRE || ign->type == PT_SHELL_ABS) { DEBUGP("matching %s(0%o) against \"%s\" " "(dir_only=%d; and=0%o, cmp=0%o)", cp, sts->st.mode, ign->pattern, ign->dir_only, ign->mode_match_and, ign->mode_match_cmp); if (ign->dir_only && !S_ISDIR(sts->st.mode)) { status=PCRE_ERROR_NOMATCH; } else if (ign->mode_match_and && ((sts->st.mode & ign->mode_match_and) != ign->mode_match_cmp)) { status=PCRE_ERROR_NOMATCH; } else if (ign->compiled) { status=pcre_exec(ign->compiled, ign->extra, cp, len, 0, 0, NULL, 0); STOPIF_CODE_ERR( status && status != PCRE_ERROR_NOMATCH, status, "cannot match pattern %s on data %s", ign->pattern, cp); } } else if (ign->type == PT_DEVICE) { /* device compare */ st=(S_ISDIR(sts->st.mode)) ? 
&(dir->st) : &(sts->st); switch (ign->compare) { case PAT_DEV__LESS: status= ign___compare_dev(st, ign) < 0; break; case PAT_DEV__LESS | PAT_DEV__EQUAL: status= ign___compare_dev(st, ign) <= 0; break; case PAT_DEV__EQUAL: status= ign___compare_dev(st, ign) == 0; break; case PAT_DEV__EQUAL | PAT_DEV__GREATER: status= ign___compare_dev(st, ign) >= 0; break; case PAT_DEV__GREATER: status= ign___compare_dev(st, ign) > 0; break; } /* status = 0 if *matches* ! */ status = !status; DEBUGP("device compare pattern status=%d", status); } else if (ign->type == PT_INODE) { sts_cmp.st.dev=ign->dev; sts_cmp.st.ino=ign->inode; status = dir___f_sort_by_inodePP(&sts_cmp, sts) != 0; DEBUGP("inode compare %llX:%llu status=%d", (t_ull)ign->dev, (t_ull)ign->inode, status); } else BUG("unknown pattern type 0x%X", ign->type); /* here status == 0 means pattern matches */ if (status == 0) { ign->stats_matches++; *is_ignored = ign->group_def->is_ignore ? +1 : -1; sts->match_pattern=ign; DEBUGP("pattern found - result %d", *is_ignored); goto ex; } } /* no match, no error */ status=0; ex: return status; } /** Writes the ignore list back to disk storage. 
* */ int ign__save_ignorelist(char *basedir) { int status, fh, l, i; struct ignore_t *ign; char buffer[HEADER_LEN]; DEBUGP("saving ignore list: have %d", used_ignore_entries); fh=-1; if (!basedir) basedir=wc_path; if (used_ignore_entries==0) { STOPIF( waa__delete_byext(basedir, WAA__IGNORE_EXT, 1), NULL); goto ex; } STOPIF( waa__open_byext(basedir, WAA__IGNORE_EXT, WAA__WRITE, &fh), NULL); /* do header */ for(i=l=0; i= sizeof(buffer)-1, ENOSPC, "can't prepare header to write; buffer too small"); strcat(buffer, "\n"); l=strlen(buffer); status=write(fh, buffer, l); STOPIF_CODE_ERR( status != l, errno, "error writing header"); /* write data */ ign=ignore_list; for(i=0; ipattern)+1; status=write(fh, ign->pattern, l); STOPIF_CODE_ERR( status != l, errno, "error writing data"); status=write(fh, "\n", 1); STOPIF_CODE_ERR( status != 1, errno, "error writing newline"); } ign++; } status=0; ex: if (fh!=-1) { l=waa__close(fh, status); fh=-1; STOPIF(l, "error closing ignore data"); } return status; } int ign__new_pattern(unsigned count, char *pattern[], char *ends, int user_pattern, int position) { int status; unsigned i; struct ignore_t *ign; status=0; DEBUGP("getting %d new entries - max is %d, used are %d", count, max_ignore_entries, used_ignore_entries); if (used_ignore_entries+count >= max_ignore_entries) { max_ignore_entries = used_ignore_entries+count+RESERVE_IGNORE_ENTRIES; STOPIF( hlp__realloc( &ignore_list, sizeof(*ignore_list) * max_ignore_entries), NULL); } /* If we're being called without patterns, we should just reserve * the space in a piece. */ if (!pattern) goto ex; /* Per default new ignore patterns are appended. */ if (position != PATTERN_POSITION_END && used_ignore_entries>0) { /* This would be more efficient with a list of pointers. * But it happens only on explicit user request, and is therefore * very infrequent. */ /* This code currently assumes that all fsvs-system-patterns are * at the front of the list. 
The only use is currently in waa__init(), * and so that should be ok. */ /* If we assume that "inserting" patterns happen only when we don't * do anything but read, insert, write, we could even put the new * patterns in front. * On writing only the user-patterns would be written, and so on the next * load the order would be ok. */ /* Find the first user pattern, and move from there. */ for(i=0; i used_ignore_entries || position<0); status=0; for(i=0; iis_user_pat=user_pattern; pattern++; } used_ignore_entries+=count; ex: return status; } /** Parses the optional position specification. * */ int ign___parse_position(char *arg, int *position, int *advance) { int status; int i; status=0; *advance=0; /* Normal pattern inclusion. May have a position specification here. */ *position=PATTERN_POSITION_END; if (strcmp(arg, "prepend") == 0) { *advance=1; *position=PATTERN_POSITION_START; } else if (sscanf(arg, "at=%d", &i) == 1) { *advance=1; STOPIF_CODE_ERR(i > used_ignore_entries, EINVAL, "The position %d where the pattern " "should be inserted is invalid.\n", i); *position=i; } else if (strcmp(arg, "append") == 0) { /* Default */ *advance=1; } ex: return status; } int ign___test_single_pattern(struct estat *sts) { int status; char *path; status=0; BUG_ON(!(sts->entry_status & FS_NEW)); if (sts->match_pattern) { STOPIF( ops__build_path(&path, sts), NULL); if (opt__is_verbose() >= 0) STOPIF_CODE_EPIPE( printf("%s\n", path), NULL); } ex: return status; } int ign___test_all_patterns(struct estat *sts) { int status; char *path; struct ignore_t *ign; status=0; BUG_ON(!(sts->entry_status & FS_NEW)); STOPIF( ops__build_path(&path, sts), NULL); ign=sts->match_pattern; if (opt__is_verbose() >= 0) STOPIF_CODE_EPIPE( opt__is_verbose()>0 ? printf("%s\t%s\t%s\n", ign ? ign->group_name : "(none)", ign ? ign->pattern : "(none)", path) : printf("%s\t%s\n", ign ? ign->group_name : "(none)", path), NULL); ex: return status; return 0; } /** -. 
*/ int ign__print_group_stats(FILE *output) { int status; int i; struct ignore_t *ign; STOPIF_CODE_EPIPE( fprintf(output, "\nGrouping statistics (" "tested, matched, groupname, pattern):\n\n"), NULL); for(i=0; iis_user_pat || opt__is_verbose()>0) { STOPIF_CODE_EPIPE( fprintf(output, "%u\t%u\t%s\t%s\n", ign->stats_tested, ign->stats_matches, ign->group_name, ign->pattern), NULL); } } ex: return status; } /** -. * This is called to append new ignore patterns. **/ int ign__work(struct estat *root UNUSED, int argc, char *argv[]) { int status; int position, i; char *cp, *copy; char *arg[2]; struct grouping_t *group; status=0; /* A STOPIF_CODE_ERR( argc==0, 0, ...) is possible, but not very nice - * the message is not really user-friendly. */ if (argc==0) ac__Usage_this(); /* Now we can be sure to have at least 1 argument. */ /* Goto correct base. */ status=waa__find_common_base(0, NULL, NULL); if (status == ENOENT) STOPIF(EINVAL, "!No working copy base was found."); STOPIF(status, NULL); DEBUGP("first argument is %s", argv[0]); status=0; if (strcmp(argv[0], parm_test) == 0) { argv++; argc--; if (argc>0) { STOPIF( ign___parse_position(argv[0], &position, &i), NULL); argv+=i; argc-=i; /* Even though we might have been called with "groups" instead of * "ignore", we just assume the "ignore" group, so that testing is * easier. */ action->i_val |= HAVE_GROUP; STOPIF( ign__new_pattern(argc, argv, NULL, 1, position), NULL); action->local_callback=ign___test_single_pattern; } else { STOPIF( ign__load_list(NULL), NULL); action->local_callback=ign___test_all_patterns; } opt__set_int(OPT__FILTER, PRIO_MUSTHAVE, FS_NEW); /* The entries would be filtered, and not even given to the output * function, so we have to fake the ignore groups into take groups. */ for(i=0; iis_ignore=0; ignore_list[i].group_def->is_take=1; } /* We have to load the URLs. */ STOPIF( url__load_list(NULL, 0), NULL); /* We fake the start path as (relative) argument; if it's the WC base, * we use ".". 
*/ if (start_path_len == wc_path_len) arg[0]="."; else arg[0]=start_path+wc_path_len+1; arg[1]=NULL; STOPIF( waa__read_or_build_tree(root, 1, arg, arg, NULL, 0), NULL); if (opt__get_int(OPT__GROUP_STATS)) STOPIF( ign__print_group_stats(stdout), NULL); /* We must not store the list! */ goto dont_store; } else if (strcmp(argv[0], parm_load) == 0) { i=0; while (1) { status=hlp__string_from_filep(stdin, &cp, NULL, SFF_WHITESPACE); if (status == EOF) break; STOPIF(status, NULL); STOPIF( hlp__strdup( ©, cp), NULL); STOPIF( ign__new_pattern(1, ©, NULL, 1, PATTERN_POSITION_END), NULL); i++; } if (opt__is_verbose() >= 0) printf("%d pattern%s loaded.\n", i, i==1 ? "" : "s"); } else { /* We edit or dump the list, so read what we have. */ STOPIF( ign__load_list(NULL), NULL); if (strcmp(argv[0], parm_dump) == 0) { /* Dump only user-patterns. */ for(i=position=0; i < used_ignore_entries; i++, position++) if (ignore_list[i].is_user_pat) { if (opt__is_verbose() > 0) printf("%3d: ", position); printf("%s\n", ignore_list[i].pattern); } /* No need to save. */ goto dont_store; } else { STOPIF( ign___parse_position(argv[0], &position, &i), NULL); argv+=i; argc-=i; STOPIF( ign__new_pattern(argc, argv, NULL, 1, position), NULL); } } /* not "fsvs load" */ STOPIF( ign__save_ignorelist(NULL), NULL); dont_store: ex: return status; } /** -. * Relativizes the given paths, and stores them. **/ int ign__rign(struct estat *root UNUSED, int argc, char *argv[]) { int status; int i, position; char **normalized; status=0; if (argc==0) ac__Usage_this(); /* Position given? */ STOPIF( ign___parse_position(argv[0], &position, &i), NULL); argv+=i; argc-=i; /* Goto correct base. */ status=waa__find_common_base2(argc, argv, &normalized, FCB__PUT_DOTSLASH | FCB__NO_REALPATH); if (status == ENOENT) STOPIF(EINVAL, "!No working copy base was found."); STOPIF(status, NULL); /* Load, insert, save. 
*/ STOPIF( ign__load_list(NULL), NULL); STOPIF( ign__new_pattern(argc, normalized, NULL, 1, position), NULL); STOPIF( ign__save_ignorelist(NULL), NULL); ex: return status; } fsvs-1.2.6/src/preproc.h0000644000202400020240000000347311145025433014066 0ustar marekmarek/************************************************************************ * Copyright (C) 2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __PREPROC_H__ #define __PREPROC_H__ /** \file * Preprocessor macros for global use. * */ #include #include /** Macros for counting the bits set. * The interested party is referred to "Numerical Recipes". * @{ */ #define _BITCOUNTx(x, m, s) ( ((x) & m) + (((x) & (m << s)) >> s) ) #define _BITCOUNT6(x) (_BITCOUNTx( (x), 0x5555555555555555ULL, 1)) #define _BITCOUNT5(x) (_BITCOUNTx(_BITCOUNT6(x), 0x3333333333333333ULL, 2)) #define _BITCOUNT4(x) (_BITCOUNTx(_BITCOUNT5(x), 0x0f0f0f0f0f0f0f0fULL, 4)) #define _BITCOUNT3(x) (_BITCOUNTx(_BITCOUNT4(x), 0x00ff00ff00ff00ffULL, 8)) #define _BITCOUNT2(x) (_BITCOUNTx(_BITCOUNT3(x), 0x0000ffff0000ffffULL, 16)) #define _BITCOUNT1(x) (_BITCOUNTx(_BITCOUNT2(x), 0x00000000ffffffffULL, 32)) #define _BITCOUNT(x) ( (int)_BITCOUNT1(x) ) /** @} */ /** How many bits a \c mode_t must be shifted to get the packed * representation. * */ #define MODE_T_SHIFT_BITS (_BITCOUNT(S_IFMT ^ (S_IFMT-1)) -1) /** The number of bits needed for storage. */ #define PACKED_MODE_T_NEEDED_BITS (_BITCOUNT(S_IFMT)) /** How to convert from \c mode_t to the packed representation (in struct * \ref estat) and back. * @{ */ #define MODE_T_to_PACKED(mode) ((mode) >> MODE_T_SHIFT_BITS) #define PACKED_to_MODE_T(p) ((p) << MODE_T_SHIFT_BITS) /** @} */ /** Simplification for testing packed modes. * Used with S_ISDIR etc. 
*/ #define TEST_PACKED(test, val) test(PACKED_to_MODE_T(val)) #endif fsvs-1.2.6/src/config.h.in0000644000202400020240000001034012143163355014262 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __CONFIG_H__ #define __CONFIG_H__ /** \file * \c Autoconf results storage. */ /** \defgroup compat Compatibility and interfaces * * For a reference on how to use FSVS on older system, please see also * \ref howto_chroot. * */ /** \defgroup compati Compilation-only * \ingroup compat * */ /** \defgroup autoconf Autoconf results. * \ingroup compati * * Here are the results of \c configure stored. * They get defined by the system and are used to tell FSVS whether * optional parts can be activated or not. */ /** @{ */ /** Whether the valgrind headers were found. * Then some initializers can specifically mark areas as initialized. */ #undef HAVE_VALGRIND /** If this is defined, some re-arrangements in struct-layout are made, * and additional bug-checking code may be included. */ #undef ENABLE_DEBUG /** Whether gcov test-coverage is wanted. */ #undef ENABLE_GCOV /** If set to 1, disable debug messages. */ #undef ENABLE_RELEASE /** How many characters of the MD5(wc_path) are used to distinguish the WAA * paths. */ #undef WAA_WC_MD5_CHARS #if WAA_WC_MD5_CHARS >=0 && WAA_WC_MD5_CHARS <=32 /* nothing, ok. */ #else #error "WAA_WC_MD5_CHARS invalid." #endif /** OpenBSD has no locales support. */ #undef HAVE_LOCALES /** Unsigned 32bit type. * The value of \c AC_CV_C_UINT32_T changed between autoconf 2.59e and 2.60. * Since 2.60 we get \c yes instead of the type. * And there's no \c HAVE_UINT32_T ... * I don't seem to get that to work properly. 
* So I changed configure.in to substitute \c yes to \c uint32_t. */ #undef HAVE_UINT32_T /* #if HAVE_UINT32_T #include #include #endif */ #undef AC_CV_C_UINT32_T /** Whether \c linux/types.h was found. */ #undef HAVE_LINUX_TYPES_H /** Whether \c linux/unistd.h was found. */ #undef HAVE_LINUX_UNISTD_H /** Whether \c dirfd() was found (\ref dir__get_dir_size()). */ #undef HAVE_DIRFD /** Whether there's an additional microsecond field in struct stat. */ #undef HAVE_STRUCT_STAT_ST_MTIM /** The chroot jail path given at configure time. */ #undef CHROOTER_JAIL #undef NEED_ENVIRON_EXTERN /** Comparision function definition (for \c qsort()) */ #undef HAVE_COMPARISON_FN_T #ifndef HAVE_COMPARISON_FN_T typedef int (*comparison_fn_t) (__const void *, __const void *); #endif #undef HAVE_O_DIRECTORY #ifndef HAVE_O_DIRECTORY #define O_DIRECTORY (0) #endif /** Does \c linux/kdev_t.h exist? * Needed for \a MAJOR() and \a MINOR() macros. */ #undef HAVE_LINUX_KDEV_T_H /** Should we fake definitions? */ #undef ENABLE_DEV_FAKE /** Error macro if no device definitions available. */ #undef DEVICE_NODES_DISABLED #ifdef HAVE_LINUX_KDEV_T_H #include #else #ifdef ENABLE_DEV_FAKE /** \name fakedev Fake definitions, as reported with configure. * Taken from \c linux/kdev_t.h. */ /** @{ */ #define MAJOR(dev) ((dev)>>8) #define MINOR(dev) ((dev) & 0xff) #define MKDEV(ma,mi) ((ma)<<8 | (mi)) /** @} */ #else /** No definitions, disable some code. */ #define DEVICE_NODES_DISABLED() BUG("No MAJOR(), MINOR() or MKDEV() found at configure time.") #undef MAJOR #undef MINOR #undef MKDEV #endif #endif /** @} */ /** i386 has the attribute fastcall; this is used for a few * small functions. */ #undef HAVE_FASTCALL #ifdef HAVE_FASTCALL #define FASTCALL __attribute__((fastcall)) #else #define FASTCALL #endif /** Changing owner/group for symlinks possible? */ #undef HAVE_LCHOWN /** Changing timestamp for symlinks? */ #undef HAVE_LUTIMES /** For Solaris 10, thanks Peter. 
*/ #ifndef NAME_MAX #define NAME_MAX (FILENAME_MAX) #endif #undef HAVE_STRSEP #ifndef HAVE_STRSEP char * strsep (char **stringp, const char *delim); #endif #undef HAVE_FMEMOPEN #ifdef HAVE_FMEMOPEN #define ENABLE_DEBUGBUFFER 1 #else #undef ENABLE_DEBUGBUFFER #endif /** Check for doors; needed for Solaris 10, thanks XXX */ #ifndef S_ISDOOR #define S_ISDOOR(x) (0) #endif #endif fsvs-1.2.6/src/ac_list.c0000644000202400020240000001226111264677022014031 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include "global.h" #include "actions.h" #include "status.h" #include "commit.h" #include "update.h" #include "export.h" #include "log.h" #include "cat.h" #include "ignore.h" #include "cp_mv.h" #include "sync.h" #include "checkout.h" #include "diff.h" #include "url.h" #include "add_unvers.h" #include "props.h" #include "info.h" #include "revert.h" #include "remote.h" #include "resolve.h" #include "build.h" /** \file * List of actions, their command line names, and corresponding flags. */ /** Array of command name pointers. * The \c acl at the beginning means ACtion List. 
*/ static const char *acl_status[] = { "status", NULL }, *acl_commit[] = { "commit", "checkin", "ci", NULL }, *acl_update[] = { "update", NULL }, *acl_export[] = { "export", NULL }, *acl_build[] = { "_build-new-list", NULL }, *acl_delay[] = { "delay", NULL }, *acl_remote[] = { "remote-status", "rs", NULL }, *acl_ignore[] = { "ignore", NULL }, *acl_rign[] = { "rel-ignore", "ri", "r-i", NULL }, *acl_groups[] = { "groups", "groupings", "grps", NULL }, *acl_add[] = { "add", NULL }, *acl_copyfr[] = { "copyfrom-detect", "copy-detect", NULL }, *acl_cp[] = { "copy", "move", "cp", "mv", NULL }, *acl_uncp[] = { "uncopy", NULL }, *acl_unvers[] = { "unversion", NULL }, *acl_log[] = { "log", NULL }, *acl_cat[] = { "cat", NULL }, *acl_resolv[] = { "resolved", NULL }, *acl_checko[] = { "checkout", "co", NULL }, *acl_sync_r[] = { "sync-repos", NULL }, *acl_revert[] = { "revert", "undo", NULL }, *acl_prop_l[] = { "prop-list", "pl", NULL }, *acl_prop_g[] = { "prop-get", "pg", NULL }, *acl_prop_s[] = { "prop-set", "ps", NULL }, *acl_prop_d[] = { "prop-del", "pd", NULL }, *acl_diff[] = { "diff", NULL }, *acl_help[] = { "help", "?", NULL }, *acl_info[] = { "info", NULL }, /** \todo: remove initialize */ *acl_urls[] = { "urls", "initialize", NULL }; /* A generated file. */ #include "doc.g-c" /** This \#define is used to save us from writing the member names, in * order to get a nice tabular layout. * Simply writing the initializations in structure order is not good; * a simple re-arrange could make problems. */ #define ACT(nam, _work, _act, ...) 
\ { .name=acl_##nam, .help_text=hlp_##nam, \ .work=_work, .local_callback=_act, \ __VA_ARGS__ } /** Use the progress uninitializer */ #define UNINIT .local_uninit=st__progress_uninit /** Store update-pipe strings */ #define DECODER .needs_decoder=1 /** Commands obeys filtering via -f */ #define FILTER .only_opt_filter=1 /** Wants a current value in estat::st */ #define STS_WRITE .overwrite_sts_st=1 /** waa__update_dir() may look for new entries */ #define DIR_UPD .do_update_dir=1 /** Action doesn't write into WAA, may be used by unpriviledged user */ #define RO .is_readonly=1 /** -. */ struct actionlist_t action_list[]= { /* The first action is the default. */ ACT(status, st__work, st__action, FILTER, STS_WRITE, DIR_UPD, RO), ACT(commit, ci__work, ci__action, UNINIT, FILTER, DIR_UPD), ACT(update, up__work, st__progress, UNINIT, DECODER), ACT(export, exp__work, NULL, .is_import_export=1, DECODER), ACT(unvers, au__work, au__action, .i_val=RF_UNVERSION, STS_WRITE), ACT( add, au__work, au__action, .i_val=RF_ADD, STS_WRITE), ACT( diff, df__work, NULL, DECODER, STS_WRITE, RO), ACT(sync_r, sync__work, NULL, .repos_feedback=sync__progress, .keep_user_prop=1), ACT( urls, url__work, NULL), ACT(revert, rev__work, NULL, UNINIT, DECODER, .keep_children=1), ACT(groups, ign__work, NULL, .i_val=0, DIR_UPD), ACT(ignore, ign__work, NULL, .i_val=HAVE_GROUP, DIR_UPD), ACT( rign, ign__rign, NULL, .i_val=HAVE_GROUP, DIR_UPD), ACT(copyfr, cm__detect, st__progress, UNINIT, DIR_UPD, STS_WRITE), ACT( cp, cm__work, NULL), ACT( cat, cat__work, NULL), ACT( uncp, cm__uncopy, NULL), ACT(resolv, res__work, res__action, .is_compare=1), ACT( log, log__work, NULL, RO), ACT(checko, co__work, NULL, DECODER, .repos_feedback=st__rm_status), ACT( build, bld__work, st__status, DIR_UPD), ACT( delay,delay__work, st__status, RO), /* For help we set import_export, to avoid needing a WAA * (default /var/spool/fsvs) to exist. 
*/ ACT( help, ac__Usage, NULL, .is_import_export=1, RO), ACT( info, info__work, info__action, RO), ACT(prop_g,prp__g_work, NULL, RO), ACT(prop_s,prp__s_work, NULL, .i_val=FS_NEW), ACT(prop_d,prp__s_work, NULL, .i_val=FS_REMOVED), ACT(prop_l,prp__l_work, NULL, RO), ACT(remote, up__work, NULL, .is_compare=1, .repos_feedback=st__rm_status), }; /** -. */ const int action_list_count = sizeof(action_list)/sizeof(action_list[0]); /** -. */ struct actionlist_t *action=action_list; fsvs-1.2.6/src/diff.h0000644000202400020240000000107410777063155013334 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __DIFF_H__ #define __DIFF_H__ /** \file * \ref diff action header file. */ #include "global.h" #include "actions.h" /** Diff command main function. */ work_t df__work; #endif fsvs-1.2.6/src/waa.h0000644000202400020240000003362411331113742013163 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __WAA_H__ #define __WAA_H__ #include #include #include "global.h" #include "actions.h" /** \file * WAA functions header file. */ /** Max macro. * Taken from gcc info manual. \c >? and \c _b ? _a : _b; }) /** Entry list for disk-order update. * This structure is used to store a linked list of struct \c estat * in (mostly) ascending inode order. 
It is used in \c waa__update_tree() * to \c lstat() all (needed) entries in (hopefully) an order which minimizes * the backtracking of the storage media. * What this means is: The harddisk head should go straight in one direction, * and avoid seeking as much as possible. */ struct waa__entry_blocks_t { /** Pointer to packed struct \c estat array. */ struct estat *first; /** Pointers for linked list. @{ */ struct waa__entry_blocks_t *next, *prev; /** @} */ /** Number of entries in array */ int count; }; /** First block for to-be-updated pointers. */ extern struct waa__entry_blocks_t waa__entry_block; /** \defgroup waa_files Files used by fsvs * \ingroup compat * * \c FSVS uses various files to store its configuration and informations * about the system it is running on. * * Two file trees are used:
    *
  • \c /var/spool/fsvs (if not overridden by \ref o_waa "$FSVS_WAA"). * The WAA stores volatile data that should not be backed up; the files * have only lower-case letters. *
  • \c /etc/fsvs (or \ref o_conf "$FSVS_CONF") * This is used to store configuration data, eg. for the working copies. * The names of files stored here have the first letter in upper-case. * Having this data backed-up (eg. along with the rest of the filesystem) * allows easy recovery of the configuration. * The single exception are the \ref dir files; these are, strictly seen, * per working copy, but are stored in the spool directory, as they are * reconstructed on restore and would only give conflicts with old * versions. \n * Please note that it's entirely fine to keep this directory versioned * via \c FSVS, to have the ignore patterns and URL list stored; and in * fact that would happen automatically if you have \c /etc as working * copy. *
* * Generally a path can be of (nearly) arbitrary length, and have every * character (except \c NUL [\c \\0]) in it. * * So wherever pathnames or similar things are stored (eg. patterns), they * are \c NUL -terminated; for addressing specific files the MD5 hash over * the filename is used. \n * * The written data sets normally have linefeeds (\c \\n) in them, to ease * inspection in an editor. * * Please see waa.c for more details. * @{ */ /** \anchor waa_wc \name Per working copy * These are stored in a subdirectory of the WAA, named by the MD5-sum of * the WC-path. * @{ */ /** \anchor dir List of files/subdirs/devices/symlinks in and below this * working copy directory. * * The filelists remember the last committed state of entries. That * includes the ctime, mtime, unix-mode (with flags for * directory/device/symlink/file), MD5 sum, size in bytes, inode, tree * relation, number of child nodes, user and group, and filename. The path * can be recreated from the tree-structure and the filenames. * * The header includes fields such as header version, header length, number * of entries, needed space for the filenames, and the length of the * longest path - most of that for memory allocation. * * See also \a waa__output_tree(). * */ #define WAA__DIR_EXT "dir" /** \anchor ign List of groupings ("Identification Groups for New entries", * formally "Ignore patterns"). * They consist of a header with the number of patterns, followed by the * group, a whitespace, and the pattern (as, string with options); \c NUL * -terminated, \c LF -separated. */ #define WAA__IGNORE_EXT "Ign" /** \anchor urls List of URLs. * They consist of a header with the number of URLs, followed by the URLs * themselves; \c NUL -terminated, \c LF -separated. */ #define WAA__URLLIST_EXT "Urls" /** \anchor urls Current revisions of the URLs. * Very similar to \c WAA__URLLIST_EXT (see \ref url__load_list()). * These are split into a separate file, so that no data in \c /etc is * changed after a commit. 
*/ #define WAA__URL_REVS "revs" /** \anchor copy Hash of copyfrom relations. * The key is the destination-, the value is the source-path; they are * stored relative to the wc root, without the leading \c "./", ie. as \c * "dir/test". The \c \\0 is included in the data. */ #define WAA__COPYFROM_EXT "Copy" /** \anchor readme Information file. * Here a short explanation for this directory is stored. */ #define WAA__README "README.txt" /** @} */ /** \anchor waa_file \name Per file/directory * The cached informations (per-file) are located in the \c cache * subdirectory of the WAA; two subdirectory levels are created below that. * * @{ */ /** \anchor md5s List of MD5s of the manber blocks of a file. * * To speed up comparing and committing large files, these files hold a * list of MD5 hashes for the manber blocks. \n * This way big files don't have to be hashed in full to check whether * they've changed; and the manber blocks can be used for the delta algorithm. * * Maybe the parameters for manber hashing should be stored there, too - * currently they're hardcoded. * * Furthermore in the WAA directory of the working copy we store a * (temporary) file as an index for all entries' MD5 checksums. */ #define WAA__FILE_MD5s_EXT "md5s" /** \anchor prop List of other properties. * These are properties not converted to meta-data. */ #define WAA__PROP_EXT "prop" /** \anchor cflct List of other conflict files. * Defined as filename\\0\\nfilename\\0\\n... */ #define WAA__CONFLICT_EXT "cflct" /** @} */ /** \anchor ino * \name Temporary copy/move detection database * Entries are addressed by device and inode. * @{ */ /** For files */ #define WAA__FILE_INODE_EXT "fino" /** For directories */ #define WAA__DIR_INODE_EXT "dino" /** @} */ /** \anchor name * \name Temporary copy/move detection database * Entries are addressed by name. 
* @{ */ /** For files */ #define WAA__FILE_NAME_EXT "fname" /** For directories */ #define WAA__DIR_NAME_EXT "dname" /** @} */ /** \name Short names for the open modes. * @{ */ #define WAA__WRITE (O_WRONLY | O_CREAT | O_TRUNC) #define WAA__READ (O_RDONLY) #define WAA__APPEND (O_APPEND | O_CREAT) /** @} */ /** @} */ /* this should be optimized into a constant. * verified for gcc (debian 4.0.0-7ubuntu2) */ #define WAA__MAX_EXT_LENGTH max( \ max( \ max(strlen(WAA__CONFLICT_EXT), \ strlen(WAA__COPYFROM_EXT)), \ strlen(WAA__IGNORE_EXT) ), \ max( \ max(max(strlen(WAA__DIR_EXT), \ strlen(WAA__FILE_MD5s_EXT)), \ max(strlen(WAA__PROP_EXT), \ strlen(WAA__CONFLICT_EXT)) ), \ max( \ max(strlen(WAA__FILE_INODE_EXT), \ strlen(WAA__DIR_INODE_EXT)), \ max(strlen(WAA__FILE_NAME_EXT), \ strlen(WAA__DIR_NAME_EXT)) ) ) ) /** Store the current working directory. */ int waa__save_cwd(char **where, int *len, int additional); /** Initialize WAA operations. */ int waa__init(void); /** Create a directory; ignore \c EEXIST. */ int waa__mkdir_mask(char *dir, int including_last, int mask); /** Create a directory, ignore \c EEXIST, and use a default mask. */ int waa__mkdir(char *dir, int including_last); /* Given an \a path and an \a extension, this function returns a * \a filehandle that was opened for this entry in the WAA with \a flags. */ int waa__open(char *path, const char *extension, int mode, int *filehandle); /** This function closes a writable filehandle that was opened in the * WAA via \c waa__open(). */ int waa__close(int filehandle, int has_failed); /** Wrapper function. Opens a \c dir-file for the \a directory in the WAA. * \todo Should possibly be eliminated. */ int waa__open_dir(char *directory, int write, int *fh); /** Creates the entries tree below \c root . */ int waa__build_tree(struct estat *root); /** Write the \ref dir file for this \c root . */ int waa__output_tree(struct estat *root); /** Read the \ref dir file for the current working directory. 
*/ int waa__input_tree(struct estat *root, struct waa__entry_blocks_t **blocks, action_t *callback); /** Wrapper function for \c waa__open(). */ int waa__open_byext(char *directory, char *extension, int write, int *fh); /** Wrapper for \c waa__load_repos_urls_silent(). */ int waa__load_repos_urls(char *dir, int reserve_space); /** Load the URLs associated with \a dir (or current working directory, if * \a dir is \c NULL). */ int waa__load_repos_urls_silent(char *dir, int reserve_space); /** Returns the given directory or, if \c NULL , \c getcwd() . */ int waa__given_or_current_wd(char *directory, char **erg); /** Creates a README in the WAA. */ int waa__make_info_file(char *directory, char *name, char *dest); /** This function takes a \a path and an \a extension and tries to remove * the associated file in the WAA. */ int waa__delete_byext(char *path, char *extension, int ignore_not_exist); /** Reads the entry tree or, if none stored, builds one. */ int waa__read_or_build_tree(struct estat *root, int argc, char *normalized[], char *orig[], action_t *callback, int return_ENOENT); #define FCB__PUT_DOTSLASH (1) #define FCB__NO_REALPATH (2) /** Given a list of path arguments the \a base path and relative paths * are returned. */ int waa__find_common_base2(int argc, char *args[], char ***normalized_paths, int flags); /** Wrapper for waa__find_common_base2. */ static inline int waa__find_common_base(int argc, char *args[], char **normalized[]) { return waa__find_common_base2(argc, args, normalized, 0); } /** Similar to \ref waa__find_common_base, but allows only specification of * a WC root. */ int waa__find_base(struct estat *root, int *argc, char ***args); /** Creates the WAA and CONF directories needed for \a wc_path. */ int waa__create_working_copy(const char const *wc_dir); /** Stores the path of the working copy. * Not needed if waa__find_common_base or similar is called. 
*/ int waa__set_working_copy(const char const *wc_dir); /** \name Building paths for FSVS's datafiles. * @{ */ /** The path should be in the WAA. */ #define GWD_WAA (1) /** The path should be in the configuration area. */ #define GWD_CONF (2) /** The intermediate directories should be created. */ #define GWD_MKDIR (4) /** This function determines the directory used in the WAA area for the * given \a path. */ int waa__get_waa_directory(char *path, char **erg, char **eos, char **start_of_spec, int flags); /** Function that returns the right flag for the wanted file. * To be used in calls of \ref waa__get_waa_directory(). */ static inline int waa__get_gwd_flag(const char *const extension) { return !extension || (isupper(extension[0])) ? GWD_CONF : GWD_WAA; } /** @} */ /** This function traverses the tree and sets \c entry_status for the * marked entries. */ int waa__update_tree(struct estat *root, struct waa__entry_blocks_t *blocks); /** Insert an \a entry block with \a count entries into the * \a waa__entry_blocks list, to get it updated later * by \a waa__update_tree(). */ /** The list of entries to be updated is registered after the given block. * */ int waa__new_entry_block(struct estat *entry, int count, struct waa__entry_blocks_t *previous); /** Simple wrapper; inserts entries at the start of the list. */ static inline int waa__insert_entry_block(struct estat *entry, int count) { return waa__new_entry_block(entry, count, &waa__entry_block); } /** The given paths are looked for in the entries tree, are marked * for update, and their parents are flagged. */ int waa__partial_update(struct estat *root, int argc, char *normalized[], char *orig[], struct waa__entry_blocks_t *blocks); /** This function traverses the tree and calls the handler function * for the marked entries; directories before their children, and in order * sorted by name. 
*/ int waa__do_sorted_tree(struct estat *root, action_t handler); /** A wrapper around dir__enumerator(), ignoring entries below \c * $FSVS_WAA. */ int waa__dir_enum(struct estat *this, int est_count, int by_name); /** Copies all sub-entries of \a src to \a dest. */ int waa__copy_entries(struct estat *src, struct estat *dest); /** How many bytes the \ref dir file header has. */ #define HEADER_LEN (64) /** Which version does the dir file have? */ #define WAA_VERSION (6) /** Copy URL revision number. * The problem on commit is that we send a number of entries to the * repository, and only afterwards we get to know which revision number * they got. * To avoid having to run through the whole tree again we use this special * marker, which gets set on the committed entries, to be corrected on * ops__save_1entry(). */ #define SET_REVNUM (-12) /** Returns a distict name and filehandle. */ int waa__get_tmp_name(const char *base_dir, char **output, apr_file_t **handle, apr_pool_t *pool); /** Our current WC base. */ extern char *wc_path; /** How much bytes the \ref wc_path has. */ extern int wc_path_len; /** Length of paths of temporary files. */ extern int waa_tmp_path_len; /** Buffers for temporary filename storage. * @{ */ extern char *waa_tmp_path, *waa_tmp_fn, *conf_tmp_path, *conf_tmp_fn; /** @} */ #endif fsvs-1.2.6/src/dox/0000755000202400020240000000000012554717233013041 5ustar marekmarekfsvs-1.2.6/src/dox/HOWTO-BACKUP.dox0000644000202400020240000001531111264677022015417 0ustar marekmarek/** \defgroup howto A small collection of HOW-TOs \ingroup userdoc Here you see a small collection of HOW-TOs. These aim to give you a small overview about common tasks. The paths and examples are based on a current Debian/Testing, but should be easily transferable to other Linux distributions or other UNIXes. */ /** \defgroup howto_backup HOWTO: Backup \ingroup howto This document is a step-by-step explanation how to do backups using FSVS. 
\section howto_backup_prep Preparation If you're going to back up your system, you have to decide what you want to have stored in your backup, and what should be left out. Depending on your system usage and environment you first have to decide:
  • Do you only want to backup your data in \c /home?
    • Less storage requirements
    • In case of hardware crash the OS must be set up again
  • Do you want to keep track of your configuration in \c /etc?
    • Very small storage overhead
    • Not much use for backup/restore, but shows what has been changed
  • Or do you want to backup your whole installation, from \c / on?
    • Whole system versioned, restore is only a few commands
    • Much more storage space needed - typically you'd need at least a few GB free space.
The next few moments should be spent thinking about the storage space for the repository - will it be on the system harddisk, a secondary or an external harddisk, or even off-site? \note If you just created a fresh repository, you probably should create the "default" directory structure for subversion - \c trunk, \c branches, \c tags; this layout might be useful for your backups.\n The URL you'd use in fsvs would go to \c trunk. Possibly you'll have to take the available bandwidth into your considerations; a single home directory may be backed up on a 56k modem, but a complete system installation would likely need at least some kind of DSL or LAN. \note If this is a production box with sparse, small changes, you could take the initial backup on a local harddisk, transfer the directory with some media to the target machine, and switch the URLs. A fair bit of time should go to a small investigation which file patterns and paths you \b not want to back-up.
  • Backup files like \c *.bak, \c *~, \c *.tmp, and similar
  • History files: .sh-history and similar in the home-directories
  • Cache directories: your favourite browser might store many MB of cached data in you home-directories
  • Virtual system directories, like \c /proc and \c /sys, \c /dev/shmfs.
\section howto_backup_first_steps Telling FSVS what to do Given \c $WC as the working directory - the base of the data you'd like backed up (\c /, \c /home), and \c $URL as a valid subversion URL to your (already created) repository path. Independent of all these details the first steps look like these: \code cd $WC fsvs urls $URL \endcode Now you have to say what should be ignored - that'll differ depending on your needs/wishes. \code fsvs ignore './§**~' './§**.tmp' './§**.bak' fsvs ignore ./proc/ ./sys/ ./tmp/ fsvs ignore ./var/tmp/ ./var/spool/lpd/ fsvs ignore './var/log/§*.gz' fsvs ignore ./var/run/ /dev/pts/ fsvs ignore './etc/*.dpkg-dist' './etc/*.dpkg-new' fsvs ignore './etc/*.dpkg-old' './etc/*.dpkg-bak' \endcode \note \c /var/run is for transient files; I've heard reports that \ref revert "reverting" files there can cause problems with running programs.\n Similar for \c /dev/pts - if that's a \c devpts filesystem, you'll run into problems on \ref update or \ref revert - as FSVS won't be allowed to create entries in this directory. Now you may find that you'd like to have some files encrypted in your backup - like \c /etc/shadow, or your \c .ssh/id_* files. So you tell fsvs to en/decrypt these files: \code fsvs propset fsvs:commit-pipe 'gpg -er {your backup key}' /etc/shadow /etc/gshadow fsvs propset fsvs:update-pipe 'gpg -d' /etc/shadow /etc/gshadow \endcode \note This are just examples. You'll probably have to exclude some other paths and patterns from your backup, and mark some others as to-be-filtered. \section howto_backup_first_commit The first backup \code fsvs commit -m "First commit." \endcode That's all there is to it! \section howto_backup_usage Further use and maintenance The further usage is more or less the \c commit command from the last section. \n When do you have to do some manual work?
  • When ignore patterns change.
    • New filesystems that should be ignored, or would be ignored but shouldn't
    • You find that your favorite word-processor leaves many *.segv files behind, and similar things
  • If you get an error message from fsvs, check the arguments and retry. In desperate cases (or just because it's quicker than debugging yourself) ask on dev [at] fsvs.tigris.org.
\section howto_backup_restore Restoration in a working system Depending on the circumstances you can take different ways to restore data from your repository.
  • "fsvs export" allows you to just dump some repository data into your filesystem - eg. into a temporary directory to sort things out.
  • Using "fsvs revert" you can get older revisions of a given file, directory or directory tree inplace. \n
  • Or you can do a fresh checkout - set an URL in an (empty) directory, and update to the needed revision.
  • If everything else fails (no backup media with fsvs on it), you can use subversion commands (eg. \c export) to restore needed parts, and update the rest with fsvs.
\section howto_backup_recovery Recovery for a non-booting system In case of a real emergency, when your harddisks crashed or your filesystem was eaten and you have to re-partition or re-format, you should get your system working again by
  • booting from a knoppix or some other Live-CD (with FSVS on it),
  • partition/format as needed,
  • mount your harddisk partitions below eg. \c /mnt,
  • and then recovering by
\code $ cd /mnt $ export FSVS_CONF=/etc/fsvs # if non-standard $ export FSVS_WAA=/var/spool/fsvs # if non-standard $ fsvs checkout -o softroot=/mnt \endcode If somebody asks really nice I'd possibly even create a \c recovery command that deduces the \c softroot parameter from the current working directory. For more information please take a look at \ref o_softroot. \section howto_backup_feedback Feedback If you've got any questions, ideas, wishes or other feedback, please tell us in the mailing list users [at] fsvs.tigris.org. Thank you! */ // vi: filetype=doxygen spell spelllang=en_us fsvs-1.2.6/src/dox/HOWTO-MASTER_LOCAL.dox0000644000202400020240000002507711213413040016347 0ustar marekmarek/** \defgroup howto_master_local HOWTO: Master/Local repositories \ingroup howto This HOWTO describes how to use a single working copy with multiple repositories. Please read the \ref howto_backup first, to know about basic steps using FSVS. \section howto_masloc_ratio Rationale If you manage a lot of machines with similar or identical software, you might notice that it's a bit of work keeping them all up-to-date. Sure, automating distribution via rsync or similar is easy; but then you get identical machines, or you have to play with lots of exclude patterns to keep the needed differences. Here another way is presented; and even if you don't want to use FSVS for distributing your files, the ideas presented here might help you keep your machines under control. \section howto_masloc_prep Preparation, repository layout In this document the basic assumption is that there is a group of (more or less identical) machines, that share most of their filesystems. Some planning should be done beforehand; while the ideas presented here might suffice for simple versioning, your setup can require a bit of thinking ahead. 
This example uses some distinct repositories, to achieve a bit more clarity; of course these can simply be different paths in a single repository (see \ref howto_masloc_single_repos for an example configuration). Repository in URL \c base: \code trunk/ bin/ ls true lib/ libc6.so modules/ sbin/ mkfs usr/ local/ bin/ sbin/ tags/ branches/ \endcode Repository in URL \c machine1 (similar for machine2): \code trunk/ etc/ HOSTNAME adjtime network/ interfaces passwd resolv.conf shadow var/ log/ auth.log messages tags/ branches/ \endcode \subsection howto_masloc_prep_user User data versioning If you want to keep the user data versioned, too, a idea might be to start a new working copy in \b every home directory; this way - the system- and (several) user-commits can be run in parallel, - the intermediate \c home directory in the repository is not needed, and - you get a bit more isolation (against FSVS failures, out-of-space errors and similar). - Furthermore FSVS can work with smaller file sets, which helps performance a bit (less dentries to cache at once, less memory used, etc.). \code A/ Andrew/ .bashrc .ssh/ .kde/ Alexander/ .bashrc .ssh/ .kde/ B/ Bertram/ \endcode A cronjob could simply loop over the directories in \c /home, and call fsvs for each one; giving a target URL name is not necessary if every home-directory is its own working copy. \note URL names can include a forward slash \c / in their name, so you might give the URLs names like \c home/Andrew - although that should not be needed, if every home directory is a distinct working copy. \section howto_masloc_using Using master/local repositories Imagine having 10 similar machines with the same base-installation. Then you install one machine, commit that into the repository as \c base/trunk, and make a copy as \c base/released. The other machines get \c base/released as checkout source, and another (overlaid) from eg. \c machine1/trunk. 
\n Per-machine changes are always committed into the \c machineX/trunk of the per-machine repository; this would be the host name, IP address, and similar things. On the development machine all changes are stored into \c base/trunk; if you're satisfied with your changes, you merge them (see \ref howto_masloc_branches) into \c base/released, whereupon all other machines can update to this latest version. So by looking at \c machine1/trunk you can see the history of the machine-specific changes; and in \c base/released you can check out every old version to verify problems and bugs. \note You can take this system a bit further: optional software packages could be stored in other subtrees. They should be of lower priority than the base tree, so that in case of conflicts the base should always be preferred (but see \ref howto_masloc_note_1). Here is a small example; \c machine1 is the development machine, \c machine2 is a \e client. \code machine1$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine1/trunk machine1$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk # Determine differences, and commit them machine1$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log machine1$ fsvs ci -o commit_to=base / \endcode Now you've got a base-install in your repository, and can use that on the other machine: \code machine2$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine2/trunk machine2$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk machine2$ fsvs sync-repos # Now you see differences of this machines' installation against the other: machine2$ fsvs st # You can see what is different: machine2$ fsvs diff /etc/X11/xorg.conf # You can take the base installations files: machine2$ fsvs revert /bin/ls # And put the files specific to this machine into its repository: machine2$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log \endcode Now, if this machine has a harddisk failure or 
needs setup for any other reason, you boot it (eg. via PXE, Knoppix or whatever), and do (\ref howto_masloc_note_3) \code # Re-partition and create filesystems (if necessary) machine2-knoppix$ fdisk ... machine2-knoppix$ mkfs ... # Mount everything below /mnt machine2-knoppix$ mount /mnt/[...] machine2-knoppix$ cd /mnt # Do a checkout below /mnt machine2-knoppix$ fsvs co -o softroot=/mnt \endcode \section howto_masloc_branches Branching, tagging, merging Other names for your branches (instead of \c trunk, \c tags and \c branches) could be \c unstable, \c testing, and \c stable; your production machines would use \c stable, your testing environment \c testing, and in \c unstable you'd commit all your daily changes. \note Please note that there's no merging mechanism in FSVS; and as far as I'm concerned, there won't be. Subversion just gets automated merging mechanisms, and these should be fine for this usage too. (\ref howto_masloc_note_4) \subsection howto_masloc_branch_tags Thoughts about tagging Tagging works just like normally; although you need to remember to tag more than a single branch. Maybe FSVS should get some knowledge about the subversion repository layout, so a fsvs tag would tag all repositories at once? It would have to check for duplicate tag-names (eg. on the \c base -branch), and just keep it if it had the same copyfrom-source. But how would tags be used? Define them as source URL, and checkout? Would be a possible case. Or should fsvs tag do a \e merge into the repository, so that a single URL contains all files currently checked out, with copyfrom-pointers to the original locations? Would require using a single repository, as such pointers cannot be across different repositories. If the committed data includes the \c $FSVS_CONF/.../Urls file, the original layout would be known, too - although to use it a \ref sync-repos would be necessary. 
\section howto_masloc_single_repos Using a single repository A single repository would have to be partitioned in the various branches that are needed for bookkeeping; see these examples. Depending on the number of machines it might make sense to put them in a 1- or 2 level deep hierarchy; named by the first character, like \code machines/ A/ Axel/ Andreas/ B/ Berta/ G/ Gandalf/ \endcode \subsection howto_masloc_single_simple Simple layout Here only the base system gets branched and tagged; the machines simply backup their specific/localized data into the repository. \code # For the base-system: trunk/ bin/ usr/ sbin/ tags/ tag-1/ branches/ branch-1/ # For the machines: machines/ machine1/ etc/ passwd HOSTNAME machine2/ etc/ passwd HOSTNAME \endcode \subsection howto_masloc_single_per_area Per-area Here every part gets its \c trunk, \c branches and \c tags: \code base/ trunk/ bin/ sbin/ usr/ tags/ tag-1/ branches/ branch-1/ machine1/ trunk/ etc/ passwd HOSTNAME tags/ tag-1/ branches/ machine2/ trunk/ etc/ passwd HOSTNAME tags/ branches/ \endcode \subsection howto_masloc_single_common_ttb Common trunk, tags, and branches Here the base-paths \c trunk, \c tags and \c branches are shared: \code trunk/ base/ bin/ sbin/ usr/ machine2/ etc/ passwd HOSTNAME machine1/ etc/ passwd HOSTNAME tags/ tag-1/ branches/ branch-1/ \endcode \section howto_masloc_notes Other notes \subsection howto_masloc_note_1 1 Conflicts should not be automatically merged. If two or more trees bring the same file, the file from the \e highest tree wins - this way you always know the file data on your machines. It's better if a single software doesn't work, compared to a machine that no longer boots or is no longer accessible (eg. by SSH)). So keep your base installation at highest priority, and you've got good chances that you won't loose control in case of conflicting files. 
\subsection howto_masloc_note_2 2 If you don't know which files are different in your installs, - install two machines, - commit the first into fsvs, - do a \ref sync-repos on the second, - and look at the \ref status output. \subsection howto_masloc_note_3 3 As debian includes FSVS in the near future, it could be included on the next KNOPPIX, too! Until then you'd need a custom boot CD, or copy the absolute minimum of files to the harddisk before recovery. There's a utility \c svntar available; it allows you to take a snapshot of a subversion repository directly into a \c .tar -file, which you can easily export to destination machine. (Yes, it knows about the meta-data properties FSVS uses, and stores them into the archive.) \subsection howto_masloc_note_4 4 Why no file merging? Because all real differences are in the per-machine files -- the files that are in the \c base repository are changed only on a single machine, and so there's an unidirectional flow. BTW, how would you merge your binaries, eg. \c /bin/ls? \section howto_masloc_feedback Feedback If you've got any questions, ideas, wishes or other feedback, please tell us in the mailing list users [at] fsvs.tigris.org. Thank you! */ // vi: filetype=doxygen spell spelllang=en_us fsvs-1.2.6/src/dox/dev.dox0000644000202400020240000002312510775334472014341 0ustar marekmarek/** \addtogroup dev \section dev_welcome Dear developers/debuggers, thank you for your interest in fsvs. I highly appreciate any help, tips and tricks, and even if it's just a bug report I want to know that. I'm also looking forward to documentation updates, and notifying me about mistakes will be politely answered, too. */ /** \defgroup dev_debug What to do in case of errors \ingroup dev First, please read the documentation to rule out the possibility that it's just a badly written sentence that caused misunderstanding. If you can't figure it out yourself, don't hesitate and write a bug report. 
Please include the version you're running (output of fsvs -V), the command line you're calling fsvs with, and the output it gives. Furthermore it might help diagnosing if you tried with the \ref glob_opt_verb "-v" parameter, and/or with \ref glob_opt_deb "-d"; but please mind that there might be data in the dump that you don't want to make public -- if you're not sure, ask on the mailing lists. Send these things along with a description of what you wanted to do to \b \c dev@fsvs.tigris.org or, if you like that alternative better, just file an issue. \n (The bugs I find and the things on my \c TODO are not in the issue tracker, as I can't access it while on the train - and that's where I spend the most time working on fsvs). Please be aware that I possibly need more details or some other tries to find out what goes wrong. \section dev_devs People that like to help If you know C and want to help with fsvs, Just Do It (R) :-) Look into the \c TODO file, pick your favorite point, and implement it. If you don't know C, but another programming language (like perl, python, or shell-programming), you can help, too -- help write test scripts. \n I mostly checked the positive behavior (ie. that something should happen given a set of predefined state and parameters), but testing for wrong and unexpected input makes sense, too. If you don't know any programming language, you can still look at the documentation and point me to parts which need clarifying, write documents yourself, or just fix mistakes. All contributions should \b please be sent as a unified diff, along with a description of the change, and there's a good chance to have it integrated into the fsvs code-base. \note How to generate such a diff? \n If you're using svn or svk to track fsvs usage, the "svn diff" or "svk diff" commands should do what you want. If you downloaded a \c .tar.gz or \c .tar.bz2, keep a pristine version in some directory and make your changes in another copy. 
\n When you're finished making changes, run the command \code diff -ur \e original \e new > \e my-changes.patch \endcode and send me that file. */ /** \defgroup dev_design The internal design \ingroup dev \section dev_design_terms Terms used in this document \subsection dev_des_t_entry Entry In subversion speak an entry is either a directory, a symlink or a file; In FSVS it can additionally be a block or character device. \n Sockets and pipes are currently ignored, as they're typically re-created by the various applications. \subsection dev_des_t_waa WAA, CONF Please see \ref waa_file. \section dev_des_memory_layout In-memory layout In memory fsvs builds a complete tree of the needed entries (\c struct \c estat). They are referenced with the \c parent pointers upwards to the root, and the \c estat::by_inode and \c estat::by_name downwards. \subsection dev_des_mem_alloc Storage and allocation Every directory entry can have a string space allocated, ie. space needed for the filenames in this directory (and possibly sub-directories, too.) On loading of the list in \c waa__input_tree() two memory ranges are allocated - one for the struct estats read, and one for the filenames. Because of this \c free()ing of part of the entries is not possible; a freelist for the struct estats is used, but the string space is more or less permanent. \section dev_des_algo Algorithms and assumption in the code Generally I tried to use fast and simple algorithms better than \c O(n); but it's possible that I forgot something. \subsection dev_des_algo_dsearch Searching for an entry Searching for an entry in a directory (in memory) is \c O(log2(n)), as I use \c bsearch(). \subsection dev_des_algo_output Writing the entry list Determining the correct order for writing the entries (in \c waa__output_tree()) is optimized by having all lists pre-sorted; about half the time (tested) a single compare is enough to determine the next written entry. 
\note Of course, to keep the lists sorted, a lot of comparisons have to be made before waa__output_tree(). \subsection dev_des_algo_by estat::by_inode and estat::by_name The \c by_inode and \c by_name members are pointers to arrays of pointers to entries (:-); they must reference the same entries, only the order may differ. \c by_inode must (nearly) always be valid ; \c by_name is optional. The flag \c estat::to_be_sorted tells \c waa__output_tree() that the order of the \c by_inode array might be wrong, and has to be re-sorted before use. While scanning for changes we use a global \c by_inode ordering, as this is \b much faster than naive traversal; the \c by_name array is used for comparing directories, to determine whether there are any new entries. Both arrays \b must include a \c NULL -pointer at the end of the array. \subsection dev_des_algo_manber Manber-Hash and MD5 To quickly find whether a given file has changed, and to send only the changed bytes over the wire, we take a running hash (a Manber-Hash), and whenever we find a "magic" value we take that as buffer end. We calculate the MD5 of each buffer, and store them along with their start offset in the file. So on commit we can find identical blocks and send only those, and while comparing we can return "changed" as soon as we find a difference. \section dev_des_errors Error checking and stopping Return codes are checked everywhere. The return value of functions in this program is normally (int); 0 means success, something else an error. Either this error is expected (like ENOENT for some operations) and handled, or it must be returned to the caller. Most of this is already defined in macros. Typical function layout is like this (taken from waa.c): \code int waa__make_info_link(char *directory, char *name, char *dest) { int status; char *path, *eos; STOPIF( waa___get_waa_directory(directory, &path, &eos), NULL); strcpy(eos, name); ... 
if (access(path, F_OK) != 0) STOPIF_CODE_ERR( symlink(dest, path) == -1, errno, "cannot create informational symlink '%s' -> '%s'", path, dest); ex: return status; } \endcode When a function gets called by subversion libraries, we have to use their return type. Here an example from \c commit.c: \code svn_error_t *ci___set_props(void *baton, struct estat *sts, change_any_prop_t function, apr_pool_t *pool) { const char *ccp; svn_string_t *str; int status; svn_error_t *status_svn; status=0; ... if (sts->entry_type != FT_SYMLINK) { ... str=svn_string_createf (pool, "%u %s", sts->st.uid, hlp__get_uname(sts->st.uid, "") ); STOPIF_SVNERR( function, (baton, propname_owner, str, pool) ); ... } ex: RETURN_SVNERR(status); } \endcode The various \c STOPIF() -macros automatically print an error message and, depending on the debug- and verbosity-flags given on the command line, a back trace too. Another special case is output to \c STDOUT; if we get an error \c EPIPE here, we pass it up to main() as \c -EPIPE (to avoid confusion with writing some other data), where it gets ignored. To avoid printing an error message this is hardcoded in the \c STOPIF() macros. Assertions should be checked by \c BUG_ON(condition, format_string, ...). This will cause a segmentation violation, which (for debug builds) will automatically attach a debugger (\c gdb, only if present on the system). \section dev_des_comments Comments and documentation FSVS is normalized to use doxygen format for the documentation: "/§** ... *§/". For non-trivial things it's practical to document the thoughts, too; such internal documentation uses the normal C-style comments ("/§* ... *§/"). \subsection dev_des_slash_star /§* in documentation In cases where a slash \c / and a star \c * have to be used in the documentation, there's a hack by putting a paragraph symbol (\c \\c2\\xa7 in UTF-8) between them, so that it doesn't break the comment block. There's a perl hack for documentation generation, where these get removed. 
\note For C this would not be strictly necessary; There's always the way of putting a # if 0 block around that comment block. Doxygen doesn't allow this; even if using a shell script (with comments indicated by \c #) doxygen doesn't allow "/§* or *§/. \section dev_tests About the tests \subsection dev_tests_delay Delays after commit There have been a lot of "sleep 1" commands in the tests, to get directories' mtime to change for new entries. Now they are mostly changed to a simple "-o delay=yes" on the commit just above, which should give us about half a second on average. \note If FSVS has to be run for the check, it must wait until the other instance has finished - else the dir-list file and so on won't be written; so parallel checking via \c & and \c wait doesn't really work. Simply putting delay=yes in the FSVS configuration file more than doubled the run time of the tests - this was unacceptable to me. */ // vi: filetype=doxygen spell spelllang=en_us fsvs-1.2.6/src/dox/TIPS_TRICKS.dox0000644000202400020240000000275611147210573015415 0ustar marekmarek/** \defgroup tips Tips and tricks \ingroup userdoc This is a list of tips and tricks that you might find useful. \section tip_verbose Seeing the verbose status, but only changed entries Sometimes the status \ref status_meta_changed "meta-data changed" is not enough - the differentiation between \c mtime and the permission attributes is needed. For that the command line option \ref glob_opt_verb "-v" is used; but this \e verbose mode also prints all entries, not only the changed. To solve that the \ref glob_opt_filter "filter option" gets set; with the value \c none (to reset the mask), and then with the wanted mask - to restore the default the string \c "text,meta" could be set. 
Example: \code $ fsvs status -v -f none,text,meta $ fsvs status -v -f none,text,meta /etc $ fsvs status -v -f none,text,meta some/dir another_dir and_a_file \endcode \section tip_perf Performance points Some effort has been taken to get FSVS as fast as possible. With 1.1.17 the default for checking for changes on files was altered, to do a MD5-check of files with a changed modification time but the same size (to avoid printing a \c "?" \ref status_possibly "as status"); if that affects your use-case badly you can use the \ref o_chcheck "option" to get the old (fast) behavior. Please note that not the whole file has to be read - the first changed manber block (with averaged 128kB) terminates the check. */ // vi: filetype=doxygen spell spelllang=en_us fsvs-1.2.6/src/dox/statii.dox0000644000202400020240000000632311045111660015040 0ustar marekmarek/** \defgroup howto_entry_statii HOWTO: Understand the entries' statii \ingroup howto Transitions between the various statii. Here is a small overview about the various entry-statii and their change conditions. If you find any mismatches between this graphic and FSVS behaviour, don't hesitate to ask on the dev@ mailing list. \dot digraph { // use tooltip? // Note: the labelangle is manually optimized for the current // ordering - which isn't stable, so it might go wrong. 
edge [fontname=Arial, fontsize=7, labeldistance=0]; node [shape=box, fontname=Arial, fontsize=9]; subgraph cluster_2 { color=white; // --------------- Statii { rank=same; node [style=bold]; New; Not_existing [label="Not existing"]; } Ignored; Deleted; { rank=same; Added; CopiedU [label="Copied,\nunchanged"]; } { rank=same; Changed; Committed [color=green, style=bold]; } Unversioned [label="To-be-\nunversioned"]; { rank=same; Conflicted; CopiedC [label="Copied,\nchanged"]; } // --------------- Commands edge [color=brown]; New -> Added [label="add", URL="\ref add" ]; Ignored -> Added [label="add", URL="\ref add"]; Committed -> Unversioned [label="unversion", URL="\ref unversion"]; { edge [ label="update", URL="\ref update"]; Committed -> Committed; Changed -> Conflicted; } Conflicted -> Changed [label="resolve", URL="\ref resolve"]; { edge [ color=green, URL="\ref commit", tooltip="commit"]; Added -> Committed; New -> Committed; CopiedU -> Committed; Changed -> Committed; CopiedC -> Committed; Unversioned -> New [ label="commit;\nremoved from\nrepository;\nlocally kept,\nbut forgotten."]; Deleted:w -> Not_existing [ label="commit;\nremoved from\nrepository\nand local data."]; } New -> CopiedU [label="copy", URL="\ref cp"]; CopiedU -> New [label="uncopy", URL="\ref uncp"]; { edge [ color=blue, URL="\ref revert", tooltip="revert"]; CopiedC -> CopiedU; Changed -> Committed; Deleted -> Committed; Added -> New; Unversioned -> Committed; Conflicted -> Committed; } // Configuration edge [color=black]; New -> Ignored [label="ignore\npattern\nmatches", URL="\ref ignore"]; // External edge [color=orange, style=dashed]; CopiedU -> CopiedC [label="edit"]; Committed -> Changed [label="edit"]; Committed -> Deleted [label="rm"]; Not_existing -> New [ label="Create"]; } subgraph cluster_1 { margin=0; nodesep=0.2; ranksep=0.2; color=white; node [shape=plaintext, width=0, height=0, label=""]; { rank=same; revert1 -> revert2 [color=blue, label="revert", URL="\ref revert"]; } { 
rank=same; commit1 -> commit2 [label="commit", color=green, URL="\ref commit"]; } { rank=same; other1 -> other2 [color=brown, label="other\ncommands"]; } { rank=same; extern1 -> extern2 [color=orange, label="external\nchanges", style=dashed]; } edge [ style=invis ]; revert1 -> commit1 -> other1 -> extern1; } } \enddot */ // vi: filetype=doxygen spell spelllang=en_gb fsvs-1.2.6/src/dox/options.dox0000644000202400020240000006720411556526402015256 0ustar marekmarek/** \defgroup options Further options for FSVS. \ingroup userdoc List of settings that modify FSVS' behaviour. FSVS understands some options that modify its behaviour in various small ways. \section oh_overview Overview \subsection o__hlist This document This document lists all available options in FSVS, in an \ref o__list "full listing" and in \ref o__groups "groups". Furthermore you can see their \ref o__prio "relative priorities" and some \ref o__examples "examples". \subsection o__groups Semantic groups
  • \ref oh_display
  • \ref oh_diff
  • \ref oh_commit
  • \ref oh_performance
  • \ref oh_base
  • \ref oh_debug
\subsection o__list Sorted list of options FSVS currently knows:
  • \c all_removed - \ref o_all_removed
  • \c author - \ref o_author
  • \c change_check - \ref o_chcheck
  • \c colordiff - \ref o_colordiff
  • \c commit_to - \ref o_commit_to
  • \c conflict - \ref o_conflict
  • \c conf - \ref o_conf.
  • \c config_dir - \ref o_configdir.
  • \c copyfrom_exp - \ref o_copyfrom_exp
  • \c debug_output - \ref o_debug_output
  • \c debug_buffer - \ref o_debug_buffer
  • \c delay - \ref o_delay
  • \c diff_prg, \c diff_opt, \c diff_extra - \ref o_diff
  • \c dir_exclude_mtime - \ref o_dir_exclude_mtime
  • \c dir_sort - \ref o_dir_sort
  • \c empty_commit - \ref o_empty_commit
  • \c empty_message - \ref o_empty_msg
  • \c filter - \ref o_filter, but see \ref glob_opt_filter "-f".
  • \c group_stats - \ref o_group_stats.
  • \c limit - \ref o_logmax
  • \c log_output - \ref o_logoutput
  • \c merge_prg, \c merge_opt - \ref o_merge
  • \c mkdir_base - \ref o_mkdir_base
  • \c password - \ref o_passwd
  • \c path - \ref o_opt_path
  • \c softroot - \ref o_softroot
  • \c stat_color - \ref o_status_color
  • \c stop_change - \ref o_stop_change
  • \c verbose - \ref o_verbose
  • \c warning - \ref o_warnings, but see \ref glob_opt_warnings "-W".
  • \c waa - \ref o_waa "waa".
\subsection o__prio Priorities for option setting The priorities are
  • Command line \e (highest)
  • Environment variables. These are named as FSVS_{upper-case option name}.
  • $HOME/.fsvs/wc-dir/config
  • $FSVS_CONF/wc-dir/config
  • $HOME/.fsvs/config
  • $FSVS_CONF/config
  • Default value, compiled in \e (lowest)
\note The \c $HOME-dependent configuration files are not implemented currently. Volunteers? Furthermore there are "intelligent" run-time dependent settings, like turning off colour output when the output is redirected. Their priority is just below the command line - so they can always be overridden if necessary. \subsection o__examples Examples Using the commandline: \code fsvs -o path=environment fsvs -opath=environment \endcode Using environment variables: \code FSVS_PATH=absolute fsvs st \endcode A configuration file, from $FSVS_CONF/config or in a WC-specific path below $FSVS_CONF: \code # FSVS configuration file path=wcroot \endcode \section oh_display Output settings and entry filtering \subsection o_all_removed Trimming the list of deleted entries If you remove a directory, all entries below are implicitly known to be deleted, too. To make the \ref status output shorter there's the \c all_removed option which, if set to \c no, will cause children of removed entries to be omitted. Example for the config file: \code all_removed=no \endcode \subsection o_dir_exclude_mtime Ignore mtime-metadata changes for directories When this option is enabled, directories where only the mtime changed are not reported on \ref status anymore. This is useful in situations where temporary files are created in directories, eg. by text editors. (Example: \c VIM swapfiles when no \c directory option is configured). Example for the config file: \code dir_exclude_mtime=yes \endcode \subsection o_dir_sort Directory sorting If you'd like to have the output of \ref status sorted, you can use the option \c dir_sort=yes. FSVS will do a run through the tree, to read the status of the entries, and then go through it again, but sorted by name. \note If FSVS aborts with an error during \ref status output, you might want to turn this option off again, to see where FSVS stops; the easiest way is on the command line with \c -odir_sort=no. 
\subsection o_filter Filtering entries Please see the command line parameter for \ref glob_opt_filter "-f", which is identical. \code fsvs -o filter=mtime \endcode \subsection o_logmax "fsvs log" revision limit There are some defaults for the number of revisions that are shown on a "fsvs log" command:
  • 2 revisions given (-rX:Y): \c abs(X-Y)+1, ie. all revisions in that range.
  • 1 revision given: exactly that one.
  • no revisions given: from \c HEAD to 1, with a maximum of 100.
As this option can only be used to set an upper limit of revisions, it makes most sense for the no-revision-arguments case. \subsection o_logoutput "fsvs log" output format You can modify aspects of the \ref log "fsvs log" output format by setting the \c log_output option to a combination of these flags:
  • \c color: This uses color in the output, similar to \c cg-log (\c cogito-log); the header and separator lines are highlighted. \note This uses ANSI escape sequences, and tries to restore the default color; if you know how to do that better (and more compatible), please tell the developer mailing list.
  • \c indent: Additionally you can shift the log message itself a space to the right, to make the borders clearer.
Furthermore the value \c normal is available; this turns off all special handling. \note If you start such an option, the value is reset; so if you specify \c log_output=color,indent in the global config file, and use \c log_output=color on the commandline, only colors are used. This is different to the \ref o_filter option, which is cumulating. \subsection o_opt_path Displaying paths You can specify how paths printed by FSVS should look like; this is used for the entry status output of the various actions, and for the diff header lines. There are several possible settings, of which one can be chosen via the \c path option.
  • \anchor pd_wcroot \c wcroot \n This is the old, traditional FSVS setting, where all paths are printed relative to the working copy root.
  • \anchor pd_parm \c parameter \n With this setting FSVS works like most other programs - it uses the first best-matching parameter given by the user, and appends the rest of the path.\n This is the new default. \note Internally FSVS still first parses all arguments, and then does a single run through the entries. So if some entry matches more than one parameter, it is printed using the first match.
  • \anchor pd_absolute \c absolute \n All paths are printed in absolute form. This is useful if you want to paste them into other consoles without worrying whether the current directory matches, or for using them in pipelines.
The next two are nearly identical to \c absolute, but the beginning of paths are substituted by environment variables. This makes sense if you want the advantage of full paths, but have some of them abbreviated.
  • \anchor pd_env \c environment \n Match variables to directories after reading the known entries, and use this cached information. This is faster, but might miss the best case if new entries are found (which would not be checked against possible longer hits). \n Furthermore, as this works via associating environment variables to entries, the environment variables must at least match the working copy base - shorter paths won't be substituted.
  • \c full-environment \n Check for matches just before printing the path. \n This is slower, but finds the best fit. \note The string of the environment variables must match a directory name; the filename is always printed literally, and partial string matches are not allowed. Feedback wanted. \note Only environment variables whose names start with \c WC are used for substitution, to avoid using variables like \c $PWD, \c $OLDPWD, \c $HOME and similar which might differ between sessions. Maybe the allowed prefixes for the environment variables should be settable in the configuration. Opinions to the users mailing list, please.
Example, with \c / as working copy base: \code $ cd /etc $ fsvs -o path=wcroot st .mC. 1001 ./etc/X11/xorg.conf $ fsvs -o path=absolute st .mC. 1001 /etc/X11/xorg.conf $ fsvs -o path=parameters st .mC. 1001 X11/xorg.conf $ fsvs -o path=parameters st . .mC. 1001 ./X11/xorg.conf $ fsvs -o path=parameters st / .mC. 1001 /etc/X11/xorg.conf $ fsvs -o path=parameters st X11 .mC. 1001 X11/xorg.conf $ fsvs -o path=parameters st ../dev/.. .mC. 1001 ../dev/../etc/X11/xorg.conf $ fsvs -o path=parameters st X11 ../etc .mC. 1001 X11/xorg.conf $ fsvs -o path=parameters st ../etc X11 .mC. 1001 ../etc/X11/xorg.conf $ fsvs -o path=environ st .mC. 1001 ./etc/X11/xorg.conf $ WCBAR=/etc fsvs -o path=wcroot st .mC. 1001 $WCBAR/X11/xorg.conf $ WCBAR=/etc fsvs -o path=wcroot st / .mC. 1001 $WCBAR/X11/xorg.conf $ WCBAR=/e fsvs -o path=wcroot st .mC. 1001 /etc/X11/xorg.conf $ WCBAR=/etc WCFOO=/etc/X11 fsvs -o path=wcroot st .mC. 1001 $WCFOO/xorg.conf $ touch /etc/X11/xinit/xinitrc $ fsvs -o path=parameters st .mC. 1001 X11/xorg.conf .m.? 1001 X11/xinit/xinitrc $ fsvs -o path=parameters st X11 /etc/X11/xinit .mC. 1001 X11/xorg.conf .m.? 1001 /etc/X11/xinit/xinitrc \endcode \note At least for the command line options the strings can be abbreviated, as long as they're still identifiable. Please use the full strings in the configuration file, to avoid having problems in future versions when more options are available. \subsection o_status_color Status output coloring FSVS can colorize the output of the status lines; removed entries will be printed in red, new ones in green, and otherwise changed in blue. Unchanged (for \c -v) will be given in the default color. For this you can set \c stat_color=yes; this is turned \c off per default. As with the other colorizing options this gets turned \c off automatically if the output is not on a tty; on the command line you can override this, though. 
\subsection o_stop_change Checking for changes in a script If you want to use FSVS in scripts, you might simply want to know whether anything was changed. In this case use the \c stop_change option, possibly combined with \ref o_filter; this gives you no output on \c STDOUT, but an error code on the first change seen: \code fsvs -o stop_change=yes st /etc if fsvs status -o stop_change=yes -o filter=text /etc/init.d then echo No change found ... else echo Changes seen. fi \endcode \subsection o_verbose Verbosity flags If you want a bit more control over the data you're getting you can use some specific flags for the \c verbose option. 
  • \c none,veryquiet - reset the bitmask, don't display anything.
  • \c quiet - only a few output lines.
  • \c changes - the characters showing what has changed for an entry.
  • \c size - the size for files, or the textual description (like \c "dir").
  • \c path - the path of the file, formatted according to \ref o_opt_path "the path option".
  • \c default - The default value, ie. \c changes, \c size and \c path.
  • \c meta - One more than the default so it can be used via a single \c "-v", it marks that the mtime and owner/group changes get reported as two characters. . If \c "-v" is used to achieve that, even entries without changes are reported, unless overridden by \ref o_filter.
  • \c url - Displays the entries' top priority URL
  • \c copyfrom - Displays the URL this entry has been copied from (see \ref copy).
  • \c group - The group this entry belongs to, see \ref group
  • \c urls - Displays all known URLs of this entry
  • \c stacktrace - Print the full stacktrace when reporting errors; useful for debugging.
  • \c all - Sets all flags. Mostly useful for debugging.
Please note that if you want to display \b fewer items than per default, you'll have to clear the bitmask first, like this: \code fsvs status -o verbose=none,changes,path \endcode \section oh_diff Diffing and merging on update \subsection o_diff Options relating to the "diff" action The diff is not done internally in FSVS, but some other program is called, to get the highest flexibility. There are several option values:
  • diff_prg: The executable name, default "diff".
  • diff_opt: The default options, default "-pu".
  • diff_extra: Extra options, no default.
The call is done as \code $diff_prg $diff_opt $file1 --label "$label1" $file2 --label "$label2" $diff_extra \endcode \note In \c diff_opt you should use only use command line flags without parameters; in \c diff_extra you can encode a single flag with parameter (like "-U5"). If you need more flexibility, write a shell script and pass its name as \c diff_prg. Advanced users might be interested in \ref exp_env "exported environment variables", too; with their help you can eg. start different \c diff programs depending on the filename. \subsection o_colordiff Using colordiff If you have \c colordiff installed on your system, you might be interested in the \c colordiff option. It can take one of these values:
  • \c no, \c off or \c false: Don't use \c colordiff.
  • empty (default value): Try to use \c colordiff as executable, but don't throw an error if it can't be started; just pipe the data as-is to \c STDOUT. (\e Auto mode.)
  • anything else: Pipe the output of the \c diff program (see \ref o_diff) to the given executable.
Please note that if \c STDOUT is not a tty (eg. is redirected into a file), this option must be given on the command line to take effect. \subsection o_conflict How to resolve conflicts on update If you start an update, but one of the entries that was changed in the repository is changed locally too, you get a conflict. There are some ways to resolve a conflict:
  • \c local - Just take the local entry, ignore the repository.
  • \c remote - Overwrite any local change with the remote version.
  • \c both - Keep the local modifications in the file renamed to filename.mine, and save the repository version as filename.rXXX, ie. put the revision number after the filename. The conflict must be solved manually, and the solution made known to FSVS via the \ref resolve command. \note As there's no known \e good version after this renaming, a zero byte file gets created. \n Any \ref resolve "resolve" or \ref revert "revert" command would make that current, and the changes that are kept in filename.mine would be lost! \n You should only \ref revert to the last repository version, ie. the data of filename.rXXX.
  • \c merge - Call the program \c merge with the common ancestor, the local and the remote version. If it is a clean merge, no further work is necessary; else you'll get the (partly) merged file, and the two other versions just like with the \c both variant, and (again) have to tell FSVS that the conflict is solved, by using the \ref resolve command.
\note As in the subversion command line client \c svn the auxiliary files are seen as new, although that might change in the future (so that they automatically get ignored). \subsection o_merge Options regarding the "merge" program Like with \ref o_diff "diff", the \c merge operation is not done internally in FSVS. To have better control
  • merge_prg: The executable name, default "merge".
  • merge_opt: The default options, default "-A".
The option \c "-p" is always used: \code $merge_prg $merge_opt -p $file1 $common $file2 \endcode \section oh_commit Options for commit \subsection o_author Author You can specify an author to be used on commit. This option has a special behaviour; if the first character of the value is an \c '$', the value is replaced by the environment variable named. Empty strings are ignored; that allows an \c /etc/fsvs/config like this: \code author=unknown author=$LOGNAME author=$SUDO_USER \endcode where the last non-empty value is taken; and if your \c .authorized_keys has lines like \code environment="FSVS_AUTHOR=some_user" ssh-rsa ... \endcode that would override the config values. \note Your \c sshd_config needs the \c PermitUserEnvironment setting; you can also take a look at the \c AcceptEnv and \c SendEnv documentation. \subsection o_passwd Password In some scenarios like ssl-client-key-authentication it is more comfortable to use anonymous logins for checkout. In case the commit needs authentication via a password, you can use the \c password option. Please note the possible risks - on the command line it's visible via \c ps, and config files should at least be protected via \c chmod! There's no encryption or obfuscation! \code password="pa55word" \endcode \subsection o_commit_to Destination URL for commit If you defined multiple URLs for your working copy, FSVS needs to know which URL to commit to. For this you would set \c commit_to to the \b name of the URL; see this example: \code fsvs urls N:master,P:10,http://... N:local,P:20,file:///... fsvs ci /etc/passwd -m "New user defined" -ocommit_to=local \endcode \subsection o_empty_commit Doing empty commits In the default settings FSVS will happily create empty commits, ie. revisions without any changed entry. These just have a revision number, an author and a timestamp; this is nice if FSVS is run via CRON, and you want to see when FSVS gets run. 
If you would like to avoid such revisions, set this option to \c no; then such commits will be avoided. Example: \code fsvs commit -o empty_commit=no -m "cron" /etc \endcode \subsection o_empty_msg Avoid commits without a commit message If you don't like the behaviour that FSVS does commits with an empty message received from \c $EDITOR (eg if you found out that you don't want to commit after all), you can change this option to \c no; then FSVS won't allow empty commit messages. Example for the config file: \code empty_message=no \endcode \subsection o_mkdir_base Creating directories in the repository above the URL If you want to keep some data versioned, the first commit is normally the creation of the base directories \b above the given URL (to keep that data separate from the other repository data). Previously this had to be done manually, ie. with a svn mkdir $URL --parents or similar command. \n With the \c mkdir_base option you can tell FSVS to create directories as needed; this is mostly useful on the first commit. \code fsvs urls ... fsvs group 'group:ignore,./**' fsvs ci -m "First post!" -o mkdir_base=yes \endcode \subsection o_delay Waiting for a time change after working copy operations If you're using FSVS in automated systems, you might see that changes that happen in the same second as a commit are not seen with \ref status later; this is because the timestamp granularity of FSVS is 1 second. For backward compatibility the default value is \c no (don't delay). You can set it to any combination of
  • \c commit,
  • \c update,
  • \c revert and/or
  • \c checkout;
for \c yes all of these actions are delayed until the clock seconds change. Example how to set that option via an environment variable: \code export FSVS_DELAY=commit,revert \endcode \section oh_performance Performance and tuning related options \subsection o_chcheck Change detection This option allows specifying the trade-off between speed and accuracy. A file with a changed size can immediately be known as changed; but if only the modification time is changed, this is not so easy. Per default FSVS does a MD5 check on the file in this case; if you don't want that, or if you want to do the checksum calculation for \b every file (in case a file has changed, but its mtime not), you can use this option to change FSVS' behaviour. On the command line there's a shortcut for that: for every \c "-C" another check in this option is chosen. The recognized specifications are
none Resets the check bitmask to "no checks".
file_mtime Check files for modifications (via MD5) and directories for new entries, if the mtime is different - default
dir Check all directories for new entries, regardless of the timestamp.
allfiles Check \b all files with MD5 for changes (\c tripwire -like operation).
full All available checks.
You can give multiple options; they're accumulated unless overridden by \c none. \code fsvs -o change_check=allfiles status \endcode \note \a commit and \a update set additionally the \c dir option, to avoid missing new files. \subsection o_copyfrom_exp Avoiding expensive compares on \ref cpfd "copyfrom-detect" If you've got big files that are seen as new, doing the MD5 comparison can be time consuming. So there's the option \c copyfrom_exp (for \e "expensive"), which takes the usual \c yes (default) and \c no arguments. \code fsvs copyfrom-detect -o copyfrom_exp=no some_directory \endcode \subsection o_group_stats Getting grouping/ignore statistics If you need to ignore many entries of your working copy, you might find that the ignore pattern matching takes some valuable time. \n In order to optimize the order of your patterns you can specify this option to print the number of tests and matches for each pattern. \code $ fsvs status -o group_stats=yes -q Grouping statistics (tested, matched, groupname, pattern): 4705 80 ignore group:ignore,./**.bak 4625 40 ignore group:ignore,./**.tmp \endcode For optimizing you'll want to put often matching patterns at the front (to make them match sooner, and avoid unnecessary tests); but if you are using other groups than \c ignore (like \c take), you will have to take care to keep the patterns within a group together. Please note that the first line shows how many entries were tested, and that the next lines differ by the number of matched entries for the current line, as all entries being tested against some pattern get tested for the next too, unless they match the current pattern. This option is available for \ref status and the \ref ignore "ignore test" commands. 
\section oh_base Base configuration \subsection o_conf Path definitions for the config and WAA area \anchor o_waa The paths given here are used to store the persistent configuration data needed by FSVS; please see \ref waa_files and \ref o__prio for more details, and the \ref o_softroot parameter as well as the \ref howto_backup_recovery for further discussion. \code FSVS_CONF=/home/user/.fsvs-conf fsvs -o waa=/home/user/.fsvs-waa st \endcode \note Please note that these paths can be given \b only as environment variables (\c $FSVS_CONF resp. \c $FSVS_WAA) or as command line parameter; settings in config files are ignored. \subsection o_configdir Configuration directory for the subversion libraries This path specifies where the subversion libraries should take their configuration data from; the most important aspect of that is authentication data, especially for certificate authentication. The default value is \c $FSVS_CONF/svn/. \c /etc/fsvs/config could have eg. \code config_dir=/root/.subversion \endcode Please note that this directory can hold an \c auth directory, and the \c servers and \c config files. \subsection o_softroot Using an alternate root directory This is a path that is prepended to \c $FSVS_WAA and \c $FSVS_CONF (or their default values, see \ref waa_files), if they do not already start with it, and it is cut off for the directory-name MD5 calculation. When is that needed? Imagine that you've booted from some Live-CD like Knoppix; if you want to setup or restore a non-working system, you'd have to transfer all files needed by the FSVS binary to it, and then start in some kind of \c chroot environment. With this parameter you can tell FSVS that it should load its libraries from the current filesystem, but use the given path as root directory for its administrative data. This is used for recovery; see the example in \ref howto_backup_recovery. So how does this work?
  • The internal data paths derived from \c $FSVS_WAA and \c $FSVS_CONF use the value given for \c softroot as a base directory, if they do not already start with it. \n (If that creates a conflict for you, eg. in that you want to use \c /var as the \c softroot, and your \c $FSVS_WAA should be \c /var/fsvs, you can make the string comparison fail by using /./var for either path.)
  • When a directory name for \c $FSVS_CONF or \c $FSVS_WAA is derived from some file path, the part matching \c softroot is cut off, so that the generated names match the situation after rebooting.
Previously you'd have to \ref export your data back to the filesystem and call \ref urls "fsvs urls" and FSVS \ref sync-repos "sync-repos" again, to get the WAA data back. \note A plain \c chroot() would not work, as some needed programs (eg. the decoder for update, see \ref s_p_n) would not be available. \note The easy way to understand \c softroot is: If you want to do a \c chroot() into the given directory (or boot with it as \c /), you'll want this set. \note As this value is used for finding the correct working copy root (by trying to find a \ref o_conf "conf-path", it cannot be set from a per-wc config file. Only the environment, global configuration or command line parameter make sense. \section oh_debug Debugging and diagnosing The next two options could be set in the global configuration file, to automatically get the last debug messages when an error happens. To provide an easy way to get on-line debugging again, \c debug_output and \c debug_buffer are both reset to non-redirected, on-line output, if more than a single \c -d is specified on the command line, like this: \code fsvs commit -m "..." -d -d filenames \endcode In this case you'll get a message telling you about that. \subsection o_debug_output Destination for debug output You can specify the debug output destination with the option \c debug_output. This can be a simple filename (which gets truncated on open), or, if it starts with a \c |, a command that the output gets piped into. If the destination cannot be opened (or none is given), debug output goes to \c STDOUT (for easier tracing via \c less). Example: \code fsvs -o debug_output=/tmp/debug.out -d st /etc \endcode \note That string is taken only once - at the first debug output line. So you have to use the correct order of parameters: -o debug_output=... -d. An example: writing the last 200 lines of debug output into a file. \code fsvs -o debug_output='| tail -200 > /tmp/debug.log' -d .... 
\endcode \subsection o_debug_buffer Using a debug buffer With the \c debug_buffer option you can specify the size of a buffer (in kB) that is used to capture the output, and which gets printed automatically if an error occurs. This must be done \b before debugging starts, like with the \ref o_debug_output "debug_output" specification. \code fsvs -o debug_buffer=128 ... \endcode \note If this option is specified in the configuration file or via the environment, only the buffer is allocated; if it is used on the command line, debugging is automatically turned on, too. \subsection o_warnings Setting warning behaviour Please see the command line parameter \ref glob_opt_warnings "-W", which is identical. \code fsvs -o warning=diff-status=ignore \endcode */ // Use this for folding: // g/^\\subsection/normal v/^\\s kkzf // vi: filetype=doxygen spell spelllang=en_gb formatoptions+=ta : // vi: nowrapscan foldmethod=manual foldcolumn=3 : fsvs-1.2.6/src/waa.c0000644000202400020240000024324012467104255013165 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include #include #include "waa.h" #include "interface.h" #include "direnum.h" #include "options.h" #include "add_unvers.h" #include "cache.h" #include "checksum.h" #include "helper.h" #include "global.h" #include "status.h" #include "est_ops.h" #include "ignore.h" #include "actions.h" /** \file * Handling of multiple struct estats, WAA (working copy * administrative area) function. 
* * In other words, handling single directories or complete trees of entries * (whereas est_ops.c is concerned with operations on single entries). * * \note \e WAA is short for Working copy Administrative * Area, ie. the directory hierarchy where local data concerning * the remote state and some caches are stored. * * This is not needed for all operations; eg. an \a export works without it. * */ /** The extension temporary files in the WAA get. */ static const char ext_tmp[]=".tmp"; /** -. * They are long enough to hold the \ref OPT__WAA_PATH "waa path" plus the * 3-level deep subdirectory structure for cache and data files. * The \ref OPT__CONF_PATH "conf path" plus additional data gets it own * buffers. * @{ */ char *waa_tmp_path, *waa_tmp_fn, *conf_tmp_path, *conf_tmp_fn; /** @} */ /** The meta-data for the WAA base directory. * The WAA itself doesn't get committed; checked via this inode. */ static struct sstat_t waa_stat; /** The maximum path length encountered so far. Stored in the \a dir-file, * to enable construction of paths without reallocating. */ static unsigned max_path_len; /** -. * This gets sorted by priority and URL on reading in \a url__load_list() . */ struct url_t **urllist=NULL; /** -. */ int urllist_count=0; /** How many entries we have; this is used to show the user * some kind of progress report, in percent. */ unsigned approx_entry_count; struct waa___temp_names_t { char *temp_name; char *dest_name; }; /** This array stores the target names. * Writes to the waa-area use temporary files, * which get renamed on waa__close(). */ static struct waa___temp_names_t *target_name_array=NULL; /** How many entries have been in use in the \ref target_name_array. */ static int target_name_array_len=0; /** -. */ int waa_tmp_path_len; /** -. */ struct waa__entry_blocks_t waa__entry_block; /** -. * Valid after a successfull call to \ref waa__find_common_base(). */ char *wc_path; /** -. */ int wc_path_len; const char /** The header line of the dir-files. 
* * Consists of * - header version (for verification), * - header length (for verification), * - number of entries (for space allocation), * - subdirectory count (currently only informational), * - needed string space (in bytes), * - length of longest path in bytes. * */ waa__header_line[]="%u %lu %u %u %u %u"; /** Convenience function for creating two paths. */ static inline void waa___init_path(enum opt__settings_e which, char *dest, char **eos) { int l; l=0; if (strncmp(opt__get_string(OPT__SOFTROOT), opt__get_string(which), opt__get_int(OPT__SOFTROOT)) != 0 ) { strcpy(dest, opt__get_string(OPT__SOFTROOT)); l=opt__get_int(OPT__SOFTROOT); /* OPT__SOFTROOT is defined to have *no* PATH_SEPARATOR at the end. * */ dest[l++]=PATH_SEPARATOR; } l+= strlen( strcpy(dest+l, opt__get_string(which) ) ); /* ensure a delimiter */ if (dest[l-1] != PATH_SEPARATOR) { dest[l++]=PATH_SEPARATOR; dest[l]='\0'; } *eos=dest + l; opt__set_int(which, PRIO_MUSTHAVE, l); } /** -. * If not a WAA-less operation, find the WAA and define an ignore * pattern. */ int waa__init(void) { int status; char *cp; status=0; /* If we're doing an import/export operation, we must not use the waa * area. We may be running off a KNOPPIX CD, or whatever. * * What we *need* is the conf directory ... it might have options for us. * */ /** \todo remove when gcc doesn't warn about \c strlen("const") * initializers. See debian bug #60xxxx. * And see below for WAA_PATH, too. */ if (opt__get_int(OPT__CONF_PATH)==0) { opt__set_string(OPT__CONF_PATH, PRIO_MUSTHAVE, DEFAULT_CONF_PATH); opt__set_int(OPT__CONF_PATH, PRIO_MUSTHAVE, strlen(DEFAULT_CONF_PATH)); } /* at least /w or some such */ STOPIF_CODE_ERR( opt__get_int(OPT__CONF_PATH)<3, EINVAL, "The CONF path is invalid; a (non-root) path is expected."); if (action->is_import_export) { /* So the WAA path is NULL, and serves as a validation point - every * access tried will get a SEGV and can be debugged. 
*/ opt__set_string(OPT__WAA_PATH, PRIO_MUSTHAVE, NULL); opt__set_int(OPT__WAA_PATH, PRIO_MUSTHAVE, 0); } else { if (opt__get_int(OPT__WAA_PATH)==0) { opt__set_string(OPT__WAA_PATH, PRIO_MUSTHAVE, DEFAULT_WAA_PATH); opt__set_int(OPT__WAA_PATH, PRIO_MUSTHAVE, strlen(DEFAULT_WAA_PATH)); } STOPIF_CODE_ERR( opt__get_int(OPT__WAA_PATH)<3, EINVAL, "The WAA path should be set to a directory below \"/\"."); } /* This memory has lifetime of the process. * /path/to/waa / 01/02/03..0F/ extension .tmp * The memory allocated is enough for the longest possible path. */ waa_tmp_path_len= opt__get_int(OPT__SOFTROOT) + 1 + ( max(opt__get_int(OPT__WAA_PATH), opt__get_int(OPT__CONF_PATH)) ) + 1 + WAA_WC_MD5_CHARS + 1 + APR_MD5_DIGESTSIZE*2 + 3 + WAA__MAX_EXT_LENGTH + strlen(ext_tmp) + 1 +4; DEBUGP("using %d bytes for temporary WAA+conf paths", waa_tmp_path_len); /* Here the paths are set at highest priority, so they can't get changed * afterwards. */ STOPIF( hlp__alloc( &conf_tmp_path, waa_tmp_path_len), NULL); waa___init_path(OPT__CONF_PATH, conf_tmp_path, &conf_tmp_fn); if (!action->is_import_export) { STOPIF( hlp__alloc( &waa_tmp_path, waa_tmp_path_len), NULL); waa___init_path(OPT__WAA_PATH, waa_tmp_path, &waa_tmp_fn); /* validate existence and save dev/inode for later checking */ STOPIF( hlp__lstat(waa_tmp_path, &waa_stat), "!stat() of waa-path \"%s\" failed. " "Does your local WAA storage area exist? ", waa_tmp_path); DEBUGP("got the WAA as inode %llu", (t_ull)waa_stat.ino); /* Only check whether it's there. */ STOPIF_CODE_ERR( access(conf_tmp_path, action->is_readonly ? R_OK : W_OK)==-1, errno, "!Cannot %s to the FSVS_CONF path \"%s\".", action->is_readonly ? "read" : "write", conf_tmp_path); } /* Now no more changes of the softroot (eg. via the per-WC configuration) * are allowed. */ opt__set_int( OPT__SOFTROOT, PRIO_MUSTHAVE, opt__get_int(OPT__SOFTROOT)); cp=opt__variable_from_option(OPT__SOFTROOT); /* Solaris 10 compatibility. 
*/ if (opt__get_string(OPT__SOFTROOT)) setenv(cp, opt__get_string(OPT__SOFTROOT), 1); else unsetenv(cp); ex: return status; } /** -. * This is more or less a portable reimplementation of GNU \c * getcwd(NULL,0) ... self-allocating the needed buffer. * * \a where gets the cwd, and \b must be free()d; the optional \a ret_len * can be set to the actual length of the cwd. * * If the caller wants to append some path to the end, and knows how many * bytes are needed, the \a additional bytes can be requested. * * If the cwd has been removed, we get \c ENOENT. But returning that would * not necessarily signal a fatal error to all callers, so we return \c * ENOTDIR in that case. */ int waa__save_cwd(char **where, int *ret_len, int additional) { int status; /* We remember how many bytes we used last time, hoping that we need no * realloc() call in later invocations. */ static int len=256; char *path; path=NULL; status=0; while (1) { STOPIF( hlp__realloc( &path, len + additional + 4), NULL); /* We allocate the needed amount, but lie to getcwd() about the available * space - so the caller surely has space left. */ if (getcwd(path, len-1)) break; STOPIF_CODE_ERR(errno != ERANGE, errno == ENOENT ? ENOTDIR : errno, "Cannot get the current directory."); len += 512; STOPIF_CODE_ERR(len > 1<<13, ERANGE, "You have mighty long paths. Too long. More than %d bytes? Sorry.", len); } if (ret_len) *ret_len=strlen(path); *where=path; ex: return status; } /** -. * * \note The mask used is \c 0777 - so mind your umask! */ int waa__mkdir(char *dir, int including_last) { int status; STOPIF( waa__mkdir_mask(dir, including_last, 0777), NULL); ex: return status; } /** -. * * If it already exists, no error is returned. * * If needed, the structure is generated recursively. * * With \a including_last being \c 0 you can give a filename, and make sure * that the directories up to there are created. 
Because of this we can't * use \c apr_dir_make_recursive() - We'd have to cut the filename away, * and this is done here anyway. * */ int waa__mkdir_mask(char *dir, int including_last, int mask) { int status; char *last_ps; struct stat buf; status=0; /* Does something exist here? */ if (lstat(dir, &buf) == -1) { if (errno == ENOENT) { /* Some intermediate levels are still missing; try again * recursively. */ last_ps=strrchr(dir, PATH_SEPARATOR); BUG_ON(!last_ps); /* Strip last directory, and *always* undo the change. */ *last_ps=0; status=waa__mkdir(dir, 1); *last_ps=PATH_SEPARATOR; STOPIF( status, NULL); DEBUGP("%s: last is %d", dir, including_last); /* Now the parent was done ... so we should not get ENOENT again. */ if (including_last) STOPIF_CODE_ERR( mkdir(dir, mask & 07777) == -1, errno, "cannot mkdir(%s)", dir); } else STOPIF(status, "cannot lstat(%s)", dir); } else { STOPIF_CODE_ERR( including_last && !S_ISDIR(buf.st_mode), ENOTDIR, "\"%s\" is not a directory", dir); } ex: return status; } /** Returns the MD5 of the given path, taking the softroot into account. */ int waa___get_path_md5(const char const *path, unsigned char digest[APR_MD5_DIGESTSIZE]) { int status; int plen, wdlen; char *cp; static const char root[]= { PATH_SEPARATOR, 0}; status=0; cp=NULL; plen=strlen(path); DEBUGP("path is %s", path); /* If we have a relative path, ie. one without / as first character, * we have to take the current directory first. */ if (path[0] != PATH_SEPARATOR) { /* This may be suboptimal for performance, but the only usage * currently is for MD5 of large files - and there it doesn't * matter, because shortly afterwards we'll be reading many KB. */ STOPIF( waa__save_cwd(&cp, &wdlen, 1 + plen + 1 + 3), NULL); path= hlp__pathcopy(cp, NULL, cp, "/", path, NULL); /* hlp__pathcopy() can return shorter strings, eg. by removing ./././// * etc. So we have to count again. 
*/ plen=strlen(path); } while (plen>1 && path[plen-1] == PATH_SEPARATOR) plen--; if (opt__get_string(OPT__SOFTROOT)) { DEBUGP("have softroot %s for %s, compare %d bytes", opt__get_string(OPT__SOFTROOT), path, opt__get_int(OPT__SOFTROOT)); if (strncmp(opt__get_string(OPT__SOFTROOT), path, opt__get_int(OPT__SOFTROOT)) == 0 ) path+=opt__get_int(OPT__SOFTROOT); /* We need to be sure that the path starts with a PATH_SEPARATOR. * That is achieved in waa__init(); the softroot path gets normalized * there. */ /* In case both the argument and the softroot are identical, * we end up with *path==0. Change that to the root directory. */ if (!*path) path=(char*)root; plen=strlen(path); } DEBUGP("md5 of %s", path); apr_md5(digest, path, plen); IF_FREE(cp); ex: return status; } /** -. * * In \a erg a pointer to an static buffer (at least as far as the caller * should mind!) is returned; \a eos, if not \c NULL, is set to the end of * the string. \a start_of_spec points at the first character specific to * this file, ie. after the constant part of \c $FSVS_WAA or \c $FSVS_CONF * and the \c PATH_SEPARATOR. * * \a flags tell whether the path is in the WAA (\ref GWD_WAA) or in the * configuration area (\ref GWD_CONF); furthermore you can specify that * directories should be created as needed with \ref GWD_MKDIR. * * The intermediate directories are created, so files can be created * or read directly after calling this function. */ int waa__get_waa_directory(char *path, char **erg, char **eos, char **start_of_spec, int flags) { static int waa_init_for_wc = 0; int status, len; char *cp; unsigned char digest[APR_MD5_DIGESTSIZE], *p2dig; status=0; cp=NULL; /* Do that before the apr_md5 call, so we can use the digest. */ if ((flags & GWD_WAA) && !waa_init_for_wc) { waa_init_for_wc=1; /* We avoid that if it's 0 (backward compatibility). 
*/ if (WAA_WC_MD5_CHARS) { BUG_ON(!wc_path); STOPIF( waa___get_path_md5(wc_path, digest), NULL); /* We have enough space for the full MD5, even if it's overwritten * later on; and as it's no hot path (in fact it's called only once), * the performance doesn't matter, too. * So just use the function that we already have. */ cs__md5tohex(digest, waa_tmp_fn); waa_tmp_fn += WAA_WC_MD5_CHARS; *waa_tmp_fn = PATH_SEPARATOR; waa_tmp_fn++; } /* Termination is needed only for the output below. */ *waa_tmp_fn = 0; DEBUGP("init wc base:%s %s", wc_path+opt__get_int(OPT__SOFTROOT), waa_tmp_path); } STOPIF( waa___get_path_md5(path, digest), NULL); p2dig=digest; len=APR_MD5_DIGESTSIZE; if (flags & GWD_WAA) { *erg = waa_tmp_path; cp = waa_tmp_fn; if (start_of_spec) *start_of_spec=cp; Mbin2hex(p2dig, cp, 1); len--; *(cp++) = PATH_SEPARATOR; Mbin2hex(p2dig, cp, 1); len--; *(cp++) = PATH_SEPARATOR; } else if (flags & GWD_CONF) { *erg = conf_tmp_path; cp = conf_tmp_fn; if (start_of_spec) *start_of_spec=cp; } else { BUG(".:8:."); } Mbin2hex(p2dig, cp, len); if (flags & GWD_MKDIR) STOPIF( waa__mkdir(*erg, 1), NULL); *(cp++) = PATH_SEPARATOR; *cp = '\0'; if (eos) *eos=cp; DEBUGP("returning %s", *erg); ex: return status; } /** Base function to open files in the WAA. * * For the \a flags the values of \c creat or \c open are used; * the mode is \c 0777, so take care of your umask. * * If the flags include one or more of \c O_WRONLY, \c O_TRUNC or \c O_RDWR * the file is opened as a temporary file and \b must be closed with * waa__close(); depending on the success value given there it is renamed * to the destination name or deleted. * * This temporary path is stored in a per-filehandle array, so there's no * limit here on the number of written-to files. * * If the flags include \c O_APPEND, no temporary file is used, and no * filehandle is stored - do simply a \c close(). * * For read-only files simply do a \c close() on their filehandles. * * Does return \c ENOENT without telling the user. 
* * \note If \a extension is given as \c NULL, only the existence of the * given WAA directory is checked. So the caller gets a \c 0 or an error * code (like \c ENOENT); \a flags and \a filehandle are ignored. * */ int waa__open(char *path, const char *extension, int flags, int *filehandle) { char *cp, *orig, *eos, *dest, *start_spec; int fh, status; int use_temp_file; int old_len; fh=-1; orig=NULL; /* O_APPEND means that we have to append to the *existing* file, so we * may not use the temporaray name. * But using O_APPEND normally means using O_CREAT, too - so we have to * do the specifically. */ use_temp_file=(flags & O_APPEND) ? 0 : (flags & (O_WRONLY | O_RDWR | O_CREAT)); STOPIF( waa__get_waa_directory(path, &dest, &eos, &start_spec, waa__get_gwd_flag(extension) ), NULL); if (!extension) { /* Remove the last PATH_SEPARATOR. */ BUG_ON(eos == dest); eos[-1]=0; return hlp__lstat(dest, NULL); } strcpy(eos, extension); BUG_ON( action->is_readonly && (flags & (O_WRONLY | O_RDWR | O_APPEND | O_CREAT)), "Action marked read-only, got flags 0x%x for %s", flags, eos); if (use_temp_file) { STOPIF( hlp__strdup( &orig, dest), NULL); strcat(eos, ext_tmp); /* In order to avoid generating directories (eg. for md5s-files) that * aren't really used (because the data files are < 128k, and so the md5s * files get deleted again), we change the PATH_SEPARATOR in the * destination filename to '_' - this way we get different filenames * and avoid collisions with more than a single temporary file (as * would happen with just $FSVS_WAA/tmp). * * Can that filename get longer than allowed? POSIX has 255 characters, * IIRC - that should be sufficient. */ cp=strchr(start_spec, PATH_SEPARATOR); while (cp) { *cp='_'; cp=strchr(cp+1, PATH_SEPARATOR); } /* We want to know the name later, so keep a copy. 
*/ STOPIF( hlp__strdup( &dest, dest), NULL); DEBUGP("tmp for target %s is %s", orig, dest); } else DEBUGP("reading target %s", dest); if (flags & O_APPEND) STOPIF( waa__mkdir(dest, 0), NULL); /* in case there's a O_CREAT */ fh=open(dest, flags, 0777); if (fh<0) { status=errno; if (status == ENOENT) goto ex; STOPIF(status, "open %s with flags 0x%X", dest, flags); } DEBUGP("got fh %d", fh); /* For files that are written to, remember the original filename, indexed * by the filehandle. That must be done *after* the open - we don't know * the filehandle in advance! */ if (use_temp_file) { if (fh >= target_name_array_len) { /* store old length */ old_len=target_name_array_len; /* Assume some more filehandles will be opened */ target_name_array_len=fh+8; DEBUGP("reallocate target name array to %d", target_name_array_len); STOPIF( hlp__realloc( &target_name_array, sizeof(*target_name_array) * target_name_array_len), NULL); /* zero out */ memset(target_name_array + old_len, 0, sizeof(*target_name_array) * (target_name_array_len-old_len)); } /* These are already copies. */ target_name_array[fh].dest_name=orig; target_name_array[fh].temp_name=dest; } *filehandle=fh; status=0; ex: if (status && fh>-1) close(fh); return status; } /** -. * * If \a has_failed is !=0, the writing to the file has * failed somewhere; so the temporary file is not renamed to the * destination name, just removed. * * This may be called only for \b writeable files of waa__open() and * similar; readonly files should just be \c close()d. */ int waa__close(int filehandle, int has_failed) { int status, do_unlink; struct waa___temp_names_t *target; /* Assume we have to remove the file; only if the rename * is successful, this ain't true. */ do_unlink=1; status=0; target= target_name_array ? 
target_name_array+filehandle : NULL; if (target) DEBUGP("filehandle %d should be %s", filehandle, target->dest_name); else DEBUGP("filehandle %d wasn't opened via waa__open()!", filehandle); status=close(filehandle); if (!has_failed) { STOPIF_CODE_ERR(status == -1, errno, "closing tmp file"); if (target) { /* Now that we know we'd like to keep that file, make the directories * as needed. */ STOPIF( waa__mkdir(target->dest_name, 0), NULL); /* And give it the correct name. */ STOPIF_CODE_ERR( rename(target->temp_name, target->dest_name) == -1, errno, "renaming tmp file from %s to %s", target->temp_name, target->dest_name); } do_unlink=0; } status=0; ex: /* If there's an error while closing the file (or already given * due to has_failed), unlink the file. */ if (do_unlink && target) { do_unlink=0; STOPIF_CODE_ERR( unlink(target->temp_name) == -1, errno, "Cannot remove temporary file %s", target->temp_name); } if (target) { IF_FREE(target->temp_name); IF_FREE(target->dest_name); } return status; } /** -. * * Normally this is used to mark the base directory used in some WAA path, * ie. if you are versioning \c /etc, you'll get a symlink * \c $WAA/18/2f/153bd94803955c2043e6f2581d5d/_base * pointing to \c /etc . */ int waa__make_info_file(char *directory, char *name, char *dest) { int status; int hdl; static const char readme_1[]="This directory is used by FSVS.\n" "Please see http://fsvs.tigris.org/ for more details.\n\n" "The working copy for this hash value is\n" "\t"; static const char readme_2[]="\n"; inline int w(const char *buffer, int l) { return write(hdl, buffer, l) != l; } STOPIF( waa__open(directory, name, O_CREAT | O_WRONLY, &hdl), NULL); STOPIF_CODE_ERR( w(readme_1, sizeof(readme_1)-1) || w(dest, strlen(dest)) || w(readme_2, sizeof(readme_2)-1), errno, "Cannot create the readme file."); STOPIF( waa__close(hdl, 0), NULL); ex: return status; } /** -. 
* * This function takes the parameter \a name, and returns a freshly * allocated bit of memory with the given value or - if \c NULL - * the current working directory. * * That the string is always freshly allocated on the heap makes * sense in that callers can \b always just free it. */ int waa__given_or_current_wd(char *name, char **erg) { int status; status=0; if (name) STOPIF( hlp__strdup( erg, name), NULL); else STOPIF( waa__save_cwd( erg, NULL, 0), NULL); ex: return status; } /** -. * * If the \c unlink()-call succeeds, the (max. 2) directory levels above * are removed, if possible. * * Via the parameter \a ignore_not_exist the caller can say whether a * \c ENOENT should be returned silently. * * If \a extension is \c NULL, the given path already specifies a file, and * is not converted into a WAA path. * * \see waa_files. */ int waa__delete_byext(char *path, char *extension, int ignore_not_exist) { int status; char *cp, *eos; int i; status=0; if (extension) { STOPIF( waa__get_waa_directory(path, &cp, &eos, NULL, waa__get_gwd_flag(extension)), NULL); strcpy(eos, extension); /* Make eos point at the PATH_SEPARATOR. */ eos--; BUG_ON(*eos != PATH_SEPARATOR); } else { cp=path; eos=strrchr(cp, PATH_SEPARATOR); BUG_ON(!eos); } DEBUGP("unlink %s", cp); if (unlink(cp) == -1) { status=errno; if (status == ENOENT && ignore_not_exist) status=0; STOPIF(status, "Cannot remove spool entry %s", cp); } /* Try to unlink the (possibly) empty directory. * If we get an error don't try further, but don't give it to * the caller, either. * After all, it's just a clean-up. */ /* eos is currently at a PATH_SEPARATOR; we have to clean that. */ for(i=0; i<3; i++) { *eos=0; if (rmdir(cp) == -1) break; eos=strrchr(cp, PATH_SEPARATOR); /* That should never happen. */ BUG_ON(!eos, "Got invalid path to remove"); } DEBUGP("last removed was %s", cp); ex: return status; } /** -. * * The \a entry_name may be \c NULL; then the current working directory * is taken. 
* \a write is open mode, like used for \c open(2) (O_CREAT | O_WRONLY * | O_TRUNC) and is given to \c waa__open(). * * \c ENOENT is returned without giving an error message. */ int waa__open_byext(char *entry_name, char *extension, int mode, int *fh) { int status; char *entry; status=0; entry=NULL; STOPIF( waa__given_or_current_wd(entry_name, &entry), NULL ); status=waa__open(entry, extension, mode, fh); if (status == ENOENT) goto ex; STOPIF(status, NULL); ex: IF_FREE(entry); return status; } /** -. * */ int waa__open_dir(char *wc_base, int write, int *fh) { return waa__open_byext(wc_base, WAA__DIR_EXT, write, fh); } /** -. * * All entries are defined as new. */ int waa__build_tree(struct estat *dir) { int status; struct estat *sts; int i, ignore, have_ignored, have_found; status=0; /* no stat info on first iteration */ STOPIF( waa__dir_enum( dir, 0, 0), NULL); DEBUGP("found %d entries ...", dir->entry_count); have_ignored=0; have_found=0; for(i=0; ientry_count; i++) { sts=dir->by_inode[i]; STOPIF( ign__is_ignore(sts, &ignore), NULL); if (ignore>0) { DEBUGP("ignoring entry %s", sts->name); sts->to_be_ignored=1; have_ignored=1; continue; } /* in build_tree, it must be a new entry. */ sts->entry_status=FS_NEW; ops__set_todo_bits(sts); approx_entry_count++; have_found++; if (S_ISDIR(sts->st.mode)) { if (ops__are_children_interesting(sts)) { STOPIF_CODE_ERR( chdir(sts->name) == -1, errno, "chdir(%s)", sts->name); STOPIF( waa__build_tree(sts), NULL ); /* this can fail if the parent directories have been removed. */ STOPIF_CODE_ERR( chdir("..") == -1, errno, "parent has gone"); } } STOPIF( ac__dispatch(sts), NULL); } if (have_ignored) /* Delete per index faster */ STOPIF( ops__free_marked(dir, 0), NULL); if (have_found) ops__mark_changed_parentcc(dir, entry_status); ex: return status; } /** Returns the index at which the element should be * (the index at which an equal or first bigger inode is). 
 */
int waa___find_position(struct estat **new,
		struct estat ***array, int count)
{
	int smaller, middle, bigger_eq;
	int status;

	/* That's easy. */
	if (count == 0)
		return 0;

	/* A special case. As the directories are normally laid out sequentially
	 * on a hard disk, the inodes are often grouped in their directories.
	 * In a test case (my /etc) this shortcut was taken 1294 times, and
	 * didn't catch 1257 times (with up to 80 entries in the array). */
	/* Recent gcov tests are still better:
	 * - of ~24000 calls of this function
	 * - 118 times the above shortcut for only a single directory was taken,
	 * - ~23730 times this single comparision was enough,
	 * - and the binary search loop below was called only 16 times.
	 * Hooray! */
	if (dir___f_sort_by_inode(new, array[0]) < 0)
	{
		DEBUGP("short path taken for 0<1");
		return 0;
	}

	/* if only one element, and not on first position ... */
	if (count == 1)
		return 1;

	/* some more cheating :-) */
	if (dir___f_sort_by_inode(new, array[count-1]) >= 0)
	{
		DEBUGP("short path taken for >count");
		return count;
	}

	/* bsearch can only find the _equal_ element - we need
	 * the first one higher. */
	/* order is wrong - find new place for this element. */
	smaller=1;
	bigger_eq=count-1;
	/* i is a smaller element, k a possibly higher */
#if 0
	/* NOTE(review): this disabled debug snippet was mangled in this copy
	 * of the source (tag-stripped); presumably it dumped all inode numbers
	 * of the array. It is compiled out, so it is kept as-is. */
	if (1)
	{
		char tmp[count*(18+1)+10];
		int i, n;
		for (i=n=0; ist.ino);
		}
		DEBUGP("having %d [ %s]", count, tmp);
		DEBUGP("looking for %llu", (t_ull)(*new)->st.ino);
	}
#endif

	/* Classic binary search, with the invariant that array[bigger_eq] is
	 * >= *new, and everything left of smaller is < *new. */
	while (1) //bigger_eq>smaller+1)
	{
		middle=(bigger_eq+smaller)/2;
		DEBUGP("at %d=%llu - %d=%llu - %d=%llu",
				smaller, (t_ull)(*array[smaller])->st.ino,
				middle, (t_ull)(*array[middle])->st.ino,
				bigger_eq, (t_ull)(*array[bigger_eq])->st.ino);

		status=dir___f_sort_by_inode(new, array[middle]);
		if (status > 0)
			smaller=middle+1;
		else if (status < 0)
			bigger_eq=middle;
		else
		{
			/* status==0 means identical inodes => hardlinks.
			 * Now these are directories ... but we see hardlinks eg. for binding
			 * mounts, so we cannot just abort. */
			DEBUGP("Jackpot, hardlink!");
			bigger_eq=middle;
			break;
		}

		if (bigger_eq<=smaller)
			break;
	}

	DEBUGP("believing in %d %llu", bigger_eq,
			(t_ull)(*array[bigger_eq])->st.ino);
	/* now we have an index bigger_eq, whose element is bigger or equal
	 * than the new, and its left is smaller or equal: */
#if DEBUG
	BUG_ON((bigger_eq < count-1 &&
				dir___f_sort_by_inode(new, array[bigger_eq])>0) ||
			(bigger_eq >0 &&
			 dir___f_sort_by_inode(new, array[bigger_eq-1])<0));
#endif

	return bigger_eq;
}


/** -.
 *
 * Here the complete entry tree gets written to a file, which is used on the
 * next invocations to determine the entries' statii. It contains the names,
 * sizes, MD5s, devices, inode numbers, parent, mode and time informations,
 * and a reference to the parent to re-build the tree.
 *
 * \todo Currently hardlinks with duplicate inode-numbers are not well done
 * in fsvs.
 *
 *
 *

 * <h3>Format</h3>

* This file has a single header line with a defined length; it is padded * before the newline with spaces, and the last character before the newline * is a \c $ . * The other lines have space-delimited fields, and a \\0 delimited name * at the end, followed by a newline. * *

 * <h3>Order of entries in the file</h3>

* We always write parents before childs, and (mostly) lower inode numbers * before higher; mixing the subdirectories is allowed. * This allows us to rebuild the tree in one pass (because the parents are * already known), and gives us nearly linear reading on the storage media * (because the inodes are mostly in harddisk order, there's not much * back-seeking necessary). * * As a consequence the root entry \c . is \b always the first one in * the written file. * * \note * If we were going \b strictly in inode-order, we would have to jump over * some entries (if the parent directory has a higher inode * number than this entry and the [hard disk] head is already further down), * and then have a second run through ... (or possibly a third, and so on). * That's more complexity than wanted, and doesn't bring performance. * So currently only one run; hard disk must move back sometimes. * * *

 * <h3>\c directory Array</h3>

* We use one array, named \c directory , to store pointers in the * \a estat::by_inode arrays we're traversing (which are defined to * be NULL-terminated). * * We just have to find the next inode in the active directories; they are * already sorted by inode, so that's very easy. * * Here's a small drawing in ASCII, followed by a graphviz version. * * \verbatim * (struct estat) * ^ * | * xxxxxxxxxxxxxxxxN xxxxxxxxxxxxxxxN xxN xxxxN * ^ ^ ^ ^ * /->d >-/ | | | * | d >----------------------------/ | | * | d >------------------------------------/ | * | d >------------------------------------------------/ * | * directory * \endverbatim * \dot * digraph directory * { * node [shape=record, fontsize=9, height=0, width=0]; * rankdir=BT; * * directory [label=" directory | { <1> 1 | <2> 2 | <3> 3 | <4> 4 }"]; * List1 [ label="<1>1|<2>2|<3>3|<4>4|NULL" ]; * List2 [ label="<1>1|<2>2|<3>3|<4>4|<5>5|NULL" ]; * List3 [ label="<1>1|<2>2|<3>3|<4>4|NULL" ]; * List4 [ label="<1>1|<2>2|<3>3|NULL" ]; * sts [label="(struct estat)"]; * * * directory:4:e -> List1:2:s; * directory:3:e -> List2:3:s; * directory:2:e -> List3:4:s; * directory:1:e -> List4:1:s; * * List1:1 -> sts; * * node [style=invis]; * edge [style=invis]; * * directory:1:e -> Hidden1 -> List4:n; * directory:2:e -> Hidden1 -> List3:n; * } * \enddot * The x's are the by_inode-arrays of pointers to struct, NULL-terminated. * * The d's show the directories-array with 4 entries. * * We don't really store the parent inode numbers in the file; that wouldn't * be enough, anyway - as soon as there are two or more filesystems, they * would collide. * * So instead of the inode number we store the number of the entry *in the * file*; so the root inode (which is always first) has parent_ino=0 (none), * its children get 1, and so on. * That means that as long as we allocate the memory block in a single * continuous block, we don't have to search any more; we can just reconstruct * the pointers to the parent. 
 * We keep the directory-array sorted; so we have to insert a new directory
 * at the correct position, but can otherwise output very fast.
 * So for the array
 *   [10 20 30 40 50 60 70]
 * the element 10 is written; the next one, say 35, is inserted at the
 * correct position:
 *   [20 30 35 40 50 60 70]
 * Again the first (smallest) element is written, and so on. */
int waa__output_tree(struct estat *root)
{
	struct estat ***directory, *sts, **sts_pp;
	int max_dir, i, alloc_dir;
	unsigned this_len;
	int status, waa_info_hdl;
	unsigned complete_count, string_space;
	char header[HEADER_LEN] = "UNFINISHED";

	waa_info_hdl=-1;
	directory=NULL;

	STOPIF( waa__open_dir(NULL, WAA__WRITE, &waa_info_hdl), NULL);

	/* allocate space for later use - entry count and similar.
	 * The real header is written at save_header: below, once the counts
	 * are known; "UNFINISHED" marks a file that was never completed. */
	status=strlen(header);
	memset(header + status, '\n', sizeof(header)-status);
	i=write(waa_info_hdl, header, sizeof(header));
	STOPIF_CODE_ERR( i != sizeof(header), errno,
			"header was not written");

	/* Take a page of pointers (on x86-32). Will be reallocated if
	 * necessary. */
	alloc_dir=1024;
	STOPIF( hlp__calloc( &directory, alloc_dir+1, sizeof(*directory)), NULL);

	/* The root entry is visible above all URLs. */
	root->url=NULL;
	STOPIF( ops__save_1entry(root, 0, waa_info_hdl), NULL);

	root->file_index=complete_count=1;

	root->path_len=string_space=strlen(root->name);
	max_path_len=root->path_len;

	/* an if (root->entry_count) while (...) {...}
	 * would be possible, but then an indentation level would
	 * be wasted :-) ! */
	if (!root->entry_count)
		goto save_header;

	/* This check is duplicated in the loop.
	 * We could do that in ops__save_1entry(), but it doesn't belong here. */
	if (root->to_be_sorted)
	{
		DEBUGP("re-sorting root");
		STOPIF( dir__sortbyinode(root), NULL);
	}

	/* by_inode might be reallocated by dir__sortbyinode(); so it has be used
	 * after that. */
	directory[0]=root->by_inode;
	max_dir=1;

	/* as long as there are directories to do... */
	while (max_dir)
	{
		// get current entry
		sts=( *directory[0] );

		/* find next element */
		directory[0]++;

		/* end of this directory ?*/
		if (*directory[0] == NULL)
		{
			/* remove this directory by shifting the list */
			max_dir--;
			DEBUGP("finished subdir");
			memmove(directory, directory+1, sizeof(*directory)*max_dir);
		}
		else if (max_dir>1)
		{
			/* check if it stays or gets moved.
			 * ignore element 0, as this is the new one. */
			i=waa___find_position(directory[0], directory+1, max_dir-1);
			if (i)
			{
				/* order is wrong - move elements.
				 * Mind that returned index is one element further in directory[]!
				 *
				 * [ 55 20 30 40 50 60 ] max_dir=6, i=4
				 *  new^  ^0          ^4
				 *
				 * [ 20 30 40 50 55 60 ]
				 * */
				sts_pp=directory[0];
				memmove(directory, directory+1, sizeof(*directory)*i);
				directory[i]=sts_pp;
				DEBUGP("old current moves to #%u: %llu < %llu",
						i,
						(t_ull)(*directory[i-1])->st.ino,
						(t_ull)(*directory[i  ])->st.ino);
			}
		}

		/* If this takes too much performance, we might have to duplicate that
		 * check before the waa___find_position() call above. */
		if (!ops__should_entry_be_written_in_list(sts))
			continue;

		// do current entry
		STOPIF( ops__save_1entry(sts, sts->parent->file_index, waa_info_hdl),
				NULL);

		complete_count++;
		/* store position number for child -> parent relationship */
		sts->file_index=complete_count;

		this_len=strlen(sts->name)+1;
		string_space += this_len;

		if (!sts->path_len)
			ops__calc_path_len(sts);
		if (sts->path_len > max_path_len)
			max_path_len = sts->path_len;

		if (ops__has_children(sts))
		{
			/* It's easy and possible to have always the correct number
			 * of subdirectories in root->subdir_count. We'd just have
			 * to walk up to the root in waa__build_tree and add_directory
			 * and increment the number there.
			 *
			 * But
			 * - we don't really know if this size is really required and
			 * - we'd like to decrease the size of the structure,
			 * so we don't use that really any more - we realloc the pointers
			 * if necessary. */
			if (max_dir >= alloc_dir)
			{
				alloc_dir *= 2;
				STOPIF( hlp__realloc( &directory,
							(alloc_dir+1) * sizeof(*directory)), NULL);
				DEBUGP("reallocated directory pointers to %u entries", alloc_dir);
			}

			/* Has this directory to be sorted, because it got new elements?
			 * Must be done *before* inserting into the array. */
			if (sts->to_be_sorted)
				STOPIF( dir__sortbyinode(sts), NULL);

			/* sort into array */
			i=waa___find_position(sts->by_inode, directory, max_dir);

			/* this time we have to shift all bigger elements one further:
			 * new=45, max_dir=7,
			 * [10 20 30 40 50 60 70]
			 * i=4
			 * results in
			 * [10 20 30 40 45 50 60 70] */
			memmove(directory+i+1, directory+i,
					sizeof(*directory)*(max_dir-i));
			directory[i]=sts->by_inode;
			DEBUGP("new subdir %llu #%u",
					(t_ull)(*directory[i])->st.ino, i);
			max_dir++;
		}

#ifdef DEBUG
		/* NOTE(review): this debug-only check was mangled in this copy of
		 * the source (tag-stripped); presumably it verified that directory[]
		 * stays sorted by inode. Restore from upstream before enabling DEBUG. */
		for(i=1; i0);
#endif
	}

save_header:
	/* save header information */
	/* path_len needs a terminating \0, so add a few bytes. */
	status=snprintf(header, sizeof(header), waa__header_line,
			WAA_VERSION, (t_ul)sizeof(header),
			complete_count, alloc_dir, string_space+4,
			max_path_len+4);
	BUG_ON(status >= sizeof(header)-1, "header space not large enough");

	/* keep \n at end */
	memset(header + status, ' ', sizeof(header)-1 -status);
	header[sizeof(header)-2]='$';
	STOPIF_CODE_ERR( lseek(waa_info_hdl, 0, SEEK_SET) == -1, errno,
			"seeking to start of file");
	status=write(waa_info_hdl, header, sizeof(header));
	STOPIF_CODE_ERR( status != sizeof(header), errno,
			"re-writing header failed");

	status=0;

ex:
	if (waa_info_hdl != -1)
	{
		/* Close even on the error path; keep the first error status. */
		i=waa__close(waa_info_hdl, status);
		waa_info_hdl=-1;
		STOPIF( i, "closing tree handle");
	}

	if (directory) IF_FREE(directory);

	return status;
}


/* Shared state for the waa__update_dir() / new_entry() correlation below. */
static struct estat *old;
static struct estat current;
static int nr_new;
/** Compares the directories.
 * Every element found in old will be dropped from current;
 * only new elements are added to old, by temporarily using
 * current.by_inode.
 *
 * Example:
 *
 * Old has these elements.
 *   b c e g h
 *
 * current gets these entries in by_name before the correlation
 * (by_inode has just another order):
 *   A b c D e F g h NULL
 * Now we need A, D, and F.
 *
 * After the loop current has:
 *   by_inode  A D F
 *   by_name   NULL b c NULL e NULL g h NULL
 * with nr_new=3.
 *
 * */
int new_entry(struct estat *sts, struct estat **sts_p)
{
	int status;
	int ignore;

	STOPIF( ign__is_ignore(sts, &ignore), NULL);
	if (ignore>0)
		DEBUGP("ignoring entry %s", sts->name);
	else
	{
		/* Re-parent the new entry into the old tree, and collect it in
		 * current.by_inode[] for ops__new_entries() later. */
		sts->parent=old;

		*sts_p=NULL;
		current.by_inode[nr_new]=sts;
		nr_new++;

		DEBUGP("found a new one!");
		sts->entry_status=FS_NEW;
		sts->flags |= RF_ISNEW;
		/* Has to be done in that order, so that ac__dispatch() already finds
		 * sts->do_filter_allows set. */
		ops__set_todo_bits(sts);
		STOPIF( ac__dispatch(sts), NULL);
		ops__mark_parent_cc(sts, entry_status);
		approx_entry_count++;

		/* if it's a directory, add all subentries, too. */
		if (S_ISDIR(sts->st.mode) &&
				ops__are_children_interesting(sts) &&
				(opt__get_int(OPT__FILTER) & FS_NEW))
		{
			STOPIF_CODE_ERR( chdir(sts->name) == -1, errno,
					"chdir(%s)", sts->name);

			STOPIF( waa__build_tree(sts), NULL);

			STOPIF_CODE_ERR( chdir("..") == -1, errno,
					"parent went away");
		}
	}

ex:
	return status;
}


/** Checks for new entries in this directory, and updates the
 * directory information.
 *
 * Gets called after all \b expected (known) entries of this directory have
 * been (shallowly!) read - so subdirectories might not yet be up-to-date
 * yet.
 *
 * The estat::do_this_entry and estat::do_userselected flags are set, and
 * depending on them (and opt_recursive) estat::entry_status is set.
 *
 * On \c chdir() an eventual \c EACCES is ignored, and the "maybe changed"
 * status returned.
*/ int waa__update_dir(struct estat *_old) { int dir_hdl, status; int i; char *path; old = _old; status=nr_new=0; dir_hdl=-1; current=*old; current.by_inode=current.by_name=NULL; current.entry_count=0; STOPIF( ops__build_path(&path, old), NULL); /* To avoid storing arbitrarily long pathnames, we just open this * directory and do a fchdir() later. */ dir_hdl=open(".", O_RDONLY | O_DIRECTORY); STOPIF_CODE_ERR( dir_hdl==-1, errno, "saving current directory with open(.)"); DEBUGP("update_dir: chdir(%s)", path); if (chdir(path) == -1) { if (errno == EACCES) goto ex; STOPIF( errno, "chdir(%s)", path); } /* Here we need the entries sorted by name. */ STOPIF( waa__dir_enum( ¤t, 0, 1), NULL); DEBUGP("update_dir: direnum found %d; old has %d (%d)", current.entry_count, old->entry_count, status); /* No entries means no new entries; but not old entries deleted! */ if (current.entry_count == 0) goto after_compare; nr_new=0; STOPIF( ops__correlate_dirs( old, ¤t, NULL, NULL, new_entry, NULL), NULL); DEBUGP("%d new entries", nr_new); /* no new entries ?*/ status=0; if (nr_new) { STOPIF( ops__new_entries(old, nr_new, current.by_inode), "adding %d new entries", nr_new); } /* Free unused struct estats. */ /* We use by_name - there the pointers are sorted by usage. */ for(i=0; i < current.entry_count; i++) if (current.by_name[i] ) STOPIF( ops__free_entry( current.by_name+i ), NULL); /* Current is allocated on the stack, so we don't free it. */ IF_FREE(current.by_inode); IF_FREE(current.by_name); /* The strings are still used. We would have to copy them to a new area, * like we're doing above in the by_name array. */ // IF_FREE(current.strings); after_compare: /* There's no doubt now. * The old entries have already been checked, and if there are new * we're sure that this directory has changed. */ old->entry_status &= ~FS_LIKELY; /* If we find a new entry, we know that this directory has changed. 
* We cannot use the ops__mark_parent_* functions, as old can have no * children that we could give. */ if (nr_new) ops__mark_changed_parentcc(old, entry_status); ex: if (dir_hdl!=-1) { i=fchdir(dir_hdl); STOPIF_CODE_ERR(i == -1 && !status, errno, "cannot fchdir() back"); i=close(dir_hdl); STOPIF_CODE_ERR(i == -1 && !status, errno, "cannot close dirhandle"); } DEBUGP("update_dir reports %d new found, status %d", nr_new, status); return status; } /** Small helper macro for telling the user that the file is damaged. */ #define TREE_DAMAGED(condition, ...) \ STOPIF_CODE_ERR( condition, EINVAL, \ "!The entries file seems to be damaged -- \n" \ " %s.\n" \ "\n" \ "Please read the users@ mailing list.\n" \ " If you know what you're doing you could " \ "try using 'sync-repos'\n" \ " (but please _read_the_documentation_!)\n" \ " 'We apologize for the inconvenience.'", \ __VA_ARGS__); /** -. * This may silently return -ENOENT, if the waa__open fails. * * The \a callback is called for \b every entry read; but for performance * reasons the \c path parameter will be \c NULL. 
 * */
int waa__input_tree(struct estat *root,
		struct waa__entry_blocks_t **blocks,
		action_t *callback)
{
	int status, waa_info_hdl=-1;
	int i, cur, first;
	unsigned count, subdirs, string_space;
	/* use a cache for directories, so that the parent can be located quickly */
	/* substitute both array with one struct estat **cache,
	 * which runs along ->by_inode until NULL */
	ino_t parent;
	char header[HEADER_LEN];
	char *filename;
	struct estat *sts, *stat_mem;
	char *strings;
	int sts_free;
	char *dir_mmap, *dir_end, *dir_curr;
	off_t length;
	t_ul header_len;
	struct estat *sts_tmp;

	/* The first entry block always describes the root entry itself. */
	waa__entry_block.first=root;
	waa__entry_block.count=1;
	waa__entry_block.next=waa__entry_block.prev=NULL;

	length=0;
	dir_mmap=NULL;
	status=waa__open_dir(NULL, WAA__READ, &waa_info_hdl);
	if (status == ENOENT)
	{
		/* Negative, to distinguish "no WC here" from a plain ENOENT. */
		status=-ENOENT;
		goto ex;
	}
	STOPIF(status, "cannot open .dir file");

	length=lseek(waa_info_hdl, 0, SEEK_END);
	STOPIF_CODE_ERR( length == (off_t)-1, errno,
			"Cannot get length of .dir file");

	DEBUGP("mmap()ping %llu bytes", (t_ull)length);
	dir_mmap=mmap(NULL, length,
			PROT_READ, MAP_SHARED,
			waa_info_hdl, 0);
	/* If there's an error, return it.
	 * Always close the file. Check close() return code afterwards. */
	status=errno;
	i=close(waa_info_hdl);
	STOPIF_CODE_ERR( !dir_mmap, status, "mmap failed");
	STOPIF_CODE_ERR( i, errno, "close() failed");

	dir_end=dir_mmap+length;

	TREE_DAMAGED( length < (HEADER_LEN+5) ||
			dir_mmap[HEADER_LEN-1] != '\n' ||
			dir_mmap[HEADER_LEN-2] != '$',
			"the header is not correctly terminated");

	/* Cut $ and beyond. Has to be in another buffer, as the file's
	 * mmap()ed read-only. */
	memcpy(header, dir_mmap, HEADER_LEN-2);
	header[HEADER_LEN-2]=0;

	status=sscanf(header, waa__header_line,
			&i, &header_len,
			&count, &subdirs, &string_space,
			&max_path_len);
	DEBUGP("got %d header fields", status);
	TREE_DAMAGED( status != 6,
			"not all needed header fields could be parsed");
	dir_curr=dir_mmap+HEADER_LEN;

	TREE_DAMAGED( i != WAA_VERSION || header_len != HEADER_LEN,
			"the header has a wrong version");

	/* For progress display */
	approx_entry_count=count;

	/* for new subdirectories allow for some more space.
	 * Note that this is not clean - you may have to have more space
	 * than that for large structures! */
	max_path_len+=1024;

	DEBUGP("reading %d subdirs, %d entries, %d bytes string-space",
			subdirs, count, string_space);

	/* Isn't there a snscanf() or something similar? I remember having seen
	 * such a beast. There's always the chance of a damaged file, so
	 * I wouldn't depend on sscanf staying in its buffer.
	 *
	 * I now check for a \0\n at the end, so that I can be sure
	 * there'll be an end to sscanf. */
	TREE_DAMAGED( dir_mmap[length-2] != '\0' || dir_mmap[length-1] != '\n',
			"the file is not correctly terminated");
	DEBUGP("ok, found \\0 or \\0\\n at end");

	/* All entry names live in this single block; sts->name points here. */
	STOPIF( hlp__alloc( &strings, string_space), NULL);
	root->strings=strings;

	/* read inodes */
	cur=0;
	sts_free=1;
	first=1;
	/* As long as there should be entries ... */
	while ( count > 0)
	{
		DEBUGP("curr=%p, end=%p, count=%d",
				dir_curr, dir_end, count);
		TREE_DAMAGED( dir_curr>=dir_end,
				"An entry line has a wrong number of entries");

		if (sts_free == 0)
		{
			/* In all situations I can think about this will simply
			 * result in a big calloc, as at this time no block will
			 * have been freed, and the freelist will be empty. */
			STOPIF( ops__allocate(count, &stat_mem, &sts_free), NULL );
			/* This block has to be updated later. */
			STOPIF( waa__insert_entry_block(stat_mem, sts_free), NULL);
		}

		sts_free--;
		count--;

		/* The very first record is the root entry itself; all others go
		 * into the freshly allocated block. */
		sts=first ? root : stat_mem+cur;
		DEBUGP("about to parse %p = '%-.40s...'", dir_curr, dir_curr);
		STOPIF( ops__load_1entry(&dir_curr, sts, &filename, &parent), NULL);

		/* Should this just be a BUG_ON? To not waste space in the release
		 * binary just for people messing with their dir-file? */
		TREE_DAMAGED( (parent && first) ||
				(!parent && !first) ||
				(parent && parent-1>cur),
				"the parent pointers are invalid");

		if (first) first=0;
		else cur++;

		/* First - set all fields of this entry */
		strcpy(strings, filename);
		sts->name=strings;
		strings += strlen(filename)+1;
		BUG_ON(strings - root->strings > string_space);

		if (parent)
		{
			/* parent is a 1-based file index: 1 is the root, N+2 is the
			 * N-th entry in stat_mem (see waa__output_tree()). */
			if (parent == 1)
				sts->parent=root;
			else
			{
				i=parent-2;
				BUG_ON(i >= cur);
				sts->parent=stat_mem+i;
			}

			sts->parent->by_inode[ sts->parent->child_index++ ] = sts;
			BUG_ON(sts->parent->child_index > sts->parent->entry_count,
					"too many children for parent");

			/* Check the revision */
			if (sts->repos_rev != sts->parent->repos_rev)
			{
				/* Mark the whole chain of ancestors as having mixed revisions. */
				sts_tmp=sts->parent;
				while (sts_tmp && !sts_tmp->other_revs)
				{
					sts_tmp->other_revs = 1;
					sts_tmp=sts_tmp->parent;
				}
			}
		} /* if parent */

		/* if it's a directory, we need the child-pointers. */
		if (S_ISDIR(sts->st.mode))
		{
			/* if it had children, we need to read them first - so make an array. */
			if (sts->entry_count)
			{
				STOPIF( hlp__alloc( &sts->by_inode,
							sizeof(*sts->by_inode) * (sts->entry_count+1)), NULL);
				sts->by_inode[sts->entry_count]=NULL;
				sts->child_index=0;
			}
		}

		if (callback)
			STOPIF( callback(sts), NULL);
	} /* while (count) read entries */

ex:
	/* Return the first block even if we had eg. ENOENT */
	if (blocks)
		*blocks=&waa__entry_block;

	if (dir_mmap)
	{
		i=munmap(dir_mmap, length);
		if (!status)
			STOPIF_CODE_ERR(i, errno, "munmap() failed");
	}

	return status;
}


/** Check whether the conditions for update and/or printing the directory
 * are fulfilled.
* * A directory has to be printed * - when it is to be fully processed (and not only walked through * because of some children), * - and either * - it has changed (new or deleted entries), or * - it was freshly added. * * */ static inline int waa___check_dir_for_update(struct estat *sts) { int status; status=0; if (!sts->do_this_entry) goto ex; /* If we have only do_a_child set, we don't update the directory - * so the changes will be found on the next commit. */ /* If this directory has changed, check for new files. */ /* If this entry was replaced, it must not have been a directory * before, so ->entry_count is defined as 0 (see ops__load_1entry()). * For replaced entries which are _now_ directories we'll always * get here, and waa__update_dir() will give us the children. */ if ((sts->entry_status || (opt__get_int(OPT__CHANGECHECK) & CHCHECK_DIRS) || (sts->flags & RF_ADD) || (sts->flags & RF_CHECK) ) && ops__are_children_interesting(sts) && action->do_update_dir) { DEBUGP("dir_to_print | CHECK for %s", sts->name); STOPIF( waa__update_dir(sts), NULL); /* Now the status could have changed, and therefore the filter might * now apply. */ ops__calc_filter_bit(sts); } /* Whether to do something with this directory or not shall not be * decided here. Just pass it on. */ if (ops__allowed_by_filter(sts)) STOPIF( ac__dispatch(sts), NULL); ex: return status; } /** Does an update on the specified directory, and checks for completeness. * We get here if all \b known children have been loaded, and have to look * whether the subchildren are finished, too. 
 * */
int waa___finish_directory(struct estat *sts)
{
	int status;
	struct estat *walker;

	status=0;

	/* Walk from sts up towards the root, finishing every directory whose
	 * children are all done; stop at the first one still incomplete. */
	walker=sts;
	while (1)
	{
		DEBUGP("checking directory %s: %u unfini, %d of %d (%s)",
				walker->name, walker->unfinished,
				walker->child_index, walker->entry_count,
				st__status_string(walker));
		if (walker->unfinished > 0) break;

		/* This (parent) might not be finished yet; but don't discard empty
		 * directories (should be only on first loop invocation - all other
		 * entries *have* at least a single child). */
		if (walker->entry_count == 0)
			BUG_ON(walker != sts);
		else if (walker->child_index < walker->entry_count)
			break;

		DEBUGP("walker=%s; status=%s",
				walker->name, st__status_string_fromint(walker->entry_status));

		if (!TEST_PACKED(S_ISDIR, walker->local_mode_packed) ||
				(walker->entry_status & FS_REPLACED) == FS_REMOVED)
		{
			/* If
			 * - it got replaced by another type, or
			 * - the directory doesn't exist anymore,
			 * we have already printed it. */
		}
		else if (!(opt__get_int(OPT__FILTER) & FS_NEW))
		{
			/* If new entries are not wanted, we simply do the callback - if it
			 * matches the users' wishes. */
			if (ops__allowed_by_filter(walker))
				STOPIF( ac__dispatch(walker), NULL);
		}
		else
		{
			/* Check the parent for added entries. Deleted entries have already
			 * been found missing while running through the list. */
			STOPIF( waa___check_dir_for_update(walker), NULL);
			/* We increment the unfinished value, so that this entry won't be
			 * done again. */
			walker->unfinished+=0x1000;
		}

		/* This directory is done, tell the parent. */
		walker=walker->parent;
		if (!walker) break;

		DEBUGP("%s has a finished child, now %d unfinished",
				walker->name, walker->unfinished);
		/* We must not decrement if we don't count them. */
		if (walker->unfinished)
			walker->unfinished--;
	}

	if (walker == sts->parent && walker)
		DEBUGP("deferring parent %s/%s (%d unfinished)",
				walker->name, sts->name, walker->unfinished);

ex:
	return status;
}


/** -.
* * On input we expect a tree of nodes starting with \a root; the entries * that need updating have estat::do_userselected set, and their children * get marked via ops__set_todo_bits(). * * On output we have estat::entry_status set; and the current \ref * action->local_callback gets called. * * It's not as trivial to scan the inodes in ascending order as it was when * this part of code was included in * \c waa__input_tree(); but we get a list of (location, number) * blocks to run through, so it's the same performance-wise. * * This function \b consumes the list of entry blocks, ie. it destroys * their data - the \a first pointer gets incremented, \a count * decremented. * *

 * <h3>Threading</h3>

* We could use several threads, to get more that one \c lstat() to run at * once. I have done this and a patch available, but testing on linux/x86 on * ext3 seems to readahead the inodes, so the wall time got no shorter. * * If somebody wants to test with threads, I'll post the patch. * * For threading there has to be some synchronization - an entry can be done * only if its parent has been finished. That makes sense insofar, as when * some directory got deleted we don't need to \c lstat() the children - they * must be gone, too. * *

 * <h3>KThreads</h3>

* On LKML there was a discussion about making a list of syscalls, for * getting them done without user/kernel switches. (About 2007 or so? * Don't even know whether that was merged in the end.) * * On cold caches this won't really help, I think; but I didn't test * whether that would help for the hot-cache case. * *

 * <h3>Design</h3>

 * <ol>
 * <li>If the parent of the current entry is removed, this entry is too;
 *   skip the other checks.
 * <li>Check current status.
 * <li>All entries that are not a directory \b now can be printed
 *   immediately; decrement parent's \c unfinished counter.
 * <li>Directory entries
 *   <ul>
 *   <li>These increment their parent's \c unfinished value, as they might
 *     have children to do.
 *   <li>If they have \b no known entries (have been empty) they may get
 *     checked for changes (\c waa__update_dir()), and are finished -
 *     decrement parent's \c unfinished counter.
 *   <li>Else they wait for their \c child_index value to reach the \c
 *     entry_count number; then, as soon as their \c unfinished value gets
 *     zero, they're really done.
 *   </ul>
 * <li>If a directory has no more \c unfinished entries, it can be checked
 *   for changes, and is finished - decrement parent's \c unfinished counter.
 * </ol>
 *
 * The big obstacle is that arbitrary (sub-)paths might be wanted by the
 * user; so we have to take great care about the child-counting.
 * */
int waa__update_tree(struct estat *root,
		struct waa__entry_blocks_t *cur_block)
{
	int status;
	struct estat *sts;

	if (! (root->do_userselected || root->do_child_wanted) )
	{
		/* If neither is set, waa__partial_update() wasn't called, so
		 * we start from the root. */
		root->do_userselected =
			root->do_this_entry =
			root->do_filter_allows_done =
			root->do_filter_allows = 1;
		DEBUGP("Full tree update");
	}

	/* TODO: allow non-remembering behaviour */
	action->keep_children=1;

	status=0;
	/* The entry blocks are laid out in the order written by
	 * waa__output_tree(): parents always before their children. */
	while (cur_block)
	{
		/* For convenience */
		sts=cur_block->first;

		DEBUGP("doing update for %s ... %d left in %p",
				sts->name, cur_block->count, cur_block);

		/* For directories initialize the child counter.
		 * We don't know the current type yet! */
		if (S_ISDIR(sts->st.mode))
			sts->child_index = sts->unfinished = 0;

		/* If the entry was just added, we already set its estat::st and filter
		 * bits. */
		if (!(sts->flags & RF_ISNEW))
			STOPIF( ops__update_filter_set_bits(sts), NULL);

		if (!(sts->do_this_entry || sts->do_child_wanted))
			goto next;

		/* Now sts->local_mode_packed has been set. */
		if (sts->entry_status)
			ops__mark_parent_cc(sts, entry_status);

		if (sts->parent)
		{
			/* A directory child means the parent has to wait for it. */
			if (TEST_PACKED(S_ISDIR, sts->old_rev_mode_packed))
				sts->parent->unfinished++;

			/* Parent gone => this entry is gone too; skip further checks. */
			if (sts->parent->entry_status & FS_REMOVED)
				goto next;
		}

		if (sts->entry_status & FS_REMOVED)
		{
			if (sts->parent)
			{
				/* If this entry is removed, the parent has changed. */
				sts->parent->entry_status &= (~FS_LIKELY);
				sts->parent->entry_status |= FS_CHANGED;
				/* The FS_CHILD_CHANGED markings are already here. */
			}

			/* If a directory is removed, we don't allocate the by_inode
			 * and by_name arrays, and it is set to no child-entries. */
			if (TEST_PACKED(S_ISDIR, sts->old_rev_mode_packed) &&
					!action->keep_children)
				sts->entry_count=0;

			/* One worry less for the parent. */
			if (TEST_PACKED(S_ISDIR, sts->old_rev_mode_packed))
				//	if (S_ISDIR(sts->st.mode))
				//	if (TEST_PACKED(S_ISDIR, sts->local_mode_packed))
				sts->parent->unfinished--;
		}

		if (S_ISDIR(PACKED_to_MODE_T(sts->local_mode_packed)) &&
				(sts->entry_status & FS_REPLACED) == FS_REPLACED)
		{
			/* This entry was replaced, ie. was another type before, and is a
			 * directory *now*.
			 *
			 * So the shared members (entry_count, by_inode) have wrong data.
			 * We have to correct that here.
			 *
			 * That causes a call of waa__update_dir(), which is exactly what we
			 * want. */
			DEBUGP("new directory %s", sts->name);
			sts->entry_count=0;
			sts->unfinished=0;
			sts->by_inode=sts->by_name=NULL;
			sts->strings=NULL;
			/* TODO: fill this members from the ignore list */
			//	sts->active_ign=sts->subdir_ign=NULL;
		}

next:
		/* This is more or less the same as below, only for this entry and not
		 * its parent. */
		if (TEST_PACKED(S_ISDIR, sts->local_mode_packed) &&
				sts->entry_count==0)
		{
			DEBUGP("doing empty directory %s %d", sts->name, sts->do_this_entry);
			/* Check this entry for added entries. There cannot be deleted
			 * entries, as this directory had no entries before. */
			STOPIF( waa___finish_directory(sts), NULL);
		}

		/* If this is a normal entry *now*, we print it.
		 * Non-empty directories are shown after all child nodes have been
		 * checked. */
		if (!TEST_PACKED(S_ISDIR, sts->local_mode_packed) &&
				sts->do_this_entry)
			STOPIF( ac__dispatch(sts), NULL);

		/* The parent must be done *after* the last child node ... at least
		 * that's what's documented above :-) */
		/* If there's a parent, and it's still here *or* we have to remember
		 * the children anyway ... */
		if (sts->parent && action->keep_children )
		{
			sts->parent->child_index++;

			/* If we did the last child of a directory ... */
			if (sts->parent->child_index >= sts->parent->entry_count &&
					sts->parent->do_this_entry)
			{
				DEBUGP("checking parent %s/%s", sts->parent->name, sts->name);
				/* Check the parent for added entries.
				 * Deleted entries have already been found missing while
				 * running through the list. */
				STOPIF( waa___finish_directory(sts->parent), NULL);
			}
			else
				DEBUGP("deferring parent %s/%s%s: %d of %d, %d unfini",
						sts->parent->name, sts->name,
						sts->parent->do_this_entry ? "" : " (no do_this_entry)",
						sts->parent->child_index,
						sts->parent->entry_count,
						sts->parent->unfinished);
		}

		/* Sadly there's no continue block, like in perl.
		 * Advance the pointers. */
		cur_block->first++;
		cur_block->count--;
		if (cur_block->count <= 0)
		{
			/* We should possibly free this memory, but as there's normally only 1
			 * struct allocated (the other declared static) we'd save about 16 bytes. */
			cur_block=cur_block->next;
		}
	}

ex:
	return status;
}


/** -.
 *
 * \a argc and \a normalized tell which entries should be updated.
 *
 * We return the \c -ENOENT from waa__input_tree() if no working
 * copy could be found. \c ENOENT is returned for a non-existing entry
 * given on the command line.
 *
 * The \a callback is called for \b every entry read by waa__input_tree(),
 * not filtered like the normal actions. */
int waa__read_or_build_tree(struct estat *root,
		int argc, char *normalized[], char *orig[],
		action_t *callback,
		int return_ENOENT)
{
	int status;
	struct waa__entry_blocks_t *blocks;

	status=0;
	status=waa__input_tree(root, &blocks, callback);
	DEBUGP("read tree = %d", status);

	if (status == -ENOENT)
	{
		/* Some callers want to know whether we *really* know these entries. */
		if (return_ENOENT)
			return -ENOENT;
	}
	else
		STOPIF( status, NULL);

	if (opt__get_int(OPT__PATH) == PATH_CACHEDENVIRON)
		STOPIF( hlp__match_path_envs(root), NULL);

	/* Do update. */
	STOPIF( waa__partial_update(root, argc, normalized,
				orig, blocks), NULL);

	/* In case we're doing commit or something with progress report,
	 * uninit the progress. */
	if (action->local_uninit)
		STOPIF( action->local_uninit(), NULL);

ex:
	return status;
}


/** -.
* * This function calculates the common root of the given paths, and tries * to find a working copy base there (or above). * It returns the paths of the parameters relative to the base found. * * Eg.: for \c /a/wc/sub/sub2 and \c /a/wc/file it returns * - \c base = \c /a/wc * - \c normalized[0] = \c sub/sub2 * - \c normalized[1] = \c file * * We have to find a wc root before we can load the entries file; so we'd * have to process the given paths twice, possibly each time by prepending * the current working directory and so on; that's why this function returns * a block of relative path pointers. These have just to be walked up to the * root to process them (eg. mark for processing). * * * \c *normalized should be \c free()d after use; but as the converted * arguments are all allocated one by one it won't help that much. * * \note In case \b no matching base is found, the common part of the paths * is returned as base, and the paths are normalized relative to it. \c * ENOENT is returned. * \todo Should that be changed to base="/"? * * * If we get \b no parameters, we fake the current working directory as * parameter and return it in \c normalized[0]. \c argc in the caller will * still be \c 0! * * - If we have a dir-file, we look only from the current directory below - * so fake a parameter. * - If we have no dir-file: * - If we find a base, we fake a parameter and show only below. * - If we find no base, we believe that we're at the root of the wc. * * The parameter must not be shown as "added" ("n...") - because it isn't. * * For the case that the WC root is \c "/", and we shall put a \c "./" in * front of the normalized paths, we need an additional byte per argument, * so that eg. \c "/etc" can be changed to \c "./etc" - see the PDS * comments. 
* * In order to correctly handle cases like \c * "/symlink/to/a/directory/subd/file", we do a realpath() call of the \b * directory of the argument (with \c ".../" assumed to be \c ".../."), and * use that as base path for the filename. * */ int waa__find_common_base2(int argc, char *args[], char ***normalized, int flags) { int status, i, j; int len; char *cp, *confname; char *paths[argc], *base_copy; char **norm; char *nullp[2]; char *last_ps; const char *path2copy, *basepath2copy; /* A bit long, but it doesn't really matter whether it's on the stack or * the heap. */ char canon[PATH_MAX]; static const char ps[]={PATH_SEPARATOR, 0}; int fnlen; status=0; norm=NULL; /* Step 0: Special case for *no* arguments. */ if (argc == 0) { argc=1; nullp[0]=start_path; nullp[1]=NULL; args=nullp; DEBUGP("faked a single parameter to %s", *args); } /* Step 1: Allocation. * We need (argc || 1) pointers (plus a NULL) and the base path. * The relative paths are done in-place; we waste a bit of memory, but * there won't be that many arguments normally. * */ len = argc * sizeof(char*) + sizeof(NULL); STOPIF( hlp__alloc( &norm, len), NULL); /* Step 2: Get the real path of all filenames, and store them. * Delimiters are \0. */ len=0; status=0; for(i=0; i= PATH_MAX, "path longer than PATH_MAX"); path2copy=last_ps; basepath2copy=canon; } else BUG_ON(1); /* +1 because of PDS, both times. */ STOPIF( hlp__alloc( paths+i, fnlen+1), NULL); paths[i]++; hlp__pathcopy(paths[i], &j, basepath2copy, ps, path2copy, NULL); if (len 1 && paths[i][len-1] == PATH_SEPARATOR) paths[i][--len]=0; DEBUGP("got argument #%d as %s[%d]", i, paths[i], len); } /* Step 3: find the common base. */ /* len always points to the *different* character (or to \0). */ len=strlen(paths[0]); for(i=1; i0) len--; } BUG_ON(len < 0, "Paths not even equal in separator - " "they have nothing in common!"); /* paths[0][0] == PATH_SEPARATOR is satisfied by both branches above. 
*/ if (len == 0) { /* Special case - all paths are starting from the root. */ len=1; DEBUGP("we're at root."); } STOPIF( hlp__strnalloc(len, &base_copy, paths[0]), NULL); DEBUGP("starting search at %s", base_copy); /* Step 4: Look for a wc. * The given value could possible describe a file (eg. if the only * argument is its path) - we have to find a directory. */ while (1) { /* We cannot look for the entry file, because on the first commit it * doesn't exist. * A wc is defined by having an URL defined. */ DEBUGP("looking for %s", base_copy); status=waa__open(base_copy, NULL, 0, 0); /* Is there a base? */ if (!status) break; if (len <= 1) break; base_copy[len]=0; cp=rindex(base_copy, PATH_SEPARATOR); if (cp) { /* If we're at "/", don't delete the root - try with it, and stop. */ if (cp == base_copy) cp++; *cp=0; len=cp - base_copy; } } DEBUGP("after loop is len=%d, base=%s, and status=%d", len, base_copy, status); /* Now status is either 0, or eg. ENOENT - just what we'd like to return. * But do that silently. * * Note: if there's *no* base found, we take the common path. */ STOPIF( status, "!Couldn't find a working copy with matching base."); /* We hope (?) that the action won't overwrite these strings. */ wc_path=base_copy; wc_path_len=len; DEBUGP("found working copy base at %s", wc_path); STOPIF_CODE_ERR( chdir(wc_path) == -1, errno, "chdir(%s)", wc_path); setenv(FSVS_EXP_WC_ROOT, wc_path, 1); /* Step 5: Generate pointers to normalized paths. * len is still valid, so we just have to use paths[i]+len. */ for(i=0; ionly_opt_filter || opt__get_int(OPT__FILTER) == 0) opt__set_int(OPT__FILTER, PRIO_MUSTHAVE, FILTER__ALL); DEBUGP("filter has mask 0x%X (%s)", opt__get_int(OPT__FILTER), st__status_string_fromint(opt__get_int(OPT__FILTER))); ex: if (status && status!=ENOENT) { /* Free only if error encountered */ IF_FREE(norm); } else { /* No problems, return pointers. */ if (normalized) *normalized=norm; } return status; } /** -. 
* * We get a tree starting with \a root, and all entries from \a normalized * get estat::do_userselected and estat::do_this_entry set. These flag gets * used by waa__update_tree(). * */ int waa__partial_update(struct estat *root, int argc, char *normalized[], char *orig[], struct waa__entry_blocks_t *blocks) { int status; struct estat *sts; int i, flags, ign; int faked_arg0; status=0; /* If the user gave no path argument to the action, the current directory * is faked into the first path, but without changing argc. (Some actions * want to know whether *any* path was given). */ faked_arg0=(argc == 0 && *normalized); /* Not fully correct - we fake now, haven't faked ;-) */ if (faked_arg0) argc=1; for(i=0; iarg) sts->arg= faked_arg0 ? "" : orig[i]; /* This entry is marked as full, parents as "look below". */ sts->do_userselected = sts->do_this_entry = 1; /* Set auto-props as needed. See * http://fsvs.tigris.org/ds/viewMessage.do?dsForumId=3928&dsMessageId=2981798 * */ STOPIF(prp__sts_has_no_properties(sts, &ign), NULL); if (ign) STOPIF( ign__is_ignore(sts, &ign), NULL); while ( 1 ) { /* This new entry is surely updated. * But what about its (new) parents? * They're not in the blocks list (that we get as parameter), so * they'd get wrong information on commit. */ /* Without the 2nd parameter sts->st might not get set, depending on * action->overwrite_sts_st (implemented in another branch). */ if (sts->flags & RF_ISNEW) { STOPIF( ops__update_single_entry(sts, &sts->st), NULL); sts->entry_status=FS_NEW; ops__calc_filter_bit(sts); } sts = sts->parent; if (!sts) break; sts->do_child_wanted = 1; } } STOPIF( waa__update_tree(root, blocks), NULL); ex: return status; } /** -. */ int waa__new_entry_block(struct estat *entry, int count, struct waa__entry_blocks_t *previous) { int status; struct waa__entry_blocks_t *eblock; status=0; STOPIF( hlp__alloc( &eblock, sizeof(*eblock)), NULL); eblock->first=entry; eblock->count=count; /* The block is appended after the given block. 
* - The root node is still the first entry. * - We need not go to the end of the list, we have O(1). */ eblock->next=previous->next; eblock->prev=previous; previous->next=eblock; if (eblock->next) eblock->next->prev=eblock; ex: return status; } /** -. * */ int waa__find_base(struct estat *root, int *argc, char ***args) { int status; char **normalized; status=0; /* Per default we use (shortened) per-wc paths, as there'll be no * arguments. */ root->arg=""; STOPIF( waa__find_common_base( *argc, *args, &normalized), NULL); if (*argc > 0 && strcmp(normalized[0], ".") == 0) { /* Use it for display, but otherwise ignore it. */ root->arg = **args; (*args) ++; (*argc) --; } STOPIF_CODE_ERR( *argc, EINVAL, "!Only a working copy root is a valid path."); /* Return the normalized value */ **args = normalized[0]; ex: return status; } /** Abbreviation function for tree recursion. */ static inline int waa___recurse_tree(struct estat **list, action_t handler, int (*me)(struct estat *, action_t )) { struct estat *sts; int status; status=0; while ( (sts=*list) ) { if (sts->do_this_entry && ops__allowed_by_filter(sts)) STOPIF( handler(sts), NULL); /* If the entry was removed, sts->updated_mode is 0, so we have to take * a look at the old sts->st.mode to determine whether it was a * directory. */ /* The OPT__ALL_REMOVED check is duplicated from ac__dispatch, to avoid * recursing needlessly. */ if ((sts->do_child_wanted || sts->do_userselected) && sts->entry_count && (sts->local_mode_packed ? TEST_PACKED(S_ISDIR, sts->local_mode_packed) : ((sts->entry_status & FS_REMOVED) && S_ISDIR(sts->st.mode) && opt__get_int(OPT__ALL_REMOVED)==OPT__YES)) ) /* if (TEST_PACKED( S_ISDIR, sts->local_mode_packed) && (sts->do_child_wanted || sts->do_userselected) && sts->entry_count && ((opt__get_int(OPT__ALL_REMOVED)==OPT__YES) || ((sts->entry_status & FS_REPLACED) != FS_REMOVED)) ) */ STOPIF( me(sts, handler), NULL); list++; } ex: return status; } /** -. 
 *
 */
/* Walk the entry tree in name-sorted order, calling "handler" for every
 * selected entry.  The (absolute) root is dispatched first; recursion into
 * children happens via waa___recurse_tree(), which calls back into this
 * function for subdirectories. */
int waa__do_sorted_tree(struct estat *root, action_t handler)
{
	int status;

	status=0;

	/* Do the root as first entry. */
	/* Only the real tree root has no parent; child directories are handled
	 * by the recursion below. */
	if (!root->parent && root->do_this_entry)
		STOPIF( handler(root), NULL);

	/* The by_name array is built lazily; sort on first use. */
	if ( !root->by_name)
		STOPIF( dir__sortbyname(root), NULL);

	STOPIF( waa___recurse_tree(root->by_name, handler, waa__do_sorted_tree), NULL);

ex:
	/* The sorted array is only needed during this traversal; release it
	 * on both success and error paths. */
	IF_FREE(root->by_name);
	return status;
}


/** -.
 *
 * The cwd is the directory to be looked at.
 *
 * IIRC the inode numbers may change on NFS; but having the WAA on NFS
 * isn't a good idea, anyway.
 * */
/* Enumerate the current working directory into "this", unless the cwd is
 * the WAA (administrative area) itself, which must never be scanned. */
int waa__dir_enum(struct estat *this, int est_count, int by_name)
{
	int status;
	struct sstat_t cwd_stat;

	status=0;
	STOPIF( hlp__lstat(".", &cwd_stat), NULL);
	DEBUGP("checking: %llu to %llu", (t_ull)cwd_stat.ino, (t_ull)waa_stat.ino);

	/* Is the parent the WAA? */
	/* Compare device AND inode - inode numbers alone are only unique per
	 * filesystem. */
	if (cwd_stat.dev == waa_stat.dev && cwd_stat.ino == waa_stat.ino)
		goto ex;

	/* If not, get a list. */
	STOPIF( dir__enumerator(this, est_count, by_name), NULL);

ex:
	return status;
}


/* Shared state between waa__copy_entries() and its per-entry callback
 * remember_to_copy(); saved/restored around recursion in
 * waa__copy_entries(). */
static struct estat **to_append;
static int append_count;

/* Callback for ops__correlate_dirs(): record "sts" as an entry that has to
 * be copied into the destination directory.
 * NOTE(review): the second parameter sts_p is unused here, and the return
 * value of ops__build_path() is not checked - path is only used for the
 * debug message, so a failure would at worst print garbage; confirm
 * upstream whether that is intended. */
int remember_to_copy(struct estat *sts, struct estat **sts_p)
{
	char *path;
	ops__build_path(&path, sts);
	DEBUGP("copy %s", path);

	to_append[append_count]=sts;
	append_count++;
	return 0;
}

/** -.
 *
 * \a dest must already exist; its name is \b not overwritten, as it is
 * (usually) different for the copy base entry.
 *
 * Existing entries of \a dest are not replaced or deleted;
 * other entries are appended, with a status of \c FS_REMOVED.
 *
 * This works for both directory and non-directory entries.
 */
/* Recursively copy the entry data of "src" onto "dest".
 * Uses the file-static to_append/append_count pair as scratch space for
 * the ops__correlate_dirs() callback; the old values are saved and
 * restored so that the recursive calls below don't clobber the caller's
 * state. */
int waa__copy_entries(struct estat *src, struct estat *dest)
{
	int status;
	struct estat *newdata, **tmp, **_old_to_append;
	int left, space;
	int _old_append_count;

	/* Save the globals - this function recurses into itself via
	 * ops__correlate_dirs() and the loop below. */
	_old_append_count = append_count;
	_old_to_append = to_append;
	to_append=NULL;
	append_count = 0;

	status=0;
	/* NOTE(review): return value of ops__copy_single_entry() is not
	 * checked (no STOPIF), unlike the other calls here - confirm whether
	 * it can fail. */
	ops__copy_single_entry(src, dest);

	/* Non-directories have no children; nothing more to do. */
	if (!S_ISDIR(src->st.mode)) goto ex;

	append_count=0;
	/* +1 so the array can stay NULL-terminated. */
	STOPIF( hlp__calloc( &to_append, src->entry_count+1, sizeof(src->by_name[0])), NULL);
	/* For every entry in src that is missing in dest, remember_to_copy()
	 * stores it into to_append[]. */
	STOPIF( ops__correlate_dirs( src, dest, remember_to_copy, waa__copy_entries, NULL, NULL), NULL);

	/* Now we know how many new entries there are. */
	/* Now the data in to_append gets switched from old entry to newly
	 * allocated entry; we count in reverse direction, to know how many
	 * entries are left and must be allocated. */
	/* We re-use the name string. */
	space=0;
	for( tmp=to_append, left=append_count; left>0; left--, tmp++, space--)
	{
		/* ops__allocate() hands out a chunk of "space" entries at once;
		 * step through it before asking for more. */
		if (space) newdata++;
		else STOPIF( ops__allocate( left, &newdata, &space), NULL);

		newdata->parent=dest;
		newdata->name=(*tmp)->name;

		/* Copy old data, and change what's needed. */
		STOPIF( waa__copy_entries(*tmp, newdata), NULL);

		/* If URL is different from parent URL, it's a new base. */
		/* Remember new address. */
		(*tmp) = newdata;
	}

	/* Hang the freshly allocated entries into dest. */
	STOPIF( ops__new_entries(dest, append_count, to_append), NULL);

ex:
	IF_FREE(to_append);
	/* Restore the caller's view of the shared scratch variables. */
	append_count = _old_append_count ;
	to_append = _old_to_append ;
	return status;
}


/** -.
 *
 * If \a base_dir is \c NULL, a default path is taken; else the string is
 * copied and gets an arbitrary postfix. If \a base_dir ends in \c
 * PATH_SEPARATOR, \c "fsvs" is inserted before the generated postfix.
 *
 * \a *output gets set to the generated filename, and must not be \c
 * free()d.
 */
/* Build a mkstemp-style template ("<dir>/[fsvs]?.XXXXXX") and let APR
 * create the temporary file.  The generated name is stored in an LRU
 * string cache, which is why the caller must not free() it. */
int waa__get_tmp_name(const char *base_dir, char **output, apr_file_t **handle, apr_pool_t *pool)
{
	int status;
	static struct cache_t *cache;
	static struct cache_entry_t *tmp_cache=NULL;
	/* Template suffix required by mktemp-style functions. */
	static const char to_append[]=".XXXXXX";
	static const char to_prepend[]="fsvs";
	char *filename;
	int len;

	/* Idempotent; keeps up to 12 generated names alive at once. */
	STOPIF( cch__new_cache(&cache, 12), NULL);

	len= base_dir ? strlen(base_dir) : 0;
	if (!len)
	{
		/* No directory given - find (and cache) the system tmp dir once. */
		if (!tmp_cache)
		{
			/* This function caches the value itself, but we'd have to store the
			 * length ourselves; furthermore, we get a copy every time - which
			 * fills the pool, whereas we could just use our cache. */
			STOPIF( apr_temp_dir_get(&base_dir, pool), "Getting a temporary directory path");
			len=strlen(base_dir);
			/* We need an extra byte for the PATH_SEPARATOR, and a \0. */
			STOPIF( cch__entry_set( &tmp_cache, 0, base_dir, len +1 +1, 0, NULL), NULL);
			tmp_cache->data[len++]=PATH_SEPARATOR;
			tmp_cache->data[len]=0;

			/* We set tmp_cache->len, which would be inclusive the alignment space
			 * at end, to the *actual* length, because we need that on every
			 * invocation.
			 * That works because tmp_cache is never changed again. */
			tmp_cache->len=len;
		}

		len=tmp_cache->len;
		base_dir=tmp_cache->data;
		BUG_ON(base_dir[len] != 0);
	}

	/* Reserve room for the full template in the cache entry. */
	STOPIF( cch__add(cache, 0, base_dir,
			/* Directory PATH_SEPARATOR pre post '\0' */
			len + 1 + strlen(to_prepend) + strlen(to_append) + 1 + 3,
			&filename), NULL);

	/* A trailing PATH_SEPARATOR means "directory only" - insert the
	 * "fsvs" marker so the file has a recognizable name. */
	if (base_dir[len-1] == PATH_SEPARATOR)
	{
		strcpy( filename + len, to_prepend);
		len+=strlen(to_prepend);
	}

	strcpy( filename + len, to_append);

	/* The default values include APR_DELONCLOSE, which we only want if the
	 * caller is not interested in the name. */
	STOPIF( apr_file_mktemp(handle, filename,
			APR_CREATE | APR_READ | APR_WRITE | APR_EXCL |
			(output ? 0 : APR_DELONCLOSE), pool),
			"Cannot create a temporary file for \"%s\"", filename);

	if (output) *output=filename;

ex:
	return status;
}


/** -.
 * The \a dir must be absolute; this function makes an own copy, so the
 * value will be unchanged.
*/ int waa__set_working_copy(const char const *wc_dir) { int status; status=0; BUG_ON(*wc_dir != PATH_SEPARATOR); wc_path_len=strlen(wc_dir); STOPIF( hlp__strnalloc( wc_path_len, &wc_path, wc_dir), NULL); ex: return status; } /** -. * The \a dir must be absolute; this function makes an own copy, so the * value will be unchanged. */ int waa__create_working_copy(const char const *wc_dir) { int status; char *dir; if (wc_dir) STOPIF(waa__set_working_copy(wc_dir), NULL); BUG_ON(!wc_path); /* Create the WAA base directory. */ STOPIF( waa__get_waa_directory( wc_path, &dir, NULL, NULL, GWD_WAA | GWD_MKDIR), NULL); STOPIF( waa__mkdir(dir, 1), NULL); /* Create the CONF base directory. */ STOPIF( waa__get_waa_directory( wc_path, &dir, NULL, NULL, GWD_CONF | GWD_MKDIR), NULL); STOPIF( waa__mkdir(dir, 1), NULL); /* Make an informational file to point to the base directory. */ /* Should we ignore errors? */ STOPIF( waa__make_info_file(wc_path, WAA__README, wc_path), NULL); ex: return status; } fsvs-1.2.6/src/diff.c0000644000202400020240000006544012467104255013331 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include "global.h" #include "revert.h" #include "helper.h" #include "interface.h" #include "url.h" #include "status.h" #include "options.h" #include "est_ops.h" #include "ignore.h" #include "waa.h" #include "racallback.h" #include "cp_mv.h" #include "warnings.h" #include "diff.h" /** \file * The \ref diff command source file. * * Currently only diffing single files is possible; recursive diffing * of trees has to be done. 
* * For trees it might be better to fetch all files in a kind of * update-scenario; then we'd avoid the many round-trips we'd have with * single-file-fetching. * Although an optimized file-fetching (rsync-like block transfers) would * probably save a lot of bandwidth. * */ /** \addtogroup cmds * * \section diff * * \code * fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...] * \endcode * * This command gives you diffs between local and repository files. * * With \c -v the meta-data is additionally printed, and changes shown. * * If you don't give the revision arguments, you get a diff of the base * revision in the repository (the last commit) against your current local file. * With one revision, you diff this repository version against your local * file. With both revisions given, the difference between these repository * versions is calculated. * * You'll need the \c diff program, as the files are simply passed as * parameters to it. * * The default is to do non-recursive diffs; so fsvs diff . will * output the changes in all files in the current directory and * below. * * The output for special files is the diff of the internal subversion * storage, which includes the type of the special file, but no newline at * the end of the line (which \c diff complains about). * * For entries marked as copy the diff against the (clean) source entry is * printed. * * Please see also \ref o_diff and \ref o_colordiff. * * \todo Two revisions diff is buggy in that it (currently) always fetches * the full trees from the repository; this is not only a performance * degradation, but you'll see more changed entries than you want (like * changes A to B to A). This will be fixed. * */ int cdiff_pipe=STDOUT_FILENO; pid_t cdiff_pid=0; /** A number that cannot be a valid pointer. */ #define META_DIFF_DELIMITER (0xf44fee31) /** How long may a meta-data diff string be? */ #define META_DIFF_MAXLEN (256) /** Diff the given meta-data. 
 * The given \a format string is used with the va-args to generate two
 * strings. If they are equal, one is printed (with space at front); else
 * both are shown (with '-' and '+').
 * The delimiter between the two argument lists is via \ref
 * META_DIFF_DELIMITER. (NULL could be in the data, eg. as integer \c 0.)
 *
 * It would be faster to simply compare the values given to \c vsnprintf();
 * that could even be done here, by using two \c va_list variables and
 * comparing. But it's not a performance problem. */
/* NOTE(review): va_start() here has no matching va_end(); C99 requires
 * one before the function returns - confirm and fix upstream. */
int df___print_meta(char *format, ... )
{
	int status;
	va_list va;
	char buf_old[META_DIFF_MAXLEN], buf_new[META_DIFF_MAXLEN];
	int l1, l2;

	status=0;
	va_start(va, format);

	/* First argument group: the "old" value string. */
	l1=vsnprintf(buf_old, META_DIFF_MAXLEN-1, format, va);
	DEBUGP("meta-diff: %s", buf_old);

	/* Skip ahead to the META_DIFF_DELIMITER sentinel separating the two
	 * argument groups; more than 5 args is considered a caller bug. */
	l2=0;
	while (va_arg(va, int) != META_DIFF_DELIMITER)
	{
		l2++;
		BUG_ON(l2>5, "Parameter list too long");
	}

	/* Second argument group: the "new" value string. */
	l2=vsnprintf(buf_new, META_DIFF_MAXLEN-1, format, va);
	DEBUGP("meta-diff: %s", buf_new);

	/* Negative return = format error; >= MAXLEN = truncated output. */
	STOPIF_CODE_ERR( l1<0 || l2<0 ||
			l1>=META_DIFF_MAXLEN || l2>=META_DIFF_MAXLEN, EINVAL,
			"Printing meta-data strings format error");

	/* Different */
	STOPIF_CODE_EPIPE( printf(
			(l1 != l2 || strcmp(buf_new, buf_old) !=0) ?
			"-%s\n+%s\n" : " %s\n",
			buf_old, buf_new), NULL);

ex:
	return status;
}


/** Get a file from the repository, and initiate a diff.
 *
 * Normally rev1 == root->repos_rev; to diff against
 * the \e base revision of the file.
 *
 * If the user specified only a single revision (rev2 == 0),
 * the local file is diffed against this; else against the
 * other repository version.
 *
 * \a rev2_file is meaningful only if \a rev2 is 0; this file gets removed
 * after printing the difference!
* */ int df__do_diff(struct estat *sts, svn_revnum_t rev1, svn_revnum_t rev2, char *rev2_file) { int status; int ch_stat; static pid_t last_child=0; static char *last_tmp_file=NULL; static char *last_tmp_file2=NULL; pid_t tmp_pid; char *path, *disp_dest, *disp_source; int len_d, len_s; char *b1, *b2; struct estat sts_r2; char short_desc[10]; char *new_mtime_string, *other_mtime_string; char *url_to_fetch, *other_url; int is_copy; int fdflags; apr_hash_t *props_r1, *props_r2; status=0; /* Check whether we have an active child; wait for it. */ if (last_child) { /* Keep the race window small. */ tmp_pid=last_child; last_child=0; STOPIF_CODE_ERR( waitpid(tmp_pid, &ch_stat, 0) == -1, errno, "Waiting for child gave an error"); DEBUGP("child %d exitcode %d - status 0x%04X", tmp_pid, WEXITSTATUS(ch_stat), ch_stat); STOPIF_CODE_ERR( !WIFEXITED(ch_stat), EIO, "!Child %d terminated abnormally", tmp_pid); if (WEXITSTATUS(ch_stat) == 1) DEBUGP("exit code 1 - file has changed."); else { STOPIF( wa__warn(WRN__DIFF_EXIT_STATUS, EIO, "Child %d gave an exit status %d", tmp_pid, WEXITSTATUS(ch_stat)), NULL); } } /* \a last_tmp_file should only be set when last_child is set; * but who knows. * * This cleanup must be done \b after waiting for the child - else we * might delete the file before it was opened! * */ if (last_tmp_file) { STOPIF_CODE_ERR( unlink(last_tmp_file) == -1, errno, "Cannot remove temporary file %s", last_tmp_file); last_tmp_file=NULL; } if (last_tmp_file2) { STOPIF_CODE_ERR( unlink(last_tmp_file2) == -1, errno, "Cannot remove temporary file %s", last_tmp_file2); last_tmp_file2=NULL; } /* Just uninit? */ if (!sts) goto ex; STOPIF( ops__build_path( &path, sts), NULL); url_to_fetch=NULL; /* If this entry is freshly copied, get it's source URL. */ is_copy=sts->flags & RF___IS_COPY; if (is_copy) { /* Should we warn if any revisions are given? Can we allow one? 
*/ STOPIF( cm__get_source(sts, NULL, &url_to_fetch, &rev1, 0), NULL); /* \TODO: That doesn't work for unknown URLs - but that's needed as * soon as we allow "fsvs cp URL path". */ STOPIF( url__find(url_to_fetch, &sts->url), NULL); } else url_to_fetch=path+2; current_url = sts->url; /* We have to fetch a file and do the diff, so open a session. */ STOPIF( url__open_session(NULL, NULL), NULL); /* The function rev__get_file() overwrites the data in \c *sts with * the repository values - mtime, ctime, etc. * We use this as an advantage and remember the current time - so that * we can print both. */ /* \e From is always the "old" - base revision, or first given revision. * \e To is the newer version - 2nd revision, or local file. */ /* TODO: use delta transfers for 2nd file. */ sts_r2=*sts; if (rev2 != 0) { STOPIF( url__full_url(sts, &other_url), NULL); STOPIF( url__canonical_rev(current_url, &rev2), NULL); STOPIF( rev__get_text_to_tmpfile(other_url, rev2, DECODER_UNKNOWN, NULL, &last_tmp_file2, NULL, &sts_r2, &props_r2, current_url->pool), NULL); } else if (rev2_file) { DEBUGP("diff against %s", rev2_file); /* Let it get removed. */ last_tmp_file2=rev2_file; } /* Now fetch the \e old version. */ STOPIF( url__canonical_rev(current_url, &rev1), NULL); STOPIF( rev__get_text_to_tmpfile(url_to_fetch, rev1, DECODER_UNKNOWN, NULL, &last_tmp_file, NULL, sts, &props_r1, current_url->pool), NULL); /* If we didn't flush the stdio buffers here, we'd risk getting them * printed a second time from the child. */ fflush(NULL); last_child=fork(); STOPIF_CODE_ERR( last_child == -1, errno, "Cannot fork diff program"); if (!last_child) { STOPIF( hlp__format_path(sts, path, &disp_dest), NULL); /* Remove the ./ at the front */ setenv(FSVS_EXP_CURR_ENTRY, path+2, 1); disp_source= is_copy ? 
url_to_fetch : disp_dest; len_d=strlen(disp_dest); len_s=strlen(disp_source); if (cdiff_pipe != STDOUT_FILENO) { STOPIF_CODE_ERR( dup2(cdiff_pipe, STDOUT_FILENO) == -1, errno, "Redirect output"); /* Problem with svn+ssh - see comment below. */ fdflags=fcntl(STDOUT_FILENO, F_GETFD); fdflags &= ~FD_CLOEXEC; /* Does this return errors? */ fcntl(STDOUT_FILENO, F_SETFD, fdflags); } /* We need not be nice with memory usage - we'll be replaced soon. */ /* 30 chars should be enough for everyone */ b1=malloc(len_s + 60 + 30); b2=malloc(len_d + 60 + 30); STOPIF( hlp__strdup( &new_mtime_string, ctime(& sts_r2.st.mtim.tv_sec)), NULL); STOPIF( hlp__strdup( &other_mtime_string, ctime(&sts->st.mtim.tv_sec)), NULL); sprintf(b1, "%s \tRev. %llu \t(%-24.24s)", disp_source, (t_ull) rev1, other_mtime_string); if (rev2 == 0) { sprintf(b2, "%s \tLocal version \t(%-24.24s)", disp_dest, new_mtime_string); strcpy(short_desc, "local"); } else { sprintf(b2, "%s \tRev. %llu \t(%-24.24s)", disp_dest, (t_ull) rev2, new_mtime_string); sprintf(short_desc, "r%llu", (t_ull) rev2); } /* Print header line, just like a recursive diff does. */ STOPIF_CODE_EPIPE( printf("diff -u %s.r%llu %s.%s\n", disp_source, (t_ull)rev1, disp_dest, short_desc), "Diff header"); if (opt__is_verbose() > 0) // TODO: && !symlink ...) { STOPIF( df___print_meta( "Mode: 0%03o", sts->st.mode & 07777, META_DIFF_DELIMITER, sts_r2.st.mode & 07777), NULL); STOPIF( df___print_meta( "MTime: %.24s", other_mtime_string, META_DIFF_DELIMITER, new_mtime_string), NULL); STOPIF( df___print_meta( "Owner: %d (%s)", sts->st.uid, hlp__get_uname(sts->st.uid, "undefined"), META_DIFF_DELIMITER, sts_r2.st.uid, hlp__get_uname(sts_r2.st.uid, "undefined") ), NULL); STOPIF( df___print_meta( "Group: %d (%s)", sts->st.gid, hlp__get_grname(sts->st.gid, "undefined"), META_DIFF_DELIMITER, sts_r2.st.gid, hlp__get_grname(sts_r2.st.gid, "undefined") ), NULL); } fflush(NULL); // TODO: if special_dev ... 
/* Checking \b which return value we get is unnecessary ... On \b * every error we get \c -1 .*/ execlp( opt__get_string(OPT__DIFF_PRG), opt__get_string(OPT__DIFF_PRG), opt__get_string(OPT__DIFF_OPT), last_tmp_file, "--label", b1, (rev2 != 0 ? last_tmp_file2 : rev2_file ? rev2_file : path), "--label", b2, opt__get_string(OPT__DIFF_EXTRA), NULL); STOPIF_CODE_ERR( 1, errno, "Starting the diff program \"%s\" failed", opt__get_string(OPT__DIFF_PRG)); } ex: return status; } /** Cleanup rests. */ int df___cleanup(void) { int status; int ret; if (cdiff_pipe != STDOUT_FILENO) STOPIF_CODE_ERR( close(cdiff_pipe) == -1, errno, "Cannot close colordiff pipe"); if (cdiff_pid) { /* Should we kill colordiff? Let it stop itself? Wait for it? * It should terminate itself, because STDIN gets no more data. * * But if we don't wait, it might get scheduled after the shell printed * its prompt ... and that's not fine. But should we ignore the return * code? */ STOPIF_CODE_ERR( waitpid( cdiff_pid, &ret, 0) == -1, errno, "Can't wait"); DEBUGP("child %d exitcode %d - status 0x%04X", cdiff_pid, WEXITSTATUS(ret), ret); } STOPIF( df__do_diff(NULL, 0, 0, 0), NULL); ex: return status; } /// FSVS GCOV MARK: df___signal should not be executed /** Signal handler function. * If the user wants us to quit, we remove the temporary files, and exit. * * Is there a better/cleaner way? * */ static void df___signal(int sig) { DEBUGP("signal %d arrived!", sig); df___cleanup(); exit(0); } /** Does a diff of the local non-directory against the given revision. * */ int df___type_def_diff(struct estat *sts, svn_revnum_t rev, apr_pool_t *pool) { int status; char *special_stg, *fn; apr_file_t *apr_f; apr_size_t wr_len, exp_len; status=0; special_stg=NULL; switch (sts->st.mode & S_IFMT) { case S_IFREG: STOPIF( df__do_diff(sts, rev, 0, NULL), NULL); break; case S_IFCHR: case S_IFBLK: case S_IFANYSPECIAL: special_stg=ops__dev_to_filedata(sts); /* Fallthrough, ignore first statement. 
 */
		case S_IFLNK:
			/* Device entries already produced special_stg via the
			 * fall-through above; symlinks get theirs here. */
			if (!special_stg)
				STOPIF( ops__link_to_string(sts, NULL, &special_stg), NULL);
			STOPIF( ops__build_path( &fn, sts), NULL);
			STOPIF_CODE_EPIPE( printf("Special entry changed: %s\n", fn), NULL);

			/* As "diff" cannot handle special files directly, we have to
			 * write the expected string into a file, and diff against
			 * that.
			 * The remote version is fetched into a temporary file anyway. */
			STOPIF( waa__get_tmp_name(NULL, &fn, &apr_f, pool), NULL);
			wr_len=exp_len=strlen(special_stg);
			STOPIF( apr_file_write(apr_f, special_stg, &wr_len), NULL);
			/* A short write means the filesystem is full (or similar). */
			STOPIF_CODE_ERR( wr_len != exp_len, ENOSPC, NULL);
			STOPIF( apr_file_close(apr_f), NULL);

			STOPIF( df__do_diff(sts, rev, 0, fn), NULL);
			break;

		default:
			BUG("type?");
	}

ex:
	return status;
}


/** -. */
/* Diff a single entry against its base revision; called per entry during
 * a working-copy diff.  Directories are skipped entirely (see the empty
 * else branch at the bottom). */
int df___direct_diff(struct estat *sts)
{
	int status;
	svn_revnum_t rev1, rev2;
	char *fn;

	STOPIF( ops__build_path( &fn, sts), NULL);

	status=0;
	if (!S_ISDIR(sts->st.mode))
	{
		DEBUGP("doing %s", fn);

		/* Has to be set per sts. */
		rev1=sts->repos_rev;
		rev2=0;
		/* Locally removed: only the repository side exists. */
		if ( (sts->entry_status & FS_REMOVED))
		{
			STOPIF_CODE_EPIPE( printf("Only in repository: %s\n", fn), NULL);
			goto ex;
		}

		if (sts->to_be_ignored) goto ex;

		/* New or URL-less entries have no repository counterpart ... */
		if ( (sts->entry_status & FS_NEW) || !sts->url)
		{
			if (sts->flags & RF___IS_COPY)
			{
				/* File was copied, we have a source */
			}
			else
			{
				/* ... so there is nothing to diff against; mention it only
				 * in verbose mode. */
				if (opt__is_verbose() > 0)
					STOPIF_CODE_EPIPE( printf("Only in local filesystem: %s\n", fn), NULL);
				goto ex;
			}
		}

		/* Local files must have changed; for repos-only diffs do always. */
		if (sts->entry_status || opt_target_revisions_given)
		{
			DEBUGP("doing diff rev1=%llu", (t_ull)rev1);
			/* NOTE(review): this branch is dead - the enclosing "if" already
			 * excluded directories; presumably kept as a placeholder for a
			 * future meta-data diff. */
			if (S_ISDIR(sts->st.mode))
			{
				/* TODO: meta-data diff? */
			}
			else
			{
				/* TODO: Some kind of pool handling in recursion. */
				STOPIF( df___type_def_diff(sts, rev1, global_pool), NULL);
			}
		}
	}
	else
	{
		/* Nothing to do for directories? */
	}

ex:
	return status;
}


/** A cheap replacement for colordiff.
 * Nothing more than a \c cat.
 */
/* Copy stdin to stdout unmodified; used as fallback when the real
 * colordiff binary cannot be exec'd but the option allows best-effort.
 * EPIPE is swallowed (the reader went away), everything else is an
 * error. */
int df___cheap_colordiff(void)
{
	int status;
	char *tmp;
	const int tmp_size=16384;

	status=0;
	/* Stack allocation is fine: this runs in a short-lived child. */
	tmp=alloca(tmp_size);
	/* status doubles as the read/write byte count here. */
	while ( (status=read(STDIN_FILENO,tmp, tmp_size)) > 0 )
		if ( (status=write(STDOUT_FILENO, tmp, status)) == -1)
			break;

	if (status == -1)
	{
		STOPIF_CODE_ERR(errno != EPIPE, errno, "Getting or pushing diff data");
		status=0;
	}

ex:
	return status;
}


/** Tries to start colordiff.
 * If colordiff can not be started, but the option says \c auto, we just
 * forward the data. Sadly neither \c splice nor \c sendfile are available
 * everywhere.
 * */
/* On success *handle is the write end feeding colordiff's stdin, and
 * *cd_pid the child's PID (to be reaped by df___cleanup()). */
int df___colordiff(int *handle, pid_t *cd_pid)
{
	const char *program;
	int status;
	int pipes[2], fdflags, success[2];

	status=0;
	/* An empty option string means "use the default binary name". */
	program=opt__get_int(OPT__COLORDIFF) ?
		opt__get_string(OPT__COLORDIFF) : "colordiff";

	/* pipes[]: data parent -> child; success[]: error report child -> parent. */
	STOPIF_CODE_ERR( pipe(pipes) == -1, errno, "No more pipes");
	STOPIF_CODE_ERR( pipe(success) == -1, errno, "No more pipes, case 2");

	/* There's a small problem if the parent gets scheduled before the child,
	 * and the child doesn't find the colordiff binary; then the parent might
	 * only find out when it tries to send the first data across the pipe.
	 *
	 * But the successfully spawned colordiff won't report success, so the
	 * parent would have to wait for a fail message - which delays execution
	 * unnecessary - or simply live with diff getting EPIPE.
	 *
	 * Trying to get it scheduled by sending it a signal (which will be
	 * ignored) doesn't work reliably, too.
	 *
	 * The only way I can think of is opening a second pipe in reverse
	 * direction; if there's nothing to be read but EOF, the program could be
	 * started - else we get a single byte, signifying an error.
*/ *cd_pid=fork(); STOPIF_CODE_ERR( *cd_pid == -1, errno, "Cannot fork colordiff program"); if (!*cd_pid) { close(success[0]); fdflags=fcntl(success[1], F_GETFD); fdflags |= FD_CLOEXEC; fcntl(success[1], F_SETFD, fdflags); STOPIF_CODE_ERR( ( dup2(pipes[0], STDIN_FILENO) | close(pipes[1]) | close(pipes[0]) ) == -1, errno, "Redirecting IO didn't work"); execlp( program, program, NULL); /* "" as value means best effort, so no error; any other string should * give an error. */ if (opt__get_int(OPT__COLORDIFF) != 0) { fdflags=errno; if (!fdflags) fdflags=EINVAL; /* Report an error to the parent. */ write(success[1], &fdflags, sizeof(fdflags)); STOPIF_CODE_ERR_GOTO(1, fdflags, quit, "!Cannot start colordiff program \"%s\"", program); } close(success[1]); /* Well ... do the best. */ /* We cannot use STOPIF() and similar, as that would return back up to * main - and possibly cause problems somewhere else. */ status=df___cheap_colordiff(); quit: exit(status ? 1 : 0); } close(pipes[0]); close(success[1]); status=read(success[0], &fdflags, sizeof(fdflags)); close(success[0]); STOPIF_CODE_ERR( status>0, fdflags, "!The colordiff program \"%s\" doesn't accept any data.\n" "Maybe it couldn't be started, or stopped unexpectedly?", opt__get_string(OPT__COLORDIFF) ); /* For svn+ssh connections a ssh process is spawned off. * If we don't set the CLOEXEC flag, it inherits the handle, and so the * colordiff child will never terminate - it might get data from ssh, after * all. */ fdflags=fcntl(pipes[1], F_GETFD); fdflags |= FD_CLOEXEC; /* Does this return errors? */ fcntl(pipes[1], F_SETFD, fdflags); *handle=pipes[1]; DEBUGP("colordiff is %d", *cd_pid); ex: return status; } /** Prints diffs for all entries with estat::entry_status or * estat::remote_status set. 
 */
int df___diff_wc_remote(struct estat *entry, apr_pool_t *pool)
{
	int status;
	struct estat **sts;
	int removed;
	char *fn;
	apr_pool_t *subpool;

	status=0;
	subpool=NULL;
	/* Per-recursion-level subpool, destroyed at ex:, so deep trees don't
	 * accumulate allocations in the caller's pool. */
	STOPIF( apr_pool_create(&subpool, pool), NULL);

	/* Bit 1: gone in the repository. Bit 2: gone locally - a remotely
	 * *new* entry counts here too, as it exists only in the repository. */
	removed=
		( ((entry->remote_status & FS_REPLACED) == FS_REMOVED) ? 1 : 0 ) |
		( ((entry->remote_status & FS_REPLACED) == FS_NEW) ? 2 : 0 ) |
		( ((entry->entry_status & FS_REPLACED) == FS_REMOVED) ? 2 : 0 );

	STOPIF( ops__build_path(&fn, entry), NULL);
	DEBUGP_dump_estat(entry);

	/* TODO: option to print the whole lot of removed and "new" lines for
	 * files existing only at one point? */
	switch (removed)
	{
		case 3:
			/* Removed both locally and remote; no change to print. (?) */
			break;
		case 1:
			/* Remotely removed. */
			STOPIF_CODE_EPIPE( printf("Only locally: %s\n", fn), NULL);
			break;
		case 2:
			/* Locally removed. */
			STOPIF_CODE_EPIPE( printf("Only in the repository: %s\n", fn), NULL);
			break;
		case 0:
			/* Exists on both; show (recursive) differences. */
			if ((entry->local_mode_packed != entry->new_rev_mode_packed))
			{
				/* Another type, so a diff doesn't make much sense, does it? */
				STOPIF_CODE_EPIPE( printf("Type changed from local %s to %s: %s\n",
							st__type_string(PACKED_to_MODE_T(entry->local_mode_packed)),
							st__type_string(PACKED_to_MODE_T(entry->new_rev_mode_packed)),
							fn), NULL);
				/* Should we print some message that sub-entries are available?
					 if (opt__is_verbose() > 0)
					 {
					 }
					 */
			}
			else if (entry->entry_status || entry->remote_status)
			{
				/* Local changes, or changes to repository. */
				if (S_ISDIR(entry->st.mode))
				{
					/* TODO: meta-data diff? */
					if (entry->entry_count)
					{
						/* Recurse into all children (by_inode is
						 * NULL-terminated). */
						sts=entry->by_inode;
						while (*sts)
						{
							STOPIF( df___diff_wc_remote(*sts, subpool), NULL);
							sts++;
						}
					}
				}
				else
					STOPIF( df___type_def_diff(entry, entry->repos_rev, subpool), NULL);
			}
			break;
	}

ex:
	/* This is of type (void), so we don't have any status to check. */
	if (subpool)
		apr_pool_destroy(subpool);

	return status;
}


/** Set the entry as BASE (has no changes).
 */
/* Used as action->repos_feedback in df__work() to forget any remote
 * change information gathered so far. */
int df___reset_remote_st(struct estat *sts)
{
	sts->remote_status=0;
	return 0;
}


/** Does a repos/repos diff.
 * Currently works only for files. */
int df___repos_repos(struct estat *sts)
{
	int status;
	char *fullpath, *path;
	struct estat **children;

	STOPIF( ops__build_path( &fullpath, sts), NULL);
	DEBUGP("%s: %s",
			fullpath,
			st__status_string_fromint(sts->remote_status));
	STOPIF( hlp__format_path( sts, fullpath, &path), NULL);

	/* Entries existing in only one of the two revisions just get
	 * announced; only entries changed between them are diffed. */
	if ((sts->remote_status & FS_REPLACED) == FS_REPLACED)
		STOPIF_CODE_EPIPE(
				printf("Completely replaced: %s\n", path), NULL);
	else if (sts->remote_status & FS_NEW)
		STOPIF_CODE_EPIPE(
				printf("Only in r%llu: %s\n",
					(t_ull)opt_target_revision2, path), NULL);
	else if ((sts->remote_status & FS_REPLACED) == FS_REMOVED)
		STOPIF_CODE_EPIPE(
				printf("Only in r%llu: %s\n",
					(t_ull)opt_target_revision, path), NULL);
	else if (sts->remote_status)
		switch (sts->st.mode & S_IFMT)
		{
			case S_IFDIR:
				/* TODO: meta-data diff? */
				if (sts->entry_count)
				{
					children=sts->by_inode;
					while (*children)
						STOPIF( df___repos_repos(*(children++)), NULL);
				}
				break;

				/* Normally a repos-repos diff can only show symlinks changing -
				 * all other types of special entries get *replaced*. */
			case S_IFANYSPECIAL:
				/* We don't know yet which special type it is. */
			case S_IFLNK:
			case S_IFBLK:
			case S_IFCHR:
				STOPIF_CODE_EPIPE( printf("Special entry changed: %s\n",
							path), NULL);
				/* Fallthrough */
			case S_IFREG:
				STOPIF( df__do_diff(sts,
							opt_target_revision, opt_target_revision2, NULL),
						NULL);
				break;
			default:
				BUG("type?");
		}

ex:
	return status;
}


/** -.
 *
 * We get the WC status, fetch the named changed entries, and call
 * an external diff program for each.
 *
 * As a small performance optimization we do that kind of parallel -
 * while we're fetching a file, we run the diff.
 */
int df__work(struct estat *root, int argc, char *argv[])
{
	int status;
	int i, deinit;
	char **normalized;
	svn_revnum_t rev, base;
	char *norm_wcroot[2]= {".", NULL};

	status=0;
	deinit=1;

	STOPIF( waa__find_common_base(argc, argv, &normalized), NULL);
	STOPIF( url__load_nonempty_list(NULL, 0), NULL);
	STOPIF(ign__load_list(NULL), NULL);

	/* Install handlers so the external diff/colordiff children can be
	 * cleaned up on interruption. */
	signal(SIGINT, df___signal);
	signal(SIGTERM, df___signal);
	signal(SIGHUP, df___signal);
	signal(SIGCHLD, SIG_DFL);

	/* check for colordiff */
	if (( opt__get_int(OPT__COLORDIFF)==0 ||
				opt__doesnt_say_off(opt__get_string(OPT__COLORDIFF))
			) &&
			(isatty(STDOUT_FILENO) ||
			 opt__get_prio(OPT__COLORDIFF) > PRIO_PRE_CMDLINE) )
	{
		DEBUGP("trying to use colordiff");
		STOPIF( df___colordiff(&cdiff_pipe, &cdiff_pid), NULL);
	}

	/* TODO: If we get "-u X@4 Y@4:3 Z" we'd have to do different kinds of
	 * diff for the URLs.
	 * What about filenames? */
	STOPIF( url__mark_todo(), NULL);

	/* Dispatch on how many revisions the user gave (0, 1 or 2). */
	switch (opt_target_revisions_given)
	{
		case 0:
			/* Diff WC against BASE. */
			action->local_callback=df___direct_diff;
			/* We know that we've got a wc base because of
			 * waa__find_common_base() above. */
			STOPIF( waa__read_or_build_tree(root, argc, normalized, argv,
						NULL, 1), NULL);

			break;
		case 1:
			/* WC against rX. */
			/* Fetch local changes ... */
			action->local_callback=st__progress;
			action->local_uninit=st__progress_uninit;
			STOPIF( waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1), NULL);

// Has to set FS_CHILD_CHANGED somewhere

			/* Fetch remote changes ... */
			while ( ! ( status=url__iterator(&rev) ) )
			{
				STOPIF( cb__record_changes(root, rev, current_url->pool), NULL);
			}
			/* url__iterator() signals normal end with EOF. */
			STOPIF_CODE_ERR( status != EOF, status, NULL);

			STOPIF( df___diff_wc_remote(root, current_url->pool), NULL);
			break;
		case 2:
			/* rX:Y.
			 * This works in a single loop because the URLs are sorted in
			 * descending priority, and an entry removed at a higher priority
			 * could be replaced by one at a lower. */
			/* TODO: 2 revisions per-URL. */

			/* If no entries are given, do the whole working copy.
			 */
			if (!argc)
				normalized=norm_wcroot;

			while ( ! ( status=url__iterator(&rev) ) )
			{
				STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL);
				STOPIF( url__canonical_rev(current_url, &opt_target_revision2), NULL);

				/* Take the values at the first revision as base; say that we've
				 * got nothing. */
				current_url->current_rev=0;
				action->repos_feedback=df___reset_remote_st;
				STOPIF( cb__record_changes(root, opt_target_revision,
							current_url->pool), NULL);

				/* Now get changes. We cannot do diffs directly, because
				 * we must not use the same connection for two requests
				 * simultaneously. */
				action->repos_feedback=NULL;

				/* We say that the WC root is at the target revision, but that some
				 * paths are not. */
				base=current_url->current_rev;
				current_url->current_rev=opt_target_revision2;
				STOPIF( cb__record_changes_mixed(root, opt_target_revision2,
							normalized, base, current_url->pool),
						NULL);
			}
			/* EOF is the regular end-of-iteration marker. */
			STOPIF_CODE_ERR( status != EOF, status, NULL);

			/* If we'd use the log functions to get a list of changed files
			 * we'd be slow for large revision ranges; for the various
			 * svn_ra_do_update, svn_ra_do_diff2 and similar functions we'd
			 * need the (complete) working copy base to get deltas against (as
			 * we don't know which entries are changed).
			 *
			 * This way seems to be the fastest, and certainly the easiest for
			 * now. */
			/* "time fsvs diff -r4:4" on "ssh+svn://localhost/..." for 8400
			 * files gives a real time of 3.6sec.
			 * "time fsvs diff > /dev/null" on "ssh+svn://localhost/..." for 840
			 * of 8400 files changed takes 1.8sec.
			 * */
			/* A possible idea would be to have a special delta-editor that
			 * accepts (not already known) directories as unchanged.
			 * Then it should be possible [1] to ask for the *needed* parts
			 * only, which should save a fair bit of bandwidth.
			 *
			 * Ad 1: Ignoring "does not exist" messages when we say "directory
			 * 'not-needed' is already at revision 'target'" and this isn't
			 * true. TODO: Test whether all ra layers make that possible.
			 */
			STOPIF( df___repos_repos(root), NULL);
			status=0;
			break;
		default:
			BUG("what?");
	}

	/* Flush any pending diff (NULL entry = finish). */
	STOPIF( df__do_diff(NULL, 0, 0, 0), NULL);

ex:
	if (deinit)
	{
		deinit=0;
		i=df___cleanup();
		/* Don't let a cleanup error shadow an earlier error. */
		if (!status && i)
			STOPIF(i, NULL);
	}

	return status;
}

fsvs-1.2.6/src/checksum.h0000644000202400020240000000406711135025745014223 0ustar marekmarek/************************************************************************
 * Copyright (C) 2005-2009 Philipp Marek.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 3 as
 * published by the Free Software Foundation.
 ************************************************************************/

#ifndef __CHECKSUM_H__
#define __CHECKSUM_H__

#include "global.h"
#include "interface.h"

/* NOTE(review): the target of this #include was lost in extraction
 * (angle-bracket contents stripped); restore from the original header. */
#include 

/** \file
 * CRC, manber function header file. */

/** This structure is used for one big file.
 * It stores the CRCs and MD5s of the manber-blocks of this file. */
struct cs__manber_hashes
{
	/** The manber-hashes */
	/* AC_CV_C_UINT32_T is presumably the configure-detected 32bit
	 * unsigned type - TODO confirm against config.h. */
	AC_CV_C_UINT32_T *hash;
	/** The MD5-digests */
	md5_digest_t *md5;
	/** The position of the first byte of the next block, ie.
	 * N for a block which ends at byte N-1. */
	off_t *end;

	/** The index into the above arrays - sorted by manber-hash. */
	AC_CV_C_UINT32_T *index;
	/** Number of manber-hash-entries stored */
	unsigned count;
};


/** Checks whether a file has changed. */
int cs__compare_file(struct estat *sts, char *fullpath, int *result);
/** Puts the hex string of \a md5 into \a dest, and returns \a dest. */
char* cs__md5tohex(const md5_digest_t md5, char *dest);
/** Converts an MD5 digest to an ASCII string in a self-managed buffer. */
char *cs__md5tohex_buffered(const md5_digest_t md5);
/** Converts an ASCII string to an MD5 digest. */
int cs__char2md5(const char *input, char **eos, md5_digest_t md5);

/** Callback for the checksum layer.
 */
int cs__set_file_committed(struct estat *sts);

/** Creates a \c svn_stream_t pipe, which writes the checksums of the
 * manber hash blocks to the \ref md5s file. */
int cs__new_manber_filter(struct estat *sts,
		svn_stream_t *stream_input,
		svn_stream_t **filter_stream,
		apr_pool_t *pool);

/** Reads the \ref md5s file into memory. */
int cs__read_manber_hashes(struct estat *sts,
		struct cs__manber_hashes *data);

/** Hex-character pair to ascii. */
int cs__two_ch2bin(char *stg);

#endif
fsvs-1.2.6/src/global.h0000644000202400020240000010006511657241136013657 0ustar marekmarek/************************************************************************
 * Copyright (C) 2005-2009 Philipp Marek.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 3 as
 * published by the Free Software Foundation.
 ************************************************************************/

#ifndef __GLOBAL_H__
#define __GLOBAL_H__

#include "config.h"
#include "preproc.h"

/* NOTE(review): the targets of the following #includes were lost in
 * extraction (angle-bracket contents stripped); restore from the
 * original header. */
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

/** \file
 * Global definitions.
 * Here are things defined that are needed in almost every action. */

/** If this pointer is non-\c NULL, free() it and set it
 * to \c NULL, to avoid repeated free()ing. */
#define IF_FREE(x) do { if (x) free(x); x=NULL; } while (0)

/* \addtogroup compati */
/* @{ */
/** The system-specific character to delimit directories.
 * Surely there's something like that in APR somewhere. */
#define PATH_SEPARATOR ('/')
/** The system-specific character to be used before environment variables.
 * For DOS-compatibility printing that behind the name would be necessary,
 * too - therefore it's not automatic per \c configure. */
#define ENVIRONMENT_START ('$')
/** @} */

/** We have some functions which don't use all parameters. */
#define UNUSED __attribute__ ((__unused__))

/** A type for holding an MD5 digest.
* Only the digest, not the working data set. */ typedef unsigned char md5_digest_t[APR_MD5_DIGESTSIZE]; /** \addtogroup PrintfTypes Types for consistent printf() output. * \ingroup compati * * These types are used to get consistent stack usage for \c printf() * and friends. * The types for eg. \a ino_t vary in different architectures -- they are * 32 or 64bit. If we just pushed the arch-dependent size on the stack, * but used always the same format string, \c printf() would (on some * architectures) take the wrong argument off the stack, which results * in corrupted output of later data. */ /** @{ */ typedef unsigned long long t_ull; typedef long long t_ll; typedef unsigned long t_ul; /** @} */ /** \anchor PatTypes * \name Pattern types. * * The ignore/take specifications can be in several forms; please * see the doc/IGNORING documentation. */ /** @{ */ #define PT_SHELL (1) #define PT_PCRE (2) #define PT_DEVICE (3) #define PT_INODE (4) #define PT_SHELL_ABS (5) /** Data storage for ignore patterns. */ struct ignore_t { /** The pattern string as given by the user, including flags. */ char *pattern; /** The group this belongs to, as string. */ const char *group_name; /** The group definition. */ struct grouping_t *group_def; /** The calculated pattern string. * Does no longer include the flags (like \e take), and shell syntax * is converted to PCRE. */ char *compare_string; /** How many times this pattern was visited. */ unsigned stats_tested; /** How many times this pattern matched. * Both are not strictly needed; we could simply do a difference between * this and the next value, but that
    *
  • wouldn't work for the last entry (we'd have to store the tests * done globally, and store how many matches at each level), *
  • isn't so easy to check for bugs ;-), and *
  • it wouldn't work anyway as soon as groups could accumulate in some * way. *
* * So, for simplicities' sake, we just store both values. */ unsigned stats_matches; union { /* for shell and pcre */ struct { /** PCRE main data storage */ pcre *compiled; /** PCRE extra data storage. Currently nearly unused. */ pcre_extra *extra; }; /** For device compares */ struct { /** The major number */ int major; /** The minor number */ int minor; /** \see PatDevComp */ char compare; /** Flag saying whether a minor number was given, or if only the major * number was told. */ char has_minor; }; /** Inode compares are easy: just an inode number and a device. */ struct { /** Inode number */ ino_t inode; /** Device */ dev_t dev; }; }; /** AND-value for mode matching, or \c 0 for not chosen. */ unsigned short mode_match_and; /** CMP-value for mode matching. */ unsigned short mode_match_cmp; /** Should this match only directories? */ unsigned int dir_only:1; /** Ignore case for comparing? */ unsigned int is_icase:1; /** Is it an \e internally generated pattern (for the WAA area)? * Internal patterns are not saved and not printed. */ unsigned int is_user_pat:1; /** Which type is this pattern? See \ref PatTypes. */ /* This is at the end because of alignment issues. */ unsigned int type:3; }; /** Whether the device compare should be * for \e equal, \e lesser or \e higher devices. * * If the device numbers get completely randomized (was discussed * on \c linux-kernel some time ago) this will be useless; * we'll have to add another pattern type like eg. * all devices with a major number like the device node * \c /dev/ram0 to make sense again. */ #define PAT_DEV__UNSPECIFIED (0) #define PAT_DEV__LESS (1) #define PAT_DEV__EQUAL (2) #define PAT_DEV__GREATER (4) #define PAT_DEV___INVALID_MASK (PAT_DEV__LESS | PAT_DEV__GREATER) #define PAT_DEV__HAVE_MINOR (0x80) /** @} */ /** The special value used for a not-yet-valid url_t::internal_number. */ #define INVALID_INTERNAL_NUMBER (-1) /** All the data FSVS must know about an URL. 
*/ struct url_t { /** The URL itself (http:// or svn:// or similar) */ char *url; /** The user-given priority; need not be unique. * The lower the number, the higher the priority. */ int priority; /** The length of the URL, not counting the \c \\0. */ int urllen; /** The revision we'd like that URL to be at - normally HEAD. */ svn_revnum_t target_rev; /** The revision the user gave for this command for this URL. * Normally equals \c target_rev. */ svn_revnum_t current_target_rev; /** The revision number this URL is currently at. */ svn_revnum_t current_rev; /** The \c HEAD revision, or \c SVN_INVALID_REVNUM if not yet known. */ svn_revnum_t head_rev; /** The user-given symbolic name */ char *name; /** The number which is used in the dir-lists to reference this url_t. * Must be unique in the URL-list. * * This is a different thing as the priority - the user must be able * to change the priority of the URLs, without changing our internal * references! */ int internal_number; /** A count of entries using this url. Used for determining if it's still * needed. */ unsigned count; /** A session connected to this URL. * \todo Session sharing for URLs in the same repository. */ svn_ra_session_t *session; /** The pool this session was allocated in. */ apr_pool_t *pool; /** Changelist counter. */ int entry_list_count; /** Flag saying whether this URL should be done. * Should not be queried directly, but by using url__to_be_handled(). */ int to_be_handled:1; /** Whether the user gave a specific override revision number. */ int current_target_override:1; /** Is a commit disallowed? */ unsigned is_readonly:1; }; /** \addtogroup Entries Entry data storage. * \ingroup dev * * The basic data structure for entries; an entry can be a file, directory, * or special node (symlink or device). * */ /** @{ */ /** A shortened struct stat64. * The glibc version needs 96 bytes, this version only 52 bytes. 
* * We save 44 bytes; multiplied with 150 000 entries this makes a difference * of 6.6MB. */ /* We may not use the st_ names, as some have #defined names. */ /* On my system these are all either 64bit or 32bit types, so there's * no alignment problem (which would arise if there was eg. a 16bit type, * then space would be wasted) */ struct sstat_t { /* For easier comparison, we overlay an 64bit type. */ union { /** The modification time as \c seconds, \c microseconds. */ struct timespec mtim; /** The same value in a single integer value. * \deprecated Currently unused. */ unsigned long long _mtime; }; union { /** The creation time as \c seconds, \c microseconds. */ struct timespec ctim; /** The same value in a single integer value. * \deprecated Currently unused. */ unsigned long long _ctime; }; union { /** The size in bytes (for files, symlinks and directories). */ off_t size; /** The device number (for devices). */ dev_t rdev; }; /** Device number of \b host filesystem. */ dev_t dev; /** Inode */ ino_t ino; /** The access mode (like \c 0700, \c 0755) with all other (non-mode) * bits, ie S_IFDIR. */ mode_t mode; /** The owner's id. */ uid_t uid; /** The group number. */ gid_t gid; }; /** The central structure for data storage. * * The name comes from extended struct stat. * This structure is used to build the tree of entries that we're processing. * * We need both a local and a remote status, to see on update when there * might be a conflict. \todo Single status, and check entries on-time? */ struct estat { /** The parent of this entry, used for tree walking. * Must be \c NULL for the root entry and the root entry alone. */ struct estat *parent; /** Name of this entry. */ char *name; /** Meta-data of this entry. * Most important: the entry type that is used for the shared members * below is determined by \c st.mode. */ struct sstat_t st; /** Revision of this entry. 
Currently only the value in the root entry is * used; this will be moved to \c * \ref url and removed from here. */ svn_revnum_t repos_rev; /** The revision number before updating. */ svn_revnum_t old_rev; /** The URL this entry is from. * Will be used for multi-url updates. */ struct url_t *url; /** If an entry gets removed, the old version is remembered (if needed) * via the \c old pointer (eg to know which children were known and may * be safely removed). */ struct estat *old; /** Data about this entry. */ union { /** For files */ struct { /** The en/decoder string. * Only gets set if \c action->needs_decoder!=0 from * fsvs:update-pipe, or in commit from * fsvs:commit-pipe. */ char *decoder; /** MD5-hash of the repository version. While committing it is set * to the \e new MD5, and saved with \a waa__output_tree(). */ md5_digest_t md5; /** Whether we got an "original" MD5 from the repository to compare. * */ unsigned int has_orig_md5:1; /** Flag whether this entry has changed or not changed (as per * MD5/manber-compare), or if this is unknown yet. * See \ref ChgFlag. */ unsigned int change_flag:2; }; /** For directories. * The by_inode and by_name members are positioned so that they collide * with the \c md5 file member above - in case of incorrect file types * that's safer, as they'll contain invalid pointers instead of (the * valid) \c decoder. */ struct { /** Name storage space for sub- (and sub-sub- etc.) entries. * Mainly used in the root inode, but is used in newly found directories * too. \c NULL for most directory entries. */ char *strings; /** List of child entries. * Sorted by inode number, NULL-terminated. * Always valid. */ struct estat **by_inode; /** List of child entries. * Sorted by name, NULL-terminated. * May be NULL if not currently used; can be (re-)generated by calling * dir__sortbyname(). */ struct estat **by_name; /** How many entries this directory has. 
*/ AC_CV_C_UINT32_T entry_count; /** Used to know when this directories' children are finished. * Counts the number of unfinished subdirectories. * This is volatile and should be in the union below (with \ref * estat::child_index), but as it's only used for directories it * conserves memory to keep it here. */ AC_CV_C_UINT32_T unfinished; /** This flag is set if any child is *not* at the same revision, * so this directory has to be descended on reporting. */ unsigned int other_revs:1; /** If this bit is set, the directory has to be re-sorted before * being written out -- it may have new entries, which are not in * the correct order. */ unsigned int to_be_sorted:1; /* Currently unused - see ignore.c. */ #if 0 struct ignore_t **active_ign; struct ignore_t **subdir_ign; #endif }; }; /** These are for temporary use. */ union { /** For commit. */ struct { /** This entries' baton. */ void *baton; }; /** Export for a file. */ struct { /** The pool used for the filehandles; for a discussion see \ref FHP. */ apr_pool_t *filehandle_pool; }; /** Export of a special entry. */ struct { /** String-buffers for special entries. * While a file is \b always streamed to disk, special entries are * \b always done in memory. */ svn_stringbuf_t *stringbuf_tgt; }; struct { /** Used in waa__input_tree() and waa__update_tree(). */ AC_CV_C_UINT32_T child_index; }; struct { /** Used in output_tree(). */ AC_CV_C_UINT32_T file_index; }; }; /** \name Common variables for all types of entries. */ /** Which argument causes this path to be done. */ char *arg; /** Which pattern matched this entry. * Only set for new entries, and \c NULL if none. */ struct ignore_t *match_pattern; /** Stored user-defined properties as \c name=>svn_string_t, if \c * action->keep_user_prop is set. * Allocated in a subpool of \c estat::url->pool, so that it's still * available after cb__record_changes() returns. * The subpool is available from a hash lookup with key "" (len=0). 
*/ apr_hash_t *user_prop; /** Flags for this entry. See \ref EntFlags for constant definitions. */ AC_CV_C_UINT32_T flags; /** Packed representations of the file type; see \c preproc.h for * details. * * The convention is that \c estat::st has the \e current (mostly local) * value, defining which of the estat::entry_count and similar shared * members are valid. * * See the special \ref fsvsS_constants below, too. * @{ */ /** This is the value of the old revision. */ unsigned old_rev_mode_packed:PACKED_MODE_T_NEEDED_BITS; /** This is the new value, which we got from the repository. */ unsigned new_rev_mode_packed:PACKED_MODE_T_NEEDED_BITS; /** This is the current local value, and is always set on \c * ops__update_single_entry(). */ unsigned local_mode_packed:PACKED_MODE_T_NEEDED_BITS; /** @} */ /** Local status of this entry - \ref fs_bits. */ unsigned int entry_status:10; /** Remote status of this entry. \ref fs_bits. */ unsigned int remote_status:10; /** Cache index number +1 of this entries' path. * \c 0 (and \c >MAX_CACHED_PATHS) is used as \e uninitialized; so the * value here has range of [1 .. MAX_CACHED_PATHS] instead of * the usual [0 .. MAX_CACHED_PATHS-1]. */ unsigned int cache_index:6; /** Length of path up to here. Does not include the \c \\0. See \ref * ops__calc_path_len. */ unsigned short path_len:16; /** Whether this entry was already printed. \todo Remove by changing the * logic. */ unsigned int was_output:1; /** This flag tells whether the string for the decoder is surely correct. * It is currently used for updates; after we parse the properties in * cb__record_changes(), we'll have the correct value. */ unsigned int decoder_is_correct:1; /** Flag saying whether this entry was specified by the user on the * command line. */ unsigned int do_userselected:1; /** Says that a child of this entry was given by the user on the * commandline. 
* Unlike \a FS_CHILD_CHANGED, which is set if some child has \e actually * changed, this just says that we have to check. */ unsigned int do_child_wanted:1; /** Flag derived from parents' \ref estat::do_userselected. * Set for \b all entries which should be handled. */ unsigned int do_this_entry:1; /** Flag saying whether the \c "-f" filter condition applies. * Normally set in \ref ops__set_todo_bits(), can be cleared in \ref * ops__update_filter_set_bits(). */ unsigned int do_filter_allows:1; /** Flag used for debugging. If estat::do_filter_allows is queried * without being defined earlier, we trigger a \ref BUG(). * Was conditionalized on \c ENABLE_DEBUG - but that got ugly. */ unsigned int do_filter_allows_done:1; /** Whether this entry should not be written into the * \ref dir "entry list", and/or ignored otherwise. */ unsigned int to_be_ignored:1; }; /** \anchor fsvsS_constants Special FSVS file type constants. * @{ */ #define S_IFUNDEF (0) /** All sockets get filtered out when the directory gets read, so we can * safely reuse that value for the case where we don't know \b what kind of * special entry that is (eg when receiving \c "svn:special" from the * repository). */ #define S_IFANYSPECIAL S_IFSOCK #define S_ISANYSPECIAL S_ISSOCK /** These values are used to say that such an entry is lying around and has * to be removed first. */ #define S_IFGARBAGE S_IFIFO #define S_ISGARBAGE S_ISFIFO /** @} */ /** \anchor EntFlags Various flags for entries. * * The RF means repos-flags, as these flags have a meaning when talking * to the repository. */ /** This item will be unversioned, ie remotely deleted and locally * purged from the \b tree, but not from the filesystem. */ #define RF_UNVERSION (1) /** Such an entry will be sent to the repository as a new item. * Used if this entry would get ignored by some pattern, but the user * has specifically told to take it, too. */ #define RF_ADD (2) /** This entry should be checked for modifications. 
* Is currently used for directories; if they are stored in the WAA with * their current mtime they wouldn't get checked for modifications. * Using this flag it's possibly to specify that they should be read. * \note Persistent until commit! */ #define RF_CHECK (4) /** Properties have changed locally, must be committed. * Needed in case this is the \b only change - else we would not commit * this entry. */ #define RF_PUSHPROPS (8) /** Set if this entry was marked as copy. * If it is a directory, the children will have the \c RF_COPY_SUB flag, * unless the copy attribute is not inherited, but they're themselves * copies from other entries. */ #define RF_COPY_BASE (16) /** Set if this entry got implicitly copied (sub-entry). */ #define RF_COPY_SUB (32) /** Has this entry a conflict? */ #define RF_CONFLICT (64) /** This entry may not be written by waa__output_tree(). */ #define RF_DONT_WRITE (1 << 18) /** Whether this entry was just created by \a ops__traverse(). */ #define RF_ISNEW (1 << 19) /** Print this entry, even if not changed. */ #define RF_PRINT (1 << 20) /** Which of the flags above should be stored in the WAA. */ #define RF___SAVE_MASK (RF_UNVERSION | RF_ADD | RF_CHECK | \ RF_COPY_BASE | RF_COPY_SUB | RF_PUSHPROPS | RF_CONFLICT) /** Mask for commit-relevant flags. * An entry with \c RF_COPY_BASE must (per definition) marked as \c RF_ADD; * and RF_PUSHPROPS gets folded into FS_PROPERTIES. */ #define RF___COMMIT_MASK (RF_UNVERSION | RF_ADD | RF_COPY_BASE | RF_PUSHPROPS) #define RF___IS_COPY (RF_COPY_BASE | RF_COPY_SUB) /** \name File statii. * \anchor fs_bits * */ /** @{ */ #define FS_NO_CHANGE (0) #define FS_NEW (1 << 0) #define FS_REMOVED (1 << 1) #define FS_CHANGED (1 << 2) /** This flag says that it's an "approximate" answer, - no hashing has been * done. */ #define FS_LIKELY (1 << 3) #define FS_REPLACED (FS_NEW | FS_REMOVED) /** Flag for update/commit. 
Note that this doesn't normally get set when a * property has been changed locally - for that the persistent flag * RF_PUSHPROPS is used. * */ #define FS_PROPERTIES (1 << 4) /** Meta-data flags. */ #define FS_META_MTIME (1 << 5) #define FS_META_OWNER (1 << 6) #define FS_META_GROUP (1 << 7) #define FS_META_UMODE (1 << 8) #define FS_META_CHANGED (FS_META_MTIME | FS_META_OWNER | \ FS_META_GROUP | FS_META_UMODE) /** This flag on a directory entry says that the directory itself was * not changed, but some child, so the children of this directory * have to be checked for modifications. */ #define FS_CHILD_CHANGED (1 << 9) #define FS__CHANGE_MASK (FS_NEW | FS_REMOVED | FS_CHANGED | \ FS_META_CHANGED | FS_PROPERTIES) /** @} */ /** \anchor ChgFlag Change detection flags. */ #define CF_UNKNOWN (0) #define CF_CHANGED (1) #define CF_NOTCHANGED (2) /** @} */ /** \section TravFlags Flags defining the behaviour of \ref ops__traverse() * and sub-calls. */ /** Non-existing paths should be created */ #define OPS__CREATE (1) /** Newly created entries should be put on the update list */ #define OPS__ON_UPD_LIST (2) /** Whether this entry \b has to exist in the cached tree -- normally that * means it must not be created. */ #define OPS__FAIL_NOT_LIST (4) /** With this flag \a ops__traverse returns \c ENOENT if the entry does not * exist in the local filesystem. */ #define OPS__FAIL_NOT_FS (16) /** \addtogroup Debug Debugging and error checking. * \ingroup dev * * Depending on configure options (\c --enable-debug, \c --enable-release) * and system state (valgrind found) various debug and error check functions * are compiled out. * Makes the binary much smaller, but leaves no chance for debugging. */ /** @{ */ #ifdef ENABLE_RELEASE #define DEBUGP(...) do { } while (0) #else /** Declaration of the debug function. */ extern void _DEBUGP(const char *file, int line, const char *func, char *format, ...) __attribute__ ((format (printf, 4, 5) )); /** The macro used for printing debug messages. 
* Includes time, file, line number and function name. * Allows filtering via opt_debugprefix. * \note Check for \ref PrintfTypes "argument sizes". */ #define DEBUGP(...) do { if (debuglevel) _DEBUGP(__FILE__, __LINE__, __PRETTY_FUNCTION__, __VA_ARGS__); } while (0) #endif /** \name Error-printing and -handling functions. * * Except for the subversion-library wrapper macros they need exactly this * function layout: * * \code * int some_function( ... some args ... ) * { * int status; * * STOPIF( function_call(1, 2, "a"), * "String describing the error situation with %s", * "parameters as needed"); * * ex: * cleanups(); * return status; * } * \endcode * * It works by checking the return value; if it is not zero, a * goto ex is done. At this mark some cleanup is possible. */ /** @{ */ /** A flag to turn error printing temporarily off. * This is useful where entire calltrees would have to be equipped with * some \c silent parameter. */ extern int make_STOP_silent; /** Master error function. */ extern int _STOP(const char *file, int line, const char *function, int errl, const char *format, ...) __attribute__ ((format (printf, 5, 6) )); /** Completely customizable error call macro. * Seldom used, as all things are parametrized. * \note This is like SVN_ERR(), but has one difference: The function * is not simply ended (via return), cleanup is still possible. * \note Putting the \c make_STOP_silent check here enlarges the \c .text * section of FSVS for about 3kByte! */ #define STOPIF_FULLPARM(cond, status, code, go, ... ) \ do \ { \ if (cond) \ { \ status=code; \ _STOP(__FILE__, __LINE__, __PRETTY_FUNCTION__, code, __VA_ARGS__); \ goto go; \ } \ } while (0) /** Another error call macro. * Normally not used. */ #define STOPIF_CODE_ERR_GOTO(cond, code, ex, ... ) STOPIF_FULLPARM(cond, status, code, ex, __VA_ARGS__) /** Error call macro for system functions. * \param cond The condition; TRUE means an error happened. * \param code The error code to return. 
Normally an \c E* value. * This error macro is used mainly for system calls, where a certain value * specifies that an error has happened and some other data (mostly \c errno) * stores the detailed error code. * \code * STOPIF_CODE_ERR( fork("ls -la") == -1, errno, * "Fork() failed!"); * \endcode */ #define STOPIF_CODE_ERR(cond, code, ... ) STOPIF_CODE_ERR_GOTO(cond, code, ex, __VA_ARGS__) /* Main error call macro. * This is the macro that should be used for all internal function calls. * \param code The status code to check. * All other things are hardcoded. */ #define STOPIF(code, ... ) \ do \ { \ status=(code); \ if (status) \ { \ _STOP(__FILE__, __LINE__, __PRETTY_FUNCTION__, status, __VA_ARGS__); \ goto ex; \ } \ } while (0) /** A simplified error call macro for returning ENOMEM. * \code * void *x; * * x=malloc(1024); * STOPIF_ENOMEM(!x); * \endcode * */ #define STOPIF_ENOMEM(cond) STOPIF_CODE_ERR(cond, ENOMEM, NULL) /** An error return macro that is used for user output - special handling * \c EPIPE to get a silent return. * If \c code returns something negative (like printf, puts, putc ... do; * \c EOF is defined as \c -1), and \a error is \c EPIPE, go on with \c * -EPIPE. */ #define STOPIF_CODE_EPIPE(code, ...) \ do \ { \ if ((code) < 0) \ { \ status=errno; \ if (status == EPIPE) status= -EPIPE; \ STOPIF(status, "Error writing output"); \ } \ } while (0) /** \page svnlibwrap Subversion library calls wrapper. * If this is used in some function, an additional variable is needed: * \code * int some_function( ... some args ... ) * { * int status; * svn_error_t *status_svn; * * STOPIF_SVNERR( svn_function, * (parameters ...) ); * * ex: * STOP_HANDLE_SVNERR(status_svn); * ex2: * ... cleanups here ... * return status; * } * \endcode */ /** The master error macro for calling subversion functions. */ #define STOPIF_SVNERR_TEXT(func, parm, fmt, ...) 
\ do \ { \ status_svn=func parm; \ STOPIF_CODE_ERR( status_svn, status_svn->apr_err, \ fmt ": %s", ## __VA_ARGS__, status_svn->message); \ } while (0) /* The mainly used function wrapper. * \param func Name of the subversion function * \param parm A parenthesized list of arguments. * \code * STOPIF_SVNERR( svn_ra_initialize, (global_pool)); * \endcode */ #define STOPIF_SVNERR(func, parm) STOPIF_SVNERR_TEXT(func, parm, #func) /** Convert the svn_error_t into a message and a returnable integer. */ #define STOP_HANDLE_SVNERR(svnerr) STOPIF_CODE_ERR_GOTO(svnerr, svnerr->apr_err, ex2, "%s", (const char*)svnerr->message) /** The opposite to STOP_HANDLE_SVNERR(); this converts an status * to the svn_error_t. * Needed for all callbacks (eg. editor functions) which have to * return a svn_error_t. */ #define RETURN_SVNERR(status) return status ? \ svn_error_create (status, NULL, \ __PRETTY_FUNCTION__) : SVN_NO_ERROR; /** \name Runtime check macros */ /** @{ */ /** Makes the program abort. * If the configure had --enable-debug and \c gdb is in the path, try * to use \c gdb to debug this problem (only if STDIN and STDOUT are ttys). */ #define BUG(...) do { fflush(NULL); debuglevel=1; DEBUGP(__VA_ARGS__); *(int*)42=__LINE__; } while (0) /** The same as BUG(), but conditionalized. * \code * BUG_ON(a == b, "HELP") * \endcode * would print * INTERNAL BUG * a == b * HELP * and try to start gdb or abort. */ #define BUG_ON(cond, ...) do { if (cond) BUG( "INTERNAL BUG\n " #cond "\n " __VA_ARGS__); } while (0) /** @} */ /** \name Valgrind memory addressing checking. * * These are copied from \c valgrind/memcheck.h; they will be overridden * by the correct valgrind definitions if the valgrind headers are found * and fsvs is configured with \c --enable-debug. 
*/ /** @{ */ #define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) do { } while(0) #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) do { } while(0) #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) do { } while(0) #ifdef ENABLE_DEBUG #ifdef HAVE_VALGRIND #undef VALGRIND_MAKE_MEM_DEFINED #undef VALGRIND_MAKE_MEM_UNDEFINED #undef VALGRIND_MAKE_MEM_NOACCESS #include #endif #endif /** @} */ /** @} */ /** \addtogroup Globals Global options. * \ingroup dev * * A list of variables that can be set by commandline parameters or * environment variables; these are used in nearly every action. */ /** @{ */ /** Flag for recursive/non-recursive behaviour. * Starting with 0, gets incremented with \c -R and decremented with \c * -N. Different actions have different default levels. */ extern int opt_recursive; /** If this is an import/export command (eg restoration after harddisk * crash), we don't use the WAA for data storage. */ extern int is_import_export; /** Whether debug messages are wanted. */ extern int debuglevel; /** A pointer to the commit message; possibly a mmap()ped file. */ extern char *opt_commitmsg; /** The file name of the commit message file. */ extern char *opt_commitmsgfile; /** The revision we're getting from the repository. */ extern svn_revnum_t target_revision; /** The revision the user wants to get at (\c -r parameter). * \c HEAD is represented by \c SVN_INVALID_REVNUM. * Has to be splitted per-URL when we're going to multi-url operation. */ extern svn_revnum_t opt_target_revision; /** The second revision number the user specified. */ extern svn_revnum_t opt_target_revision2; /** How many revisions the user specified on the commandline (0, 1 or 2). * For multi-update operations it's possibly to update the urls to different * revisions; then we need to know for which urls the user specified a * revision number. Per default we go to \c HEAD. 
* */ extern int opt_target_revisions_given; /** The local character encoding, according to \a LC_ALL or \a LC_CTYPE) */ #ifdef HAVE_LOCALES extern char *local_codeset; #endif /** The session handle for RA operations. */ extern svn_ra_session_t *session; /** The first allocated APR pool. All others are derived from it and its * children. */ extern apr_pool_t *global_pool; /** The array of URLs. */ extern struct url_t **urllist; /** Number of URLs we have. */ extern int urllist_count; /** Pointer to \b current URL. */ extern struct url_t *current_url; extern unsigned approx_entry_count; /** @} */ /** Modification time - \c svn:text-time */ extern const char propname_mtime[]; /** Owner - \c svn:owner */ extern const char propname_owner[]; /** Group - \c svn:group */ extern const char propname_group[]; /** Unix mode - \c svn:unix-mode */ extern const char propname_umode[]; /** Original MD5 for encoded entries. */ extern const char propname_origmd5[]; /** Flag for special entry. */ extern const char propname_special[]; /** The value for the special property; normally \c "*". */ extern const char propval_special[]; /** Commit-pipe program. */ extern const char propval_commitpipe[]; /** Update-pipe program. */ extern const char propval_updatepipe[]; /** \addtogroup cmds_strings Common command line strings * \ingroup compat * * These strings may have to be localized some time, that's why they're * defined in this place. */ /** @{ */ extern char parm_dump[], parm_test[], parm_load[]; /** @} */ /** Remember where we started. */ extern char *start_path; /** How much bytes the \ref start_path has. */ extern int start_path_len; /** ANSI color sequences, for colorized outputs. * I tried using ncurses - but that messes with the terminal upon * initialization. I couldn't find a sane way to make that work. Anybody? * * \todo We assume light text on dark background here. 
* @{ */ #define ANSI__BLUE "\x1b[1;34m" #define ANSI__GREEN "\x1b[1;32m" #define ANSI__RED "\x1b[1;31m" #define ANSI__WHITE "\x1b[1;37m" #define ANSI__NORMAL "\x1b[0;0m" /** @} */ /** For Solaris */ extern char **environ; #endif fsvs-1.2.6/src/checksum.c0000644000202400020240000007465611334063060014222 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include #include #include #include #include "checksum.h" #include "helper.h" #include "global.h" #include "est_ops.h" #include "waa.h" /** \file * CRC, manber functions. */ #define MAPSIZE (32*1024*1024) /** CRC table. * We calculate it once, and reuse it. */ struct t_manber_parms { AC_CV_C_UINT32_T values[256]; }; /** Everything needed to calculate manber-hashes out of a stream. * */ struct t_manber_data { /** The entry this calculation is for. */ struct estat *sts; /** The stream we're filtering. */ svn_stream_t *input; /** Start of the current block. */ off_t last_fpos; /** The current position in the file. Is always >= \a last_fpos. */ off_t fpos; /** MD5-Context of full file. */ apr_md5_ctx_t full_md5_ctx; /** MD5 of full file. */ md5_digest_t full_md5; /** MD5-Context of current block. */ apr_md5_ctx_t block_md5_ctx; /** MD5 of last block. */ md5_digest_t block_md5; /** The file descriptor where the manber-block-MD5s will be written to. */ int manber_fd; /** The internal manber-state. */ AC_CV_C_UINT32_T state; /** The previous manber-state. */ AC_CV_C_UINT32_T last_state; /** Count of bytes in backtrack buffer. */ int bktrk_bytes; /** The last byte in the rotating backtrack-buffer. */ int bktrk_last; /** The backtrack buffer. 
*/ unsigned char backtrack[CS__MANBER_BACKTRACK]; /** Flag to see whether we're in a zero-bytes block. * If there are large blocks with only \c \\0 in them, we don't CRC * or MD5 them - just output as zero blocks with a MD5 of \c \\0*16. * Useful for sparse files. */ int data_bits; }; /** The precalculated CRC-table. */ struct t_manber_parms manber_parms; /** \b The Manber-structure. * Currently only a single instance of manber-hashing runs at once, * so we simply use a static structure. */ static struct t_manber_data cs___manber; /** The write format string for \ref md5s. */ const char cs___mb_wr_format[]= "%s %08x %10llu %10llu\n"; /** The read format string for \ref md5s. */ const char cs___mb_rd_format[]= "%*s%n %x %llu %llu\n"; /** The maximum line length in \ref md5s : * - MD5 as hex (constant-length), * - state as hex (constant-length), * - offset of block, * - length of block, * - \\n, * - \\0 * */ #define MANBER_LINELEN (APR_MD5_DIGESTSIZE*2+1 + 8+1 + 10+1 +10+1 + 1) /** Initializes a Manber-data structure from a struct \a estat. */ int cs___manber_data_init(struct t_manber_data *mbd, struct estat *sts); /** Returns the position of the last byte of a manber-block. */ int cs___end_of_block(const unsigned char *data, int maxlen, int *eob, struct t_manber_data *mb_f); /** Hex-character to ascii. * Faster than sscanf(). * Returns -1 on error. */ inline static int cs__hex2val(char ch) { /* I thought a bit whether I should store the values+1, ie. keep most of * the array as 0 - but that doesn't save any memory, it only takes more * time. * Sadly the various is*() functions (like isxdigit()) don't seem to * include that information yet, and I couldn't find some table in the * glibc sources. * (I couldn't find anything in that #define mess, TBH.) */ static const signed char values[256]={ /* 0x00 */ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0x20 = space ... 
*/ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0x30 = "012345" */ +0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, /* 0x40 = "@ABCD" ... */ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0x60 = "`abcd" ... */ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0x80 */ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; /* To avoid having problems with "negative" characters */ return values[ ch & 0xff ]; } /** -. * Faster than sscanf(). * Returns -1 on error. */ inline int cs__two_ch2bin(char *stg) { return (cs__hex2val(stg[0]) << 4) | (cs__hex2val(stg[1]) << 0); } /** -. * Exactly the right number of characters must be present. */ int cs__char2md5(const char *input, char **eos, md5_digest_t md5) { int i, status, x, y; status=0; for(i=0; i= sizeof(stg)/sizeof(stg[0])) last=0; cur=stg[last]; return cs__md5tohex(md5, cur); } /** Finish manber calculations. * * Calculates the full-file MD5 hash, and copies it into the associated * struct \a estat . - see comment at \a cs__new_manber_filter() . */ int cs___finish_manber(struct t_manber_data *mb_f) { int status; status=0; STOPIF( apr_md5_final(mb_f->full_md5, & mb_f->full_md5_ctx), "apr_md5_final failed"); if (mb_f->sts) memcpy(mb_f->sts->md5, mb_f->full_md5, sizeof(mb_f->sts->md5)); mb_f->sts=NULL; ex: return status; } /** * -. 
* \param sts Which entry to check * \param fullpath The path to the file (optionally, else \c NULL). If the * file has been checked already and fullpath is \c NULL, a debug message * can write \c (null), as then even the name calculation * is skipped. * \param result is set to \c 0 for identical to old and \c >0 for * changed. * As a special case this function returns \c <0 for don't know * if the file is unreadable due to a \c EACCESS. * * \note Performance optimization * In normal circumstances not the whole file has to be read to get the * result. On update a checksum is written for each manber-block of about * 128k (but see \ref CS__APPROX_BLOCKSIZE_BITS); as soon as one is seen as * changed the verification is stopped. * */ int cs__compare_file(struct estat *sts, char *fullpath, int *result) { int i, status, fh; unsigned length_mapped, map_pos, hash_pos; off_t current_pos; struct cs__manber_hashes mbh_data; unsigned char *filedata; int do_manber; char *cp; struct sstat_t actual; md5_digest_t old_md5 = { 0 }; static struct t_manber_data mb_dat; /* Default is "don't know". */ if (result) *result = -1; /* It doesn't matter whether we test this or old_rev_mode_packed - if * they're different, this entry was replaced, and we never get here. */ if (S_ISDIR(sts->st.mode)) return 0; fh=-1; /* hash already done? */ if (sts->change_flag != CF_UNKNOWN) { DEBUGP("change flag for %s: %d", fullpath, sts->change_flag); goto ret_result; } status=0; if (!fullpath) STOPIF( ops__build_path(&fullpath, sts), NULL); DEBUGP("checking for modification on %s", fullpath); DEBUGP("hashing %s",fullpath); memcpy(old_md5, sts->md5, sizeof(old_md5)); /* We'll open and read the file now, so the additional lstat() doesn't * really hurt - and it makes sure that we see the current values (or at * least the _current_ ones :-). 
*/ STOPIF( hlp__lstat(fullpath, &actual), NULL); if (S_ISREG(actual.mode)) { do_manber=1; /* Open the file and read the stream from there, comparing the blocks * as necessary. * If a difference is found, stop, and mark file as different. */ /* If this call returns ENOENT, this entry simply has no md5s-file. * We'll have to MD5 it completely. */ if (actual.size < CS__MIN_FILE_SIZE) do_manber=0; else { status=cs__read_manber_hashes(sts, &mbh_data); if (status == ENOENT) do_manber=0; else STOPIF(status, "reading manber-hash data for %s", fullpath); } hash_pos=0; STOPIF( cs___manber_data_init(&mb_dat, sts), NULL ); /* We map windows of the file into main memory. Never more than 256MB. */ current_pos=0; fh=open(fullpath, O_RDONLY); /* We allow a single special case on error handling: EACCES, which * could simply mean that the file has mode 000. */ if (fh<0) { /* The debug statement might change errno, so we have to save the * value. */ status=errno; DEBUGP("File %s is unreadable: %d", fullpath, status); if (status == EACCES) { status=0; goto ex; } /* Can that happen? */ if (!status) status=EBUSY; STOPIF(status, "open(\"%s\", O_RDONLY) failed", fullpath); } status=0; while (current_pos < actual.size) { if (actual.size-current_pos < MAPSIZE) length_mapped=actual.size-current_pos; else length_mapped=MAPSIZE; DEBUGP("mapping %u bytes from %llu", length_mapped, (t_ull)current_pos); filedata=mmap(NULL, length_mapped, PROT_READ, MAP_SHARED, fh, current_pos); STOPIF_CODE_ERR( filedata == MAP_FAILED, errno, "comparing the file %s failed (mmap)", fullpath); map_pos=0; while (map_posmd5[0] ^= 0x1; i=-2; break; } DEBUGP("block #%u ok...", hash_pos); hash_pos++; /* If this gets true (which it should never), we must not * print the hash values etc. ... The index [hash_pos] is outside * the array boundaries. */ if (hash_pos > mbh_data.count) goto changed; } /* We have to reset the blocks even if we have no manber hashes ... * so the eg. data_bits value gets reset. 
*/ STOPIF( cs___end_of_block(NULL, 0, NULL, &mb_dat), NULL ); map_pos+=i; } STOPIF_CODE_ERR( munmap((void*)filedata, length_mapped) == -1, errno, "unmapping of file failed"); current_pos+=length_mapped; if (i==-2) break; } STOPIF( cs___finish_manber( &mb_dat), NULL); } else if (S_ISLNK(sts->st.mode)) { STOPIF( ops__link_to_string(sts, fullpath, &cp), NULL); apr_md5(sts->md5, cp, strlen(cp)); } else { DEBUGP("nothing to hash for %s", fullpath); } sts->change_flag = memcmp(old_md5, sts->md5, sizeof(sts->md5)) == 0 ? CF_NOTCHANGED : CF_CHANGED; DEBUGP("change flag for %s set to %d", fullpath, sts->change_flag); ret_result: if (result) *result = sts->change_flag == CF_CHANGED; DEBUGP("comparing %s=%d: md5 %s", fullpath, sts->change_flag == CF_CHANGED, cs__md5tohex_buffered(sts->md5)); status=0; ex: if (fh>=0) close(fh); return status; } /** -. * If a file has been committed, this is where various checksum-related * uninitializations can happen. */ int cs__set_file_committed(struct estat *sts) { int status; status=0; if (S_ISDIR(sts->st.mode)) goto ex; /* Now we can drop the check flag. 
*/ sts->flags &= ~(RF_CHECK | RF_PUSHPROPS); sts->repos_rev=SET_REVNUM; ex: return status; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Stream functions and callbacks * for manber-filtering * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ int cs___manber_data_init(struct t_manber_data *mbd, struct estat *sts) { int status; memset(mbd, 0, sizeof(*mbd)); mbd->manber_fd=-1; BUG_ON(mbd->sts, "manber structure already in use!"); mbd->sts=sts; mbd->fpos= mbd->last_fpos= 0; apr_md5_init(& mbd->full_md5_ctx); STOPIF( cs___end_of_block(NULL, 0, NULL, mbd), NULL ); ex: return status; } void cs___manber_init(struct t_manber_parms *mb_d) { int i; AC_CV_C_UINT32_T p; if (mb_d->values[0]) return; /* Calculate the CS__MANBER_BACKTRACK power of the prime */ /* TODO: speedup like done in RSA - log2(power) */ for(p=1,i=0; ivalues[i]=(i*p) & CS__MANBER_MODULUS; } /* This function finds the position which * * a b c d e f g h i j k l m n * |..Block.1..| |..Block.2... * Here it would return h; ie. the number of characters * found in this data block belonging to the current block. * * If the whole data buffer belongs to the current block -1 is returned * in *eob. * */ int cs___end_of_block(const unsigned char *data, int maxlen, int *eob, struct t_manber_data *mb_f) { int status; int i; status=0; if (!data) { DEBUGP("manber reinit"); mb_f->state=0; mb_f->last_state=0; mb_f->bktrk_bytes=0; mb_f->bktrk_last=0; mb_f->data_bits=0; apr_md5_init(& mb_f->block_md5_ctx); memset(mb_f->block_md5, 0, sizeof(mb_f->block_md5)); cs___manber_init(&manber_parms); goto ex; } *eob = -1; i=0; /* If we haven't had at least this many bytes in the current block, * read up to this amount. */ while (ibktrk_bytes < CS__MANBER_BACKTRACK) { /* In this initialization, we simply \c OR the bytes together. * On block end detection we see if this is at least a * \c CS__MANBER_BACKTRACK bytes long zero-byte block. 
*/ mb_f->data_bits |= data[i]; mb_f->state = (mb_f->state * CS__MANBER_PRIME + data[i] ) % CS__MANBER_MODULUS; mb_f->backtrack[ mb_f->bktrk_last ] = data[i]; /* The reason why CS__MANBER_BACKTRACK must be a power of 2: * bitwise-AND is much faster than a modulo. * In this loop the & is redundant - in a new block we should * start from bktrk_last==0, but the AND is only 1 or 2 cycles, * and we hope that gcc optimizes that. */ mb_f->bktrk_last = ( mb_f->bktrk_last + 1 ) & (CS__MANBER_BACKTRACK - 1); mb_f->bktrk_bytes++; i++; } if (!mb_f->data_bits) { /* No bits in the data set - only zeroes so far. * Look for the next non-zero byte; there's a block border. */ /* memchr is the exact opposite of what we need. */ while (ilast_state gets the previous CRC, and this gets stored. * This is because the ->state has, on a block border, a lot of * zeroes (per definition); so we store the previous value, which * may be better suited for comparison. If the blocks are equal * up to byte N, they're equal up to N-1, too. */ /* This need not be calculated in the previous loop, as we do no * border-checking there. Only here, in this loop, * is the value needed. */ mb_f->last_state=mb_f->state; mb_f->state = (mb_f->state*CS__MANBER_PRIME + data[i] - manber_parms.values[ mb_f->backtrack[ mb_f->bktrk_last ] ] ) % CS__MANBER_MODULUS; mb_f->backtrack[ mb_f->bktrk_last ] = data[i]; mb_f->bktrk_last = ( mb_f->bktrk_last + 1 ) & (CS__MANBER_BACKTRACK - 1); /* This value has already been used. */ i++; /* special value ? */ if ( !(mb_f->state & CS__MANBER_BITMASK) ) { *eob=i; apr_md5_update(& mb_f->block_md5_ctx, data, i); apr_md5_final( mb_f->block_md5, & mb_f->block_md5_ctx); DEBUGP("manber found a border: %u %08X %08X %s", i, mb_f->last_state, mb_f->state, cs__md5tohex_buffered(mb_f->block_md5)); break; } } /* Update md5 up to current byte. 
*/ if (*eob == -1) apr_md5_update(& mb_f->block_md5_ctx, data, i); } /* Update file global information */ apr_md5_update(& mb_f->full_md5_ctx, data, i); mb_f->fpos += (*eob == -1) ? maxlen : *eob; ex: DEBUGP("on return at fpos=%llu: %08X (databits=%2x)", (t_ull)mb_f->fpos, mb_f->state, mb_f->data_bits); return status; } int cs___update_manber(struct t_manber_data *mb_f, const unsigned char *data, apr_size_t len) { int status; int eob, i; /* MD5 as hex (constant-length), * state as hex (constant-length), * offset of block, length of block, * \n, \0, reserve */ char buffer[MANBER_LINELEN+10]; char *filename; status=0; /* We tried to avoid doing this calculation for small files. * * But: this does not work. * As *on update* we don't know how many bytes we'll * have to process, and the buffer size is not specified, * we might be legitimately called with single-byte values. * * On my machine I get for commit/update requests of 100kB * (yes, 102400 bytes), so I thought to have at least some chance. * But: svn_ra_get_file uses 16k blocks ... * * A full solution would either have to * - know how many bytes we get (update; on commit we know), or to * - buffer all to-be-written-data unless we have more bytes * than limited. * The second variant might be easier. * * As we now check on end-of-stream for the size and remove the file if * necessary, this is currently deactivated. */ DEBUGP("got a block with %llu bytes", (t_ull)len); while (1) { #if 0 /* If first call, and buffer smaller than 32kB, avoid this calling ... 
*/ if (mb_f->fpos == 0 && len<32*1024) eob=-1; else #endif STOPIF( cs___end_of_block(data, len, &eob, mb_f), NULL ); if (eob == -1) { DEBUGP("block continues after %lu.", (unsigned long)mb_f->fpos); break; } data += eob; len -= eob; DEBUGP("block ends after %lu; size %lu bytes (border=%u).", (unsigned long)mb_f->fpos, (unsigned long)(mb_f->fpos - mb_f->last_fpos), eob); /* write new line to data file */ i=sprintf(buffer, cs___mb_wr_format, cs__md5tohex_buffered(mb_f->block_md5), mb_f->last_state, (t_ull)mb_f->last_fpos, (t_ull)(mb_f->fpos - mb_f->last_fpos)); BUG_ON(i > sizeof(buffer)-3, "Buffer too small - stack overrun"); if (mb_f->manber_fd == -1) { /* The file has not been opened yet. * Do it now. * */ STOPIF( ops__build_path(&filename, mb_f->sts), NULL); STOPIF( waa__open_byext(filename, WAA__FILE_MD5s_EXT, WAA__WRITE, & cs___manber.manber_fd), NULL ); DEBUGP("now doing manber-hashing for %s...", filename); } STOPIF_CODE_ERR( write( mb_f->manber_fd, buffer, i) != i, errno, "writing to manber hash file"); /* re-init manber state */ STOPIF( cs___end_of_block(NULL, 0, NULL, mb_f), NULL ); mb_f->last_fpos = mb_f->fpos; } ex: return status; } svn_error_t *cs___mnbs_close(void *baton); svn_error_t *cs___mnbs_read(void *baton, char *data, apr_size_t *len) { int status; svn_error_t *status_svn; struct t_manber_data *mb_f=baton; status=0; /* Get the bytes, then process them. */ STOPIF_SVNERR( svn_stream_read, (mb_f->input, data, len) ); if (*len && data) STOPIF( cs___update_manber(mb_f, (unsigned char*)data, *len), NULL); else STOPIF_SVNERR( cs___mnbs_close, (baton)); ex: RETURN_SVNERR(status); } svn_error_t *cs___mnbs_write(void *baton, const char *data, apr_size_t *len) { int status; svn_error_t *status_svn; struct t_manber_data *mb_f=baton; status=0; /* We first write to the output stream, to know how many bytes could * be processed. Then we use that bytes. 
* If we just processed the incoming bytes, then fewer would get written, * and the remaining would be re-done we'd hash them twice. */ STOPIF_SVNERR( svn_stream_write, (mb_f->input, data, len) ); if (*len && data) STOPIF( cs___update_manber(mb_f, (unsigned char*)data, *len), NULL); else STOPIF_SVNERR( cs___mnbs_close, (baton)); ex: RETURN_SVNERR(status); } svn_error_t *cs___mnbs_close(void *baton) { int status; svn_error_t *status_svn; struct t_manber_data *mb_f=baton; status=0; /* If there have been less than CS__MIN_FILE_SIZE bytes, we * don't keep that file. */ if (mb_f->manber_fd != -1) { STOPIF( waa__close(mb_f->manber_fd, mb_f->fpos < CS__MIN_FILE_SIZE ? ECANCELED : status != 0), NULL ); mb_f->manber_fd=-1; } if (mb_f->input) { STOPIF_SVNERR( svn_stream_close, (mb_f->input) ); mb_f->input=NULL; } STOPIF( cs___finish_manber(mb_f), NULL); ex: RETURN_SVNERR(status); } /** -. * On commit and update we run the stream through a filter, to create the * manber-hash data (see \ref md5s) on the fly. * * \note * We currently give the caller no chance to say whether he wants the * full MD5 or not. * If we ever need to let him decide, he must either * - save the old MD5 * - or (better!) says where the MD5 should be stored - this pointer * would replace \c mb_f->full_md5 . * */ int cs__new_manber_filter(struct estat *sts, svn_stream_t *stream_input, svn_stream_t **filter_stream, apr_pool_t *pool) { int status; svn_stream_t *new_str; char *filename; status=0; STOPIF( cs___manber_data_init(&cs___manber, sts), "manber-data-init failed"); cs___manber.input=stream_input; new_str=svn_stream_create(&cs___manber, pool); STOPIF_ENOMEM( !new_str ); svn_stream_set_read(new_str, cs___mnbs_read); svn_stream_set_write(new_str, cs___mnbs_write); svn_stream_set_close(new_str, cs___mnbs_close); STOPIF( ops__build_path(&filename, sts), NULL); DEBUGP("initiating MD5 streaming for %s", filename); *filter_stream=new_str; /* The file with the hashes for the blocks is not immediately opened. 
* only when we detect that we have at least a minimum file size * we do the whole calculation.*/ ex: return status; } /** \defgroup md5s_overview Overview * \ingroup perf * * When we compare a file with its last version, we read all the * manber-hashes into memory. * When we use them on commit for constructing a delta stream, we'll * have to have them sorted and/or indexed for fast access; then we * can't read them from disk or something like that. * * * \section md5s_perf Performance considerations * * \subsection md5s_count Count of records, memory requirements * * We need about 16+4+8 (28, with alignment 32) bytes per hash value, * and that's for approx. 128kB. So a file of * 1M needs 8*32 => 512 bytes, * 1G needs 8k*32 => 512 kB, * 1T needs 8M*32 => 512 MB. * If this is too much, you'll have to increase CS__APPROX_BLOCKSIZE_BITS * and use bigger blocks. * * (Although, if you've got files greater than 1TB, you'll have other * problems than getting >512MB RAM) * And still, there's (nearly) always a swap space ... * * * \subsection md5s_perf Allocation * * To avoid the costs of the unused 4 bytes (which accumulate to 32MB * on a 1TB file) and to get the manber-hashes better into * L2 cache (only the 32bit value - the rest is looked up after we * found the correct hash) we allocate 3 memory regions - * one for each data. * * * \subsection Reading for big files * * It's a tiny bit disturbing that we read the whole file at once and not * as-needed. * On the hypothetical 1TB file we'll be reading 512MB before seeing that * the first block had changed... * Perhaps this should be expanded later, to say something like "already * open, return next N entries" - file handle, last N, etc. should be stored * in struct cs__manber_hashes. * For now I'll just ignore that, as a (practical) 4GB file (a DVD) * will lead to 2MB read - and on average we'll find a difference after * 2GB more reading. * * A better way than read()/lseek() would be mmap of binary files. 
* But that wouldn't allow to have the data compressed. * I don't know if that matters; the 1TB file has * 8M lines * 60 Bytes => 480MB on ASCII-data. * * If we assume that the CRCs and lengths can be compressed away, * we still need the offsets and MD5s, so we would still end with * 8M * 24 Bytes, ie. at least 196MB. * I don't think that's a necessity. * * * \section Hash-collisions on big files * * Please note, that on 1TB files you'll have 8M of hash blocks, which * have a significant collision-chance on 32bit hash values! * (look up discussion about rsync and its bugs on block change detection). * We'd either have to use bigger blocks or a bigger hash value - * the second might be easier and better, esp. as files this big should * be on a 64bit platform, where a 64bit hash won't be slow. * * * \section The last block * * The last block in a file ends per definition *not* on a manber-block- * border (or only per chance). This block is not written into the md5s file. * The data is verified by the full-file MD5 that we've been calculating. * * \todo When we do a rsync-copy from the repository, we'll have to look at * that again! Either we write the last block too, or we'll have to ask for * the last few bytes extra. * * */ /** -. * \param sts The entry whose md5-data to load * \param data An allocated struct \c cs__manber_hashes; its arrays get * allocated, and, on error, deallocated. * If no error code is returned, freeing of the arrays has to be done by * the caller. * */ int cs__read_manber_hashes(struct estat *sts, struct cs__manber_hashes *data) { int status; char *filename; int fh, i, spp; unsigned estimated, count; t_ull length, start; char buffer[MANBER_LINELEN+10], *cp; AC_CV_C_UINT32_T value; status=0; memset(data, 0, sizeof(*data)); fh=-1; STOPIF( ops__build_path(&filename, sts), NULL); /* It's ok if there's no md5s file. simply return ENOENT. 
*/ status=waa__open_byext(filename, WAA__FILE_MD5s_EXT, WAA__READ, &fh); if (status == ENOENT) goto ex; STOPIF( status, "reading md5s-file for %s", filename); DEBUGP("reading manber-hashes for %s", filename); /* We don't know in advance how many lines (i.e. manber-hashes) * there will be. * So we just interpolate from the file size and the (near-constant) * line-length and add a bit for good measure. * The rest is freed as soon as we've got all entries. */ length=lseek(fh, 0, SEEK_END); STOPIF_CODE_ERR( length==-1, errno, "Cannot get length of file %s", filename); STOPIF_CODE_ERR( lseek(fh, 0, SEEK_SET), errno, "Cannot seek in file %s", filename); /* We add 5%; due to integer arithmetic the factors have to be separated */ estimated = length*21/(MANBER_LINELEN*20)+4; DEBUGP("estimated %u manber-hashes from filelen %llu", estimated, length); /* Allocate memory ... */ STOPIF( hlp__calloc( &data->hash, estimated, sizeof(*data->hash)), NULL); STOPIF( hlp__calloc( & data->md5, estimated, sizeof( *data->md5)), NULL); STOPIF( hlp__calloc( & data->end, estimated, sizeof( *data->end)), NULL); count=0; while (1) { i=read(fh, buffer, sizeof(buffer)-1); STOPIF_CODE_ERR( i==-1, errno, "reading manber-hash data"); if (i==0) break; /* ensure strchr() stops */ buffer[i]=0; cp=strchr(buffer, '\n'); STOPIF_CODE_ERR(!cp, EINVAL, "line %u is invalid", count+1 ); /* reposition to start of next line */ STOPIF_CODE_ERR( lseek(fh, -i // start of this line + (cp-buffer) // end of this line + 1, // over \n SEEK_CUR) == -1, errno, "lseek back failed"); *cp=0; i=sscanf(buffer, cs___mb_rd_format, &spp, &value, &start, &length); STOPIF_CODE_ERR( i != 3, EINVAL, "cannot parse line %u for %s", count+1, filename); data->hash[count]=value; data->end[count]=start+length; buffer[spp]=0; STOPIF( cs__char2md5(buffer, NULL, data->md5[count]), NULL); count++; BUG_ON(count > estimated, "lines should have syntax errors - bug in estimation."); } data->count=count; DEBUGP("read %u entry tuples.", count); if 
(estimated-count > 3) { DEBUGP("reallocating..."); /* reallocate memory = free */ STOPIF( hlp__realloc( &data->hash, count*sizeof(*data->hash)), NULL); STOPIF( hlp__realloc( & data->md5, count*sizeof(* data->md5)), NULL); STOPIF( hlp__realloc( & data->end, count*sizeof(* data->end)), NULL); } /* The index is not always needed. Don't generate now. */ ex: if (status) { IF_FREE(data->hash); IF_FREE(data->md5); IF_FREE(data->end); } if (fh != -1) STOPIF_CODE_ERR( close(fh) == -1, errno, "Cannot close manber hash file (fd=%d)", fh); return status; } fsvs-1.2.6/src/info.h0000644000202400020240000000114110756467655013364 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __INFO_H__ #define __INFO_H__ #include "actions.h" /** \file * \ref info command header file. */ /** The \ref info action. */ work_t info__work; /** Printing the entries' info. */ action_t info__action; #endif fsvs-1.2.6/src/info.c0000644000202400020240000000727411264677022013356 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * \ref info action. * * Allows the user to display various informations about his working copy * entries - eg. URL, revision number, stored meta-data, etc. */ /** \addtogroup cmds * * \section info * * \code * fsvs info [-R [-R]] [PATH...] 
* \endcode * * Use this command to show information regarding one or more entries in your * working copy. \n * You can use \c -v to obtain slightly more information. * * This may sometimes be helpful for locating bugs, or to obtain the * URL and revision a working copy is currently at. * * Example: * \code * $ fsvs info * URL: file:///tmp/ram/fsvs-test-1000/tmp-repos-path/trunk * .... 200 . * Type: directory * Status: 0x0 * Flags: 0x100000 * Dev: 0 * Inode: 24521 * Mode: 040755 * UID/GID: 1000/1000 * MTime: Thu Aug 17 16:34:24 2006 * CTime: Thu Aug 17 16:34:24 2006 * Revision: 4 * Size: 200 * \endcode * * The default is to print information about the given entry only. * With a single \c -R you'll get this data about \b all entries of a given * directory; with another \c -R you'll get the whole (sub-)tree. */ #include #include #include "global.h" #include "est_ops.h" #include "cp_mv.h" #include "url.h" #include "status.h" #include "waa.h" #include "warnings.h" /** Utility function - prints the normal status and the extended * information. */ int info__action(struct estat *sts) { int status; /* Always print this entry. */ sts->was_output=0; sts->flags |= RF_PRINT; /* TODO: entry_type is already overwritten by ops__stat_to_action() */ STOPIF( st__status(sts), NULL); STOPIF( st__print_entry_info(sts), NULL); ex: return status; } int info__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); status=url__load_list(NULL, 0); if (status==ENOENT) STOPIF( wa__warn(WRN__NO_URLLIST, status, "No URLs defined"), NULL); else STOPIF_CODE_ERR( status, status, NULL); /* Default is single-element only. */ opt_recursive-=2; /* If verbose operation was wanted, we want return the copyfrom URL. 
* We cannot simply look at opt__is_verbose() and set VERBOSITY_COPYFROM, * because with "-v" the OPT__VERBOSE priority is already at * PRIO_CMDLINE, so PRIO_PRE_CMDLINE doesn't work - and simply overriding * a specific wish (as given with "-o verbose=") isn't nice, too. * * So we check whether it seems that a single "-v" was given, and react * to that; perhaps we should resurrect the global opt_verbose variable, * and check what's the best verbosity default is in each worker. */ if (opt__get_int(OPT__VERBOSE) == VERBOSITY_DEFAULT_v) opt__set_int( OPT__VERBOSE, PRIO_CMDLINE, opt__get_int(OPT__VERBOSE) | VERBOSITY_COPYFROM); /* do not update the entries; print info based on *known* data. */ status=waa__read_or_build_tree(root, argc, normalized, argv, NULL, 1); if (status == -ENOENT) { printf("No tree information available. Did you commit?\n"); status=0; goto ex; } STOPIF( status, "Cannot get tree information"); ex: return status; } fsvs-1.2.6/src/export.h0000644000202400020240000000127410756467655013761 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __EXPORT_H__ #define __EXPORT_H__ #include "actions.h" #include "update.h" /** \file * \ref export action header file */ /** The \ref export action. */ work_t exp__work; /** This function exports \a url into the current working directory. */ int exp__do(struct estat *root, struct url_t *url); #endif fsvs-1.2.6/src/export.c0000644000202400020240000001263111264677022013735 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * \ref export action. * * This is mostly the same as update; the difference is that * we export the given URL to the current directory, and don't use * a WAA area. * */ #include #include #include #include "export.h" #include "helper.h" #include "url.h" #include "racallback.h" /** * \addtogroup cmds * \section export * * \code * fsvs export REPOS_URL [-r rev] * \endcode * * If you want to export a directory from your repository \b without * storing any FSVS-related data you can use this command. * * This restores all meta-data - owner, group, access mask and modification * time; its primary use is for data recovery. * * The data gets written (in the correct directory structure) below the * current working directory; if entries already exist, the export will stop, * so this should be an empty directory. * */ /** \name Undefined functions * * There are some callbacks that should never get called. * Rather than setting their pointer in the \c export_editor structure * to \c NULL (and risk a \c SEGV if the ra layer definition changes), we use * some nearly empty functions to cry if something unexpected happens. */ /** @{ */ /// FSVS GCOV MARK: exp__invalid should not be executed /** This function is used to write messages in case an undefined function * gets called by the ra layer. 
*/ svn_error_t *exp__invalid(const char *name) { int status; STOPIF_CODE_ERR(1, EINVAL, "The function %s got called " "during an export operation;\n" "this call is unexpected and won't be handled.\n" "This should not happen.\n", name); ex: RETURN_SVNERR(status); } /// FSVS GCOV MARK: exp__delete should not be executed svn_error_t *exp__delete(const char *path UNUSED, svn_revnum_t revision UNUSED, void *parent_baton UNUSED, apr_pool_t *pool UNUSED) { return exp__invalid(__PRETTY_FUNCTION__); } /// FSVS GCOV MARK: exp__open_dir should not be executed svn_error_t *exp__open_dir(const char *path UNUSED, void *parent_baton UNUSED, svn_revnum_t base_revision UNUSED, apr_pool_t *dir_pool UNUSED, void **child_baton UNUSED) { return exp__invalid(__PRETTY_FUNCTION__); } /// FSVS GCOV MARK: exp__open_file should not be executed svn_error_t *exp__open_file(const char *path UNUSED, void *parent_baton UNUSED, svn_revnum_t base_revision UNUSED, apr_pool_t *file_pool UNUSED, void **file_baton UNUSED) { return exp__invalid(__PRETTY_FUNCTION__); } /** @} */ /** The export editor functions. * The functionality is the same as on update, so we simply use that * functions. */ const svn_delta_editor_t export_editor = { .set_target_revision = up__set_target_revision, .open_root = up__open_root, .delete_entry = exp__delete, .add_directory = up__add_directory, .open_directory = exp__open_dir, .change_dir_prop = up__change_dir_prop, .close_directory = up__close_directory, .absent_directory = up__absent_directory, .add_file = up__add_file, .open_file = exp__open_file, .apply_textdelta = up__apply_textdelta, .change_file_prop = up__change_file_prop, .close_file = up__close_file, .absent_file = up__absent_file, .close_edit = up__close_edit, .abort_edit = up__abort_edit, }; /** -. * \a root must already be initialized. * * The difference to update is that export expects an empty filesystem, ie. * to fetch *all* nodes; it doesn't check whether some already exist * locally. 
*/ int exp__do(struct estat *root, struct url_t *url) { int status; svn_revnum_t rev; svn_error_t *status_svn; void *report_baton; const svn_ra_reporter2_t *reporter; status_svn=NULL; current_url=url; STOPIF( url__open_session(NULL, NULL), NULL); rev=url->target_rev; /* See the comment in update.c */ STOPIF( url__canonical_rev(current_url, &rev), NULL); /* export files */ STOPIF_SVNERR( svn_ra_do_update, (current_url->session, &reporter, &report_baton, opt_target_revision, "", TRUE, &export_editor, root, current_url->pool) ); /* We always pretend to start empty. */ STOPIF_SVNERR( reporter->set_path, (report_baton, "", rev, TRUE, NULL, current_url->pool)); STOPIF_SVNERR( reporter->finish_report, (report_baton, current_url->pool)); ex: STOP_HANDLE_SVNERR(status_svn); ex2: return status; } /** -. * * This does a checkout of the given URL (using the various meta-data flags), * but without writing to the WAA. * */ int exp__work(struct estat *root, int argc, char *argv[]) { int status; struct url_t url; status=0; STOPIF_CODE_ERR(argc!=1, EINVAL, "1 parameter (URL) expected"); STOPIF( url__parse(argv[0], &url, NULL), NULL); /* Initialize root structure */ STOPIF( hlp__lstat(".", &root->st), "Cannot retrieve information about '.'"); STOPIF( exp__do(root, &url), NULL); printf("Exported revision\t%ld.\n", target_revision); /* As this URL is not stored in the urllist array, it wouldn't get * cleaned up. */ STOPIF( url__close_session(&url), NULL); ex: return status; } fsvs-1.2.6/src/est_ops.h0000644000202400020240000001573711207674777014122 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
************************************************************************/ #ifndef __EST_OPS_H__ #define __EST_OPS_H__ #include "global.h" #include "waa.h" #include "props.h" #include "options.h" /** \file * Functions for handling of indiviual struct estats. */ /** Return the path of this entry. */ int ops__build_path(char **path, struct estat *sts); /** Calculate the length of the path for this entry. */ int ops__calc_path_len(struct estat *sts); /** Compare the \c struct \c sstat_t , and set the \c entry_status. */ int ops__stat_to_action(struct estat *sts, struct sstat_t *new); /** \name Finding entries */ /** @{ */ /** Find an entry in the \a dir by \a bname. */ int ops__find_entry_byname(struct estat *dir, char *name, struct estat **sts, int ignored_too); /** Find an entry in \a dir by the inode. */ int ops__find_entry_byinode(struct estat *dir, dev_t dev, ino_t inode, struct estat **sts); /** Value for unknown indizes in \c ops__delete_entry(). */ #define UNKNOWN_INDEX (-1) /** Delete an entry by either \a index_byinode, or \a index_byname, or \a * sts. */ int ops__delete_entry(struct estat *dir, struct estat *sts, int index_byinode, int index_byname); /** @} */ /** This function returns blocks of (struct estat), possibly smaller than * wanted by the caller. */ int ops__allocate(int needed, struct estat **where, int *count); /** Frees the memory associated with this entry and all its children. */ int ops__free_entry(struct estat **sts_p); /** Frees all "marked" entries in the given directory at once. */ int ops__free_marked(struct estat *dir, int fast_mode); /** Appends the array of \a count \a new_entries as children to \a dir. */ int ops__new_entries(struct estat *dir, int count, struct estat **new_entries); /** Writes a textual description of the given \a sts to the \a filehandle. * */ int ops__save_1entry(struct estat *sts, ino_t parent_ino, int filehandle); /** Fills \a sts from a buffer \a where. 
*/ int ops__load_1entry(char **where, struct estat *sts, char **filename, ino_t *parent_i); /** Does a \c lstat() on the given entry, and sets the \c entry_status. */ int ops__update_single_entry(struct estat *sts, struct sstat_t *output); /** Wrapper for \c ops__update_single_entry and some more. */ int ops__update_filter_set_bits(struct estat *sts); /** Converts a string describing a special node to the \c struct \c sstat_t * data. */ int ops__string_to_dev(struct estat *sts, char *data, char **info); /** Converts a device entry into a string suitable for storage in the WAA * area (using a \c : separator). */ char *ops__dev_to_waa_string(struct estat *sts); /** See \c ops__dev_to_waa_string(), but uses a space character (\c \\x20 ) * for subversion compatibility. */ char *ops__dev_to_filedata(struct estat *sts); /** Reads a file. */ int ops__read_special_entry(apr_file_t *a_stream, char **data, int max, ssize_t *real_len, char *filename, apr_pool_t *pool); /** Reads a symlink and returns a pointer to its destination. */ int ops__link_to_string(struct estat *sts, char *filename, char **erg); /** Returns the filename. */ char *ops__get_filename(char *path); /** Copies the data of a single struct estat. */ void ops__copy_single_entry(struct estat *src, struct estat *dest); /** Create or find an entry below parent. */ int ops__traverse(struct estat *parent, char *relative_path, int flags, int sts_flags, struct estat **ret); /** Set the \ref estat::do_userselected and \ref estat::do_this_entry * attributes depending on \ref opt_recursive and the parent's bits. */ void ops__set_todo_bits(struct estat *sts); /** Determines whether child entries of this entry should be done, based on * the recursive settings and \a dir's todo-bits. */ int ops__are_children_interesting(struct estat *dir); /** Applies the defined group to the entry \a sts. 
*/ int ops__apply_group(struct estat *sts, hash_t *props, apr_pool_t *pool); /** Creates a copy of \a sts, and keeps it referenced by \c sts->old. * */ int ops__make_shadow_entry(struct estat *sts, int flags); #define SHADOWED_BY_REMOTE (1) #define SHADOWED_BY_LOCAL (2) #ifdef ENABLE_RELEASE static inline void DEBUGP_dump_estat(struct estat *sts UNUSED) { } #else void DEBUGP_dump_estat(struct estat *sts); #endif inline static int ops__allowed_by_filter(struct estat *sts) { #ifdef ENABLE_DEBUG BUG_ON(!sts->do_filter_allows_done, "%s: do_filter_allows not done", sts->name); #endif return sts->do_filter_allows; } inline static int ops__calc_filter_bit(struct estat *sts) { sts->do_filter_allows_done=1; sts->do_filter_allows = opt__get_int(OPT__FILTER) == FILTER__ALL || /* or it's an interesting entry. */ (sts->entry_status & opt__get_int(OPT__FILTER)); return sts->do_filter_allows; } /** Correlating entries from two directories \a dir_a and \a dir_B. * @{ */ /** Callback function type for A-only and B-only elements. * The first parameter is a pointer to the current struct estat; the other * is the pointer to the pointer in the directory structure. * Not often needed ... could be done by var_args. */ typedef int (*ops__correlate_fn1_t)(struct estat *, struct estat **); typedef int (*ops__correlate_fn2_t)(struct estat *, struct estat *); /** The function to go through the lists. */ int ops__correlate_dirs(struct estat *dir_A, struct estat *dir_B, ops__correlate_fn1_t only_A, ops__correlate_fn2_t both, ops__correlate_fn1_t only_B, ops__correlate_fn2_t for_every); /** @} */ /** Startstrings for links in the repository. 
*/ extern const char link_spec[], cdev_spec[], bdev_spec[]; #define ops__mark_childchanged(start, field) \ do { \ register struct estat *_s=(start); \ while (_s && !(_s->field & FS_CHILD_CHANGED)) \ { \ _s->field |= FS_CHILD_CHANGED; \ _s=_s->parent; \ } \ } while (0) #define ops__mark_parent_cc(changed_entry, field) \ ops__mark_childchanged(changed_entry->parent, field) #define ops__mark_changed_parentcc(changed_entry, field) \ do { \ changed_entry->field |= FS_CHANGED; \ ops__mark_parent_cc(changed_entry, field); \ } while (0) /** Do we want this entry written in the entry list? */ static inline int ops__should_entry_be_written_in_list(struct estat *sts) { if (sts->to_be_ignored) return 0; if (sts->flags & RF_DONT_WRITE) return 0; return 1; } static inline int ops__has_children(struct estat *sts) { return S_ISDIR(sts->st.mode) && sts->entry_count; } #endif fsvs-1.2.6/src/tools/0000755000202400020240000000000012554717233013407 5ustar marekmarekfsvs-1.2.6/src/tools/man-repair.pl0000755000202400020240000000162211100577403015770 0ustar marekmarek#!/usr/bin/perl # # This has gone much too far. # Doxygen just writes garbage as man page. # But what else should I use? # $output=shift || die "output?"; $new_title=shift; ($section) = ($output =~ m/(\d)$/); open(STDOUT, "> $output") || die "write $output: $!"; $done=0; $had_title=0; while () { # Change title and section. $done=s{^ (\.TH \s+) "([^"]+)" \s+ \d \s+ }{ $1 . '"' . ($new_title || $2) . '"' . " $section " }ex unless $done; # Title again - it's merged with the first headline. s/^(.*\S)\s*- (\.SH ".*")/($new_title || $1) . "\n" . $2/e; # Doxygen generates wrong lines before headlines. if ($_ eq "\\fC \\fP\n") { $x=; # Only print this string if next line is no header. print $_ if $x !~ m/^\.S[HS]/; $_=$x; } # \fC may not have a ' directly afterwards. 
s#^\\fC'#\\fC '#; print($_) || die $!; } close(STDOUT) || die $!; exit !$done; fsvs-1.2.6/src/tools/fsvs-chrooter.c0000644000202400020240000001744711147210573016364 0ustar marekmarek/************************************************************************ * Copyright (C) 2007 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * * A chroot environment for fsvs. * * Please see the \ref howto_chroot documentation for details. */ /** \defgroup howto_chroot HOWTO: About running fsvs on older systems * \ingroup howto * * This document explains how the chroot-wrapper for FSVS works, and how * it has to be configured. * * * \section chrooter_why Why do I need this? * * You possibly want to use FSVS on some older system, but don't want to * build everything needed there - apr, subversion, neon, ... * * * \section chrooter_not How it doesn't work * * An easy workaround would be using a chroot-environment - but then you * don't have access to the data you'd like versioned. * * Another way is to use \c LD_LIBRARY_PATH - but that doesn't work (at * least for me) for the later-loaded libraries, like \c libnss_dns and so * on. * * Even using the \c rpath parameter for linking doesn't quite work - all * dynamically loaded things, like locale data, timezones, message tables, * and so on are taken from the current root on - and may not match the * needed versions. * * * \section chrooter_how How it does work * * A small helper program allows to copy FSVS (with needed libraries) * from a current system (like debian unstable) to any * (architecturally matching) other distribution, without worrying about * library incompatibilities. 
* * This works by calling this wrapper program; it goes into a \c chroot * jail and calls FSVS with additional data; FSVS then tries to load * all needed libraries (see \ref hlp__chrooter), and goes out of the jail * to resume operations from the default enviroment. * * * \section chrooter_old On the old system * * On your \e old system you use an additional parameter for * configure: * \code * ./configure --with-chroot=/usr/local/fsvs-chroot * make * \endcode * * This builds only \c tools/fsvs-chrooter -- this you put into \c \c * /usr/local/bin or whereever you like. It should be in a directory listed * in \c PATH! * * * \section chrooter_current What to do on the current (updated) machine * * You take FSVS and copy that with all needed libraries into * some new directory structure on your old system; eg. \c * /usr/local/fsvs-chroot. * Don't forget to copy the later-loaded libraries and data files - ldd * fsvs won't give you the whole list! You can get a good list to * start (on the current machine) with * \code * strace -e open -o /tmp/list fsvs remote-status * \endcode * as that opens a repository connection. Not everything from this list is * needed; generally only files matching *.so.*, and \c * locale-archive. * * Please create the whole structure (as far as needed) as it is - ie. * \code * /usr/local/fsvs-chroot/ * lib/ * libc.so.6 * ld-linux.so.2 * ... * usr/ * lib/ * libnss_dns.so * ... * local/ * bin/ * fsvs * \endcode * * Why? First, it's easier for you to update later, and second the dynamic * linker knows where to look. * * \note You'll also see some additional files in the \c strace output - * such things as \c /etc/hosts, \c /etc/resolv.conf, \c /etc/nsswitch.conf * and so on. These tell the network libraries how to resolve names via * DNS, and similar data. \n They should normally be identical to the file * on the \b target machine; to keep them the same, it might be a good idea * to have them copied into the chroot jail from time to time. 
\note A * binding mount would be better still - but as \c /etc/ld.so.cache should * be taken from the newer machine, you'd have to do every single file. * \note It should be possible to simply have \b no \c ld.so.cache file; * then the dynamic linker would have to search the directories by himself. * * * \section chrooter_usage How is this used, then? * * FSVS-chrooter can be called just like fsvs - it relays all parameters * into the jailed binary. * * Although it might be better to set the environment variables for \c * fsvs-chrooter in a shell script named FSVS - then the other programs * won't have to put up with the long library list. \n The \ref chrooter_sh * "prepare script" below generates such a file. * * * \section chrooter_sh Prepare script * * If you look into \c tools/, you'll find a script named \c * prepare-chroot.pl. This is what I use to create the \c chroot jail on my * debian unstable machine. * * \note Most of the libraries listed in the environment variable could be * removed, as they're referenced in the fsvs binary. Only the few that are * \b not automatically loaded have to be in the list. * * * \section chrooter_rest Some thoughts and technical details * * \note Why does FSVS-chrooter set two directory variables? \n * We need the old \c / to set the correct root directory back; and the * current working directory has to be restored, too.\n * If we did a chroot(current working directory), we'd see a * completly different directory structure than all the other filesystem * tools (except for the case cwd = "/", of course). * * \note Maybe give the chrooter setuid and drop priviledges after * returning from chroot() jail? Not sure about security implications, * seems to be unsafe. Does anybody know how to do that in a safe * manner? * * \note If your \e old system is a \b really old system, with a kernel * before 2.4.17 or something like that, you \b might get problems with the * threading libraries - \c libpthread.so. 
\n * Search for \c LD_ASSUME_KERNEL to read a bit about the implications. \n \n * Information about how to proceed there is wanted. * * If this doesn't work for you, because some function which would load * additional datafiles isn't called, try the \c strace trick. * Patches are welcome! * * * Ideas, suggestions, feedback please to the mailing lists. * */ #include #include #include #include #include #include #include "../config.h" #include "../interface.h" #define STOP(...) \ do { \ fprintf(stderr, __VA_ARGS__); \ fprintf(stderr, "\n%s (%d)\n" \ "fsvs-chrooter (licensed under the GPLv3), (C) by Ph. Marek;" \ " version " FSVS_VERSION "\n", \ strerror(errno), errno); \ exit(errno); \ } while (0) void open_keep_set(char *fn, char *env) { char stg[10]; int hdl; int flags; int status; hdl=open(fn, O_RDONLY); if (hdl<0) STOP("Cannot open directory %s", fn); flags=fcntl(hdl, F_GETFD); if ( flags == -1 ) STOP("Cannot get fd flags"); flags &= ~FD_CLOEXEC; status=fcntl(hdl, F_SETFD, flags); if ( flags == -1 ) STOP("Cannot set fd flags"); sprintf(stg,"%d",hdl); setenv(env, stg, 1); } int main(int argc, char *args[]) { errno=0; if ( getenv(CHROOTER_LIBS_ENV) == NULL) STOP("Please specify in %s which libraries should be preloaded.", CHROOTER_LIBS_ENV); open_keep_set("/", CHROOTER_ROOT_ENV); open_keep_set(".", CHROOTER_CWD_ENV); if (chroot(CHROOTER_JAIL)==-1) STOP("Cannot do chroot(%s)", CHROOTER_JAIL); if (chdir("/") == -1) STOP("Cannot do chdir(/) call"); execvp("fsvs",args); STOP("Executing fsvs in the chroot jail failed"); return 0; } fsvs-1.2.6/src/tools/prepare-chroot.pl0000755000202400020240000001123211071203660016663 0ustar marekmarek#!/usr/bin/perl # set fileencoding=utf-8 ########################################################################## # Copyright (C) 2007 Philipp Marek. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. 
########################################################################## #### # # A script to prepare a chroot environment for the fsvs-chrooter helper # program. # The list of libraries for *my* system is at the end. # #### $destdir=shift() || die "Where should I put the libraries?\n"; if (!-d $destdir) { print "The destination directory\n", " $destdir\n", "does not exist. Shall I create it (Y/N) ?\n"; $answer=; die "No valid destination.\n" if ($answer !~ m/^y/i); # we put the fsvs binary in /bin. system qw(mkdir -p),$destdir . "/bin"; die "mkdir $destdir/bin failed.\n" if $? || !-d $destdir; } # We simply use rsync, which creates all needed directories. # But either we would copy only the links, or only their data ... both is # not pretty. # So we send a list to copy, and copy symlinks too. open(RSYNC, "| rsync -aH --files-from=- / '$destdir/' ") # --stats") || die "cannot start rsync: $!"; while () { chomp; next if m#^\s*$#; # Maybe not the whole list would be necessary, as a single library might # bring in all others ... but who wants to parse the tree of dependencies? # Could be a later version. $needed_libs{$1}++ if m#/(lib[^/]+\.so\.\d+[^/]*)$#; $pre=""; while (1) { print $pre,$_; print RSYNC $_,"\n"; last if !-l; $pre=" -> "; $n=readlink($_); if ($n =~ m#^/#) { $_=$n; } else { s*/[^/]+$*/$n*; } } print "\n"; } close(RSYNC) || die "Copying failed? $!"; print "Library copying finished.\n\n\n"; $binary=""; for $bin (grep(m#.# && -e $_, `which fsvs 2> /dev/null`, "./fsvs")) { system "cp", "-a", $bin, "$destdir/bin/"; print "Copying $bin failed: $?",next if $?; print "$bin was successfully copied into the destination directory.\n\n"; $binary=$bin; last; } print "Your fsvs binary could not be found. Please copy it into your\n", "chroot jail, eg. 
into $destdir/bin/.\n\n", "As a consequence library name elimination could not be fully done -\n", "your list of libraries to be imported is longer than necessary.\n\n" if !$binary; %libs_deps=(); $cur_bin=$binary; while (keys %needed_libs) { # Get dependencies for (`ldd "$cur_bin" 2>/dev/null`) { next unless m#=> .*/(lib[^/\s]+\.so\.[^/\s]+)#; $libs_deps{$1}{$cur_bin}++; # This library is required by another, so we need not preload it # explicitly. # print "$1 needed by $cur_bin\n"; delete $needed_libs{$1}; } last; } # Choose upper-most libraries. $libnames = join(" ", sort keys %needed_libs); $name="/tmp/fsvs-chrooter.ex.sh-$$"; open(T, "> $name") || die "open($name): $!"; print T "#!/bin/sh # fsvs-chrooter call script. # Autogenerated on " . localtime() . ' export FSVS_CHROOT_LIBS="' . $libnames . '" exec fsvs-chrooter "$@" '; close T; chmod 0755, $name; print "A shell script named\n", "\t$name\n", "has been written; you probably want to put that, along with ", "fsvs-chrooter\n", "(on the *other* machine!!) 
in a directory in your PATH.\n\n"; exit; __DATA__ /etc/ld.so.cache /lib/ld-linux.so.2 /lib/libcom_err.so.2 /lib/libcrypt.so.1 /lib/libc.so.6 /lib/libdl.so.2 /lib/libkeyutils.so.1 /lib/libm.so.6 /lib/libnsl.so.1 /lib/libnss_compat.so.2 /lib/libnss_dns.so.2 /lib/libnss_files.so.2 /lib/libpthread.so.0 /lib/libresolv.so.2 /lib/librt.so.1 /lib/libuuid.so.1 /usr/lib/gconv/gconv-modules.cache /usr/lib/i686/cmov/libcrypto.so.0.9.8 /usr/lib/i686/cmov/libssl.so.0.9.8 /usr/lib/libapr-1.so.0 /usr/lib/libaprutil-1.so.0 /usr/lib/libdb-4.4.so /usr/lib/libexpat.so.1 /usr/lib/libgcrypt.so.11 /usr/lib/libgdbm.so.3 /usr/lib/libgnutls.so.13 /usr/lib/libgpg-error.so.0 /usr/lib/libgssapi_krb5.so.2 /usr/lib/libk5crypto.so.3 /usr/lib/libkrb5.so.3 /usr/lib/libkrb5support.so.0 /usr/lib/liblber.so.2 /usr/lib/libldap_r.so.2 /usr/lib/libm.so /usr/lib/libneon.so.26 /usr/lib/libpcre.so.3 /usr/lib/libpq.so.5 /usr/lib/libsasl2.so.2 /usr/lib/libsqlite3.so.0 /usr/lib/libsvn_client-1.so.1 /usr/lib/libsvn_delta-1.so.1 /usr/lib/libsvn_diff-1.so.1 /usr/lib/libsvn_fs-1.so.1 /usr/lib/libsvn_fs_base-1.so.1 /usr/lib/libsvn_fs_fs-1.so.1 /usr/lib/libsvn_ra-1.so.1 /usr/lib/libsvn_ra_dav-1.so.1 /usr/lib/libsvn_ra_local-1.so.1 /usr/lib/libsvn_ra_svn-1.so.1 /usr/lib/libsvn_repos-1.so.1 /usr/lib/libsvn_subr-1.so.1 /usr/lib/libsvn_wc-1.so.1 /usr/lib/libtasn1.so.3 /usr/lib/libxml2.so.2 /usr/lib/libz.so.1 /usr/lib/locale/locale-archive /usr/share/locale/de/LC_MESSAGES/libc.mo /usr/share/locale/locale.alias fsvs-1.2.6/src/est_ops.c0000644000202400020240000013365612467104255014102 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
************************************************************************/ #include #include #include #include #include #include #include #include "global.h" #include "status.h" #include "cache.h" #include "actions.h" #include "est_ops.h" #include "ignore.h" #include "direnum.h" #include "warnings.h" #include "helper.h" #include "checksum.h" #include "url.h" /** \file * Handling of single struct \a estat s. * * */ /** Single-linked list for storing the freed entries. * The struct \a free_estat get written above the struct \a estat it replaces. * */ struct free_estat { /** Next free block(s) */ struct free_estat *next; /** Number of "struct estat"s that can be stored here. */ int count; }; /** Formats for writing entries in the \a dir files. * mode ctime mtime repo_flags dev_descr MD5_should * size repos_version url# dev# inode# parent_line# entry_count * uid gid name\\0\\n * Directories have an \c x instead of MD5_*. */ const char ops__dir_info_format_p[]="%07llo %8x %8x %x %s %s " "%lld %ld %u %lx %lld %lld %u " "%u %u %s"; #define WAA_MAX_DIR_INFO_CHARS (11+1+8+1+8+1+8+1+APR_MD5_DIGESTSIZE*2+1 \ +18+1+9+1+9+1+16+1+18+1+18+1+9+1+ \ 9+1+9+1+NAME_MAX+1+1) /** -. * * It's a bit unaesthetical that devices use a " " for the repository data, * but a ":" in the waa as delimiter. * But "link " is specified in subversion, and having the repository data * different would not be better. * So we just allow both at parsing, and use the "right" for each target. */ const char link_spec[]="link ", cdev_spec[]="cdev", bdev_spec[]="bdev"; static struct free_estat *free_list = NULL; /** -. * * \c info, if not \c NULL, gets the pointer to the first character after * the parsed text: For a symlink it is returned as the path it points to, * devices are fully decoded and should a pointer to \c \\0. 
*/ int ops__string_to_dev(struct estat *sts, char *data, char **info) { int maj, min; int ft, mode, len; char delimiter; int status; status=0; if (0 == strncmp(data, link_spec, strlen(link_spec))) { mode=S_IFLNK; if (info) *info=data+5; } else { if (0 == strncmp(data, cdev_spec, strlen(cdev_spec))) { data+=strlen(cdev_spec); mode=S_IFCHR; } else if (0 == strncmp(data, bdev_spec, strlen(bdev_spec))) { data+=strlen(bdev_spec); mode=S_IFBLK; } else mode=0; ft=sscanf(data, "%c0x%X:0x%X%n", &delimiter, &maj, &min, &len); STOPIF_CODE_ERR(mode == 0 || ft != 3 || (delimiter != ':' && delimiter != ' '), EINVAL, "'%s' is not parseable as a special description", data); if (info) *info=data+len; #ifdef DEVICE_NODES_DISABLED DEVICE_NODES_DISABLED(); #else sts->st.rdev=MKDEV(maj, min); #endif } sts->st.mode= (sts->st.mode & ~S_IFMT) | mode; sts->local_mode_packed = MODE_T_to_PACKED(mode); ex: return status; } /** -. * The subversion header string for special nodes is prepended. * * The returned pointer in \a *erg must not be free()d. */ int ops__link_to_string(struct estat *sts, char *filename, char **erg) { static struct cache_t *cache=NULL; char *cp; int l, status, hlen; STOPIF( cch__new_cache(&cache, 4), NULL); status=0; BUG_ON(!S_ISLNK(sts->st.mode)); if (!filename) STOPIF( ops__build_path(&filename, sts), NULL); hlen=strlen(link_spec); l=sts->st.size + hlen + 1 + 8; STOPIF( cch__add(cache, 0, NULL, l, &cp), NULL); strcpy(cp, link_spec); STOPIF_CODE_ERR( readlink(filename, cp+hlen, sts->st.size) == -1, errno, "can't read link %s", filename); cp[hlen+sts->st.size]=0; *erg=cp; ex: return status; } char *ops___dev_to_string(struct estat *sts, char delimiter) { static char buffer[64]; /* I'm not fully sure about that. */ BUG_ON(!(sts->remote_status & FS_NEW) && !(S_ISBLK(sts->st.mode) || S_ISCHR(sts->st.mode)), "%s: mode is 0%o", sts->name, sts->st.mode); #ifdef DEVICE_NODES_DISABLED DEVICE_NODES_DISABLED(); #else sprintf(buffer, "%s%c0x%x:0x%x", S_ISBLK(sts->st.mode) ? 
bdev_spec : cdev_spec, delimiter, (int)MAJOR(sts->st.rdev), (int)MINOR(sts->st.rdev)); #endif return buffer; } /** -. * */ char *ops__dev_to_waa_string(struct estat *sts) { return ops___dev_to_string(sts, ':'); } /** -. * */ char *ops__dev_to_filedata(struct estat *sts) { return ops___dev_to_string(sts, ' '); } /** -. * Returns the change mask as a binary OR of the various \c FS_* constants, * see \ref fs_bits. */ int ops__stat_to_action(struct estat *sts, struct sstat_t *new) { struct sstat_t *old; int ft_old, ft_new; int file_status; old=&(sts->st); /* The exact comparison here would be * old->_mtime != new->_mtime || * old->_ctime != new->_ctime ? FS_META_MTIME : 0; * but that doesn't work, as most filesystems don't have * nanoseconds stored. Furthermore we get only usec in the repository * (due to svn_time_to_string), so the nsec make no sense here. * We compare only the "coarse", but common, granularity of seconds. * VFAT can store only even seconds! * * The problem gets a bit more complicated as the linux kernel keeps * nsec in the dentry (cached inode), but as soon as the inode has to be * read from disk it has possibly only seconds! * * There's a long thread on dev@subversion.tigris.org about the * granularity of timestamps - auto detecting vs. setting, etc. */ file_status = old->mtim.tv_sec != new->mtim.tv_sec ? FS_META_MTIME : 0; /* We don't show a changed ctime as "t" any more. On commit nothing * would change in the repository, and it looks a bit silly. * A changed ctime is now only used as an indicator for changes. */ if (old->uid != new->uid) file_status |= FS_META_OWNER; if (old->gid != new->gid) file_status |= FS_META_GROUP; if (old->mode != new->mode) file_status |= FS_META_UMODE; /* both of same type ? 
*/ ft_old = old->mode & S_IFMT; ft_new = new->mode & S_IFMT; if (ft_old != ft_new) { file_status |= FS_REPLACED; goto ex; } /* same type - compare */ BUG_ON(sts->to_be_ignored); switch (ft_new) { case S_IFBLK: case S_IFCHR: DEBUGP("old=%llu new=%llu", (t_ull)old->rdev, (t_ull)new->rdev); file_status |= (old->rdev == new->rdev) ? FS_NO_CHANGE : FS_REPLACED; break; case S_IFLNK: case S_IFREG: if (old->size != new->size) file_status |= FS_CHANGED; else /* The changed flag can be set or cleared by cs__compare_file(). * We don't set it until we *know* the entry has changed. */ /* If the entry is copied, the ctime *must* be different (unless * it's a hardlink); here we assume that it's not changed, if the * mtime is the same. */ if ((file_status & FS_META_MTIME) || (old->ctim.tv_sec != new->ctim.tv_sec && !(sts->flags & RF___IS_COPY)) ) file_status |= FS_LIKELY; break; case S_IFDIR: /* This entry *could* be changed. * But as the changed flag is set if a child entry is missing * or if new entries are found, but never cleared, we don't set * it here. */ if ( (file_status & FS_META_MTIME) || old->ctim.tv_sec != new->ctim.tv_sec ) file_status |= FS_LIKELY; break; default: BUG_ON(1); // case FT_IGNORE: file_status=FS_NO_CHANGE; } ex: DEBUGP("change: types 0%o vs 0%o; 0x%x=%s", ft_old, ft_new, file_status, st__status_string_fromint(file_status)); return file_status; } /** -. * * The \a filename still points into the buffer (\c mmap()ed area) and must * be copied. * * \a mem_pos is advanced, and points \b after the \c \\0. If a \c \\n is * seen immediately afterwards, it is skipped, too. * * \a parent_i gets set to the stored value; the translation to a \c parent * pointer must be done in the caller. * * \c EOF cannot be reliable detected here; but we are guaranteed a * \\0\\n at the end of the string, to have a filename * termination. 
*/ int ops__load_1entry(char **mem_pos, struct estat *sts, char **filename, ino_t *parent_i) { char *buffer, *before; int status; ino_t parent_inode; unsigned internal_number, e_t; status=0; buffer=*mem_pos; /* Now parse. Use temporary variables of defined size for some inputs. */ /* The parsing is in a block, followed by the checks, so that the icache * is happy. */ /* Hexadecimal allows more characters than octal, so we have to check * whether something was done here. * Below we can try some conversions (for the same base) one after * another, and only check the last - if a bad character is in the * string, all of the last calls will fail, so we only need to check the * last conversion in each per-base block. * We have to skip the initial whitespace; if there's eg " X" in the * string, the space would be skipped, and the pointers different. */ before=hlp__skip_ws(buffer); sts->st.mode = strtoul(before, &buffer, 8); sts->old_rev_mode_packed = sts->new_rev_mode_packed = sts->local_mode_packed = MODE_T_to_PACKED(sts->st.mode); if (before == buffer) goto inval; /* And we know that we write a space; without this a later test would * fail, and give some misleading error. */ if (*(buffer++) != ' ') goto inval; /* Base 16. */ sts->st.ctim.tv_sec= strtoul(buffer, &buffer, 16); sts->st.mtim.tv_sec= strtoul(buffer, &buffer, 16); before=hlp__skip_ws(buffer); sts->flags= strtoul(before, &buffer, 16); if (before == buffer) goto inval; if (*(buffer++) != ' ') goto inval; /* Devices have major:minor stored */ buffer=hlp__skip_ws(buffer); if (S_ISBLK(sts->st.mode) || S_ISCHR(sts->st.mode)) STOPIF( ops__string_to_dev(sts, buffer, &buffer), NULL); else buffer=hlp__get_word(buffer, NULL); /* All entries but directories have MD5 */ buffer=hlp__skip_ws(buffer); if (!S_ISDIR(sts->st.mode)) STOPIF( cs__char2md5( buffer, &buffer, sts->md5), "Parsing the md5 failed"); else buffer=hlp__get_word(buffer, NULL); /* Base 10. 
*/ sts->st.size = strtoul(buffer, &buffer, 10); sts->old_rev = sts->repos_rev = strtoul(buffer, &buffer, 10); internal_number = strtoul(buffer, &buffer, 10); sts->st.dev = strtoul(buffer, &buffer, 16); sts->st.ino = strtoul(buffer, &buffer, 10); parent_inode= strtoul(buffer, &buffer, 10); e_t = strtoul(buffer, &buffer, 10); sts->st.uid = strtoul(buffer, &buffer, 10); before=hlp__skip_ws(buffer); sts->st.gid = strtoul(before, &buffer, 10); if (before == buffer) goto inval; /* We need to parse, but would overwrite the shared md5 member. */ if (S_ISDIR(sts->st.mode)) sts->entry_count = e_t; /* Only a directory may have children. We cannot test sts->entry_count, * as this is already overwritten by the MD5. */ BUG_ON(e_t && !S_ISDIR(sts->st.mode)); /* Skip over exactly one space - else we'd loose information about * filenames starting with whitespaces. */ if (*buffer != ' ') goto inval; *filename=buffer+1; /* Only the root entry has parent_inode==0; the others start counting * with 1. */ if (parent_inode) { /* There may be entries without an URL associated - eg. entries which * were just added, but not committed. */ if (internal_number) STOPIF( url__find_by_intnum(internal_number, &(sts->url)), NULL); } else { /* The root entry gets the highest priority url. * There may be no URLs defined! */ sts->url= urllist_count ? urllist[urllist_count-1] : NULL; } if (parent_i) *parent_i=parent_inode; /* Advance memory pointer past end of filename. * Skip \0 and \n. * If we didn't skip the \n, the next sscanf() should work, too; * but the caller would have a hard time figuring if we're already * finished. 
*/ *mem_pos = *filename + strlen(*filename) + 1; if (**mem_pos == '\n') (*mem_pos)++; ex: return status; /* Out of line */ inval: STOPIF( EINVAL, "Error parsing entry line \"%s\";\n" "your entry list is corrupt, ask the users mailing list, please.", *mem_pos); /* gcc warns about reaching the end of a non-void function - which can't * really happen here, but to humor the compiler ... */ goto ex; } /** Returns the number of entries to write into the entry list. * Must be called with a directory entry. */ int ops___entries_to_write(struct estat *dir) { struct estat **list; int count; count=0; if (!dir->entry_count) return 0; list=dir->by_inode; while (*list) { if (ops__should_entry_be_written_in_list(*list)) count++; list++; } return count; } /** -. * The parameter \a parent_ino is a(n integer) reference to the parent * directory - the line number in which it was written. * The format is fixed (see \c ops__dir_info_format_p); the string includes * a \c \\0 for filename termination, and a \c \\n at the end. * * Any other characters that are allowed in a filename can be written - * even control characters like \c \\n, \c \\r, \c \\f and so on. * */ int ops__save_1entry(struct estat *sts, ino_t parent_ino, int filehandle) { int len; static char buffer[WAA_MAX_DIR_INFO_CHARS+2] = { // overrun detection for debugging [sizeof(buffer)-1]=0xff, [sizeof(buffer)-2]=0x0, }; int is_dir, is_dev, status; int intnum; #if 0 if (sts->parent) { /* For entries other than the root node: * If the entry was not added or copied, it has to have an URL. * * But we cannot test for that, as _build_list does exactly that - and * is needed by the tests. */ BUG_ON(!sts->url && !(sts->flags & (RF_COPY_SUB | RF_COPY_BASE | RF_ADD))); } #endif is_dir = S_ISDIR(sts->st.mode); is_dev = S_ISBLK(sts->st.mode) || S_ISCHR(sts->st.mode); if (sts->match_pattern) STOPIF( ops__apply_group(sts, NULL, NULL), NULL); if (sts->url) intnum=sts->url->internal_number; else { /* A non-root entry has no url. 
May happen with _build_list, when * there are no urls. */ if (sts->parent) DEBUGP("Non-root entry %s has no URL", sts->name); intnum=0; } len=sprintf(buffer, ops__dir_info_format_p, (t_ull)sts->st.mode, (int)sts->st.ctim.tv_sec, (int)sts->st.mtim.tv_sec, sts->flags & RF___SAVE_MASK, ( is_dev ? ops__dev_to_waa_string(sts) : "nd" ), ( is_dir ? "x" : cs__md5tohex_buffered(sts->md5) ), (t_ull)sts->st.size, sts->repos_rev == SET_REVNUM ? sts->url->current_rev : sts->repos_rev, intnum, (t_ul)sts->st.dev, (t_ull)sts->st.ino, (t_ull)parent_ino, /* We have to make sure that the entry count in the parent is * correct. */ is_dir ? ops___entries_to_write(sts) : 0, sts->st.uid, sts->st.gid, sts->name ); BUG_ON(len > sizeof(buffer)-2); len++; // include \0 buffer[len++]='\n'; // redundant (?) check BUG_ON(buffer[sizeof(buffer)-1]!=0xff || buffer[sizeof(buffer)-2]!=0x0); is_dir=write(filehandle, buffer, len); STOPIF_CODE_ERR(is_dir != len, errno, "write entry"); status=0; ex: return status; } /** -. * * If no \c PATH_SEPARATOR is found in the \a path, the \a path itself is * returned. */ char *ops__get_filename(char *path) { char *cp; cp=strrchr(path, PATH_SEPARATOR); return cp ? cp+1 : path; } /** Returns the "rest" of the path; a \c \\0 is written over the * path separator. * * So path="abc/def/ghi" becomes "abc\0def/ghi" and the * returned * pointer points to "def/ghi". * * If there's only a filename left (no \c / found), this returns \c NULL. * */ static inline const char *ops___split_fnpart(const char *path) { char *cp; cp=strchr(path, PATH_SEPARATOR); if (!cp) return NULL; /* Overwrite multiple path separators */ while (*cp == PATH_SEPARATOR) *(cp++)=0; /* If the path looks like "name////", there's no next_part, too. */ if (!*cp) return NULL; return cp; } /** The \e real recursive part of ops__build_path(). * * This function has a non-standard return parameter - it gives the number * of characters written, and 0 denotes an error. 
*/ int ops__build_path2(char *path, int max, struct estat *sts) { int l,i; /* The path lenghts have just been fixed by ops__calc_path_len(), so we * can rely on that (minus the PATH_SEPARATOR). * If there's no parent, we're at ".". */ l=sts->parent ? sts->path_len - sts->parent->path_len - 1 : 1; if (l+1 > max) return 0; if (sts->parent) { i=ops__build_path2(path, max - (l+1), sts->parent); /* not enough space ? */ if (!i) return 0; } else { i=0; } memcpy(path+i, sts->name, l); path[i+l+0]=PATH_SEPARATOR; path[i+l+1]=0; return i+l+1; } /** -. * This function returns the number of characters needed. * We don't return success or failure; there should never be a problem, and * if we'd return 0 for success someone might put a \c STOPIF() in the * recursive call below, which would double the size of this function :-) * * We don't include the trailing \c \\0, as that would be counted on each * level. */ int ops__calc_path_len(struct estat *sts) { int plen; if (sts->parent) { if (!sts->parent->path_len) ops__calc_path_len(sts->parent); /* Include the path separator. */ plen=sts->parent->path_len+1; } else plen=0; sts->path_len = plen + strlen(sts->name); return sts->path_len; } /** -. * This function uses a rotating array of \c cache_entry_t. * This means that a few paths will be usable at once; * if some path has to be stored for a (possibly indefinite) time it should * be \c strdup()ed, or re-built upon need. * * A LRU eviction scheme is used - with last one marked. * * If some function modifies that memory, it should set the first * char to \c \\0, to signal that it's no longer valid for other users. * * \todo A further optimization would be to check if a parent is already * present, and append to that path. Similar for a neighbour entry. * * The \c cache_entry_t::id member is used as a pointer to the struct \ref * estat. 
* */ int ops__build_path(char **value, struct estat *sts) { static struct cache_t *cache=NULL; int status, i; unsigned needed_space; char *data; /* Please note that in struct \ref estat there's a bitfield, and its member * \ref cache_index must take the full range plus an additional "out of * range" value! */ STOPIF( cch__new_cache(&cache, 48), NULL); /* Look if it's cached. */ if (sts->cache_index>0 && sts->cache_index<=cache->max && cache->entries[sts->cache_index-1]->id == (cache_value_t)sts && cache->entries[sts->cache_index-1]->data[0]) { /* the found entry has index i; we'd like that to be the LRU. */ i=sts->cache_index-1; DEBUGP("%p found in cache index %d; lru %d", sts, i, cache->lru); cch__set_active(cache, i); goto ex; } if (!sts->path_len) ops__calc_path_len(sts); needed_space=sts->path_len+1; STOPIF( cch__add(cache, (cache_value_t)sts, NULL, needed_space, &data), NULL); /* Now we have an index, and enough space. */ status=ops__build_path2(data, needed_space, sts); if (status == 0) { /* Something happened with our path length counting - * it's really a bug. */ BUG("path len counting went wrong"); } data[status-1]=0; sts->cache_index=cache->lru+1; status=0; ex: // DEBUGP("status=%d; path=%s", status, cache->entries[cache->lru]->data); if (!status) *value=cache->entries[cache->lru]->data; return status; } /** -. * The directory gets by_name removed; by_inode is extended and sorted. * \note If this gets called multiple times for the same directory, * depending on the accesses in-between it might be possible to do the * sorting only once. */ int ops__new_entries(struct estat *dir, int count, struct estat **new_entries) { int status; status=0; /* By name is no longer valid. */ IF_FREE(dir->by_name); /* Now insert the newly found entries in the dir list. 
*/ STOPIF( hlp__realloc( &dir->by_inode, (dir->entry_count+count+1) * sizeof(dir->by_inode[0])), NULL); memcpy(dir->by_inode+dir->entry_count, new_entries, count*sizeof(dir->by_inode[0])); dir->entry_count += count; dir->by_inode[dir->entry_count]=NULL; /* Re-sort the index next time it's needed. */ dir->to_be_sorted=1; ex: return status; } /** -. * * This function doesn't return \c ENOENT, if no entry is found; \a *sts * will just be \c NULL. * */ int ops__find_entry_byname(struct estat *dir, char *name, struct estat **sts, int ignored_too) { int status; struct estat **sts_p; char *filename; status=0; BUG_ON(!S_ISDIR(dir->st.mode)); if (!dir->by_name) STOPIF(dir__sortbyname(dir), NULL); /* Strip the path, leave the file name */ filename=ops__get_filename(name); /* find entry, binary search. */ sts_p=bsearch(filename, dir->by_name, dir->entry_count, sizeof(dir->by_name[0]), (comparison_fn_t)dir___f_sort_by_nameCS); if (sts_p) DEBUGP("found %s on %p; ignored: 0x%x", name, sts_p, (*sts_p)->to_be_ignored); /* don't return removed entries, if they're not wanted */ *sts=sts_p && (!(*sts_p)->to_be_ignored || ignored_too) ? *sts_p : NULL; if (!*sts) DEBUGP("Searching for %s (%s) found no entry (ignored_too=%d)", filename, name, ignored_too); ex: return status; } #if 0 // Currently unused /** -. * */ int ops__find_entry_byinode(struct estat *dir, dev_t dev, ino_t inode, struct estat **sts) { int status; struct estat **sts_p; struct estat sts_cmp; status=0; BUG_ON(!S_ISDIR(dir->st.mode)); if (!dir->by_inode) STOPIF(dir__sortbyinode(dir), NULL); sts_cmp.st.dev=dev; sts_cmp.st.ino=inode; /* find entry, binary search. */ sts_p=bsearch(&sts_cmp, dir->by_inode, dir->entry_count, sizeof(dir->by_inode[0]), (comparison_fn_t)dir___f_sort_by_inode); *sts=sts_p && ((*sts_p)->entry_status != FT_IGNORE) ? *sts_p : NULL; ex: return status; } #endif /** Inline function to abstract a move. 
*/ static inline void ops___move_array(struct estat **array, int index, int len) { /* From A B C D E F i H J K l NULL * to A B C D E F H J K l NULL */ memmove( array+index, array+index+1, /* +1 below is for the NULL */ (len-index-1+1) * sizeof(*array) ); } /** -. * The returned area is zeroed. */ int ops__allocate(int needed, struct estat **where, int *count) { struct free_estat *free_p; int status, remain; int returned; status=0; BUG_ON(needed <=0, "not even a single block needed?"); DEBUGP("need %d blocks, freelist=%p", needed, free_list); if (free_list) { free_p=free_list; VALGRIND_MAKE_MEM_DEFINED(free_p, sizeof(*free_p)); if (free_p->count <= needed) { /* Whole free block is used up */ free_list=free_p->next; returned=free_p->count; *where=(struct estat*)free_p; } else { /* Only part of this block is needed. * We return the "higher" part in memory, so that the free list * is not changed. * * Needed: 3 * Free block: [0 1 2 3 4 5] size=6 * Returned: ^ * size=3 after*/ returned=needed; remain=free_p->count-needed; *where=((struct estat*)free_p) + remain; free_p->count=remain; DEBUGP("splitting block; %d remain", remain); } VALGRIND_MAKE_MEM_DEFINED(*where, sizeof(**where)*returned); /* Clear the memory. Not needed for calloc(). */ memset(*where, 0, sizeof(**where) * returned); } else { DEBUGP("no free list, allocating"); /* No more free entries in free list. Allocate. */ returned=needed; /* Allocate at least a certain block size. */ if (needed < 8192/sizeof(**where)) needed=8192/sizeof(**where); STOPIF( hlp__calloc( where, needed, sizeof(**where)), NULL); if (needed > returned) { free_list=(struct free_estat*)(*where+returned); free_list->next=NULL; free_list->count=needed-returned; } } DEBUGP("giving %d blocks at %p", returned, *where); BUG_ON(!returned, "Not even a single block returned!!"); if (count) *count=returned; /* The memory is cleared; cache_index == 0 means uninitialized, which is * exactly what we want. 
*/ VALGRIND_MAKE_MEM_DEFINED(*where, sizeof(**where) * returned); ex: return status; } /** -. * The pointer to the entry is set to \c NULL, to avoid re-using. */ int ops__free_entry(struct estat **sts_p) { int i, status; struct estat *sts=*sts_p; struct free_estat *free_p, *free_p2; struct free_estat **prev; struct free_estat *block; status=0; if (sts->old) STOPIF( ops__free_entry(& sts->old), NULL); if (S_ISDIR(sts->st.mode)) { BUG_ON(sts->entry_count && !sts->by_inode); for(i=0; ientry_count; i++) STOPIF( ops__free_entry(sts->by_inode+i), NULL); IF_FREE(sts->by_inode); IF_FREE(sts->by_name); IF_FREE(sts->strings); sts->st.mode=0; } /* Clearing the memory here serves no real purpose; * the free list written here overwrites parts. * So we clear on allocate. */ /* TODO: insert into free list (pointer, element count) with merging. * That requires finding a free block just below or just above * the current sts, and check if the current and the free can be merged. * Currently the list is just prepended. * * TODO: The list should be sorted in some way. * Possibly by address and size in two trees, to quickly find * the largest free block or the nearest block. */ /* The freed block stores the list data to other blocks. */ DEBUGP("freeing block %p", *sts_p); block=(struct free_estat*)*sts_p; /* Can we merge? 
*/ free_p=free_list; prev=&free_list; while (free_p) { VALGRIND_MAKE_MEM_DEFINED(free_p, sizeof(*free_p)); if ((char*)block + sizeof(struct estat) == (char*)free_p) { /* Copy data */ block->count = free_p->count+1; block->next = free_p->next; if (prev != &free_list) VALGRIND_MAKE_MEM_DEFINED(prev, sizeof(*prev)); *prev = block; if (prev != &free_list) VALGRIND_MAKE_MEM_NOACCESS(prev, sizeof(*prev)); break; } if ((char*)block == (char*)free_p+sizeof(struct estat)*free_p->count) { free_p->count++; break; } prev=&free_p->next; free_p2=free_p; free_p=*prev; VALGRIND_MAKE_MEM_NOACCESS(free_p2, sizeof(*free_p2)); } if (free_p) { DEBUGP("merged to %p; now size %d", block, free_p->count); VALGRIND_MAKE_MEM_NOACCESS(free_p, sizeof(*free_p)); } else { block->next=free_list; block->count=1; free_list=block; DEBUGP("new entry in free list"); } VALGRIND_MAKE_MEM_NOACCESS( block, sizeof(struct estat)); *sts_p=NULL; ex: return status; } /** -. * Only one the 3 specifications my be given; the other 2 values must be a * \c NULL resp. \a UNKNOWN_INDEX. * * If the entry is given via \a sts, but is not found, \c ENOENT is * returned. * * If an invalid index is given, we mark a \a BUG(). * * \todo Use a binary search in the \c by_inode and \c by_name arrays. */ int ops__delete_entry(struct estat *dir, struct estat *sts, int index_byinode, int index_byname) { int i; int status; BUG_ON( (sts ? 1 : 0) + (index_byinode >=0 ? 1 : 0) + (index_byname >=0 ? 1 : 0) != 1, "must have exactly 1 definition!!!"); BUG_ON(!S_ISDIR(dir->st.mode), "can remove only from directory"); if (!sts) { if (index_byinode != UNKNOWN_INDEX) { BUG_ON(index_byinode > dir->entry_count, "i > c"); sts=dir->by_inode[index_byinode]; } else { BUG_ON(index_byname > dir->entry_count, "i > c"); sts=dir->by_name[index_byname]; } } i=0; if (dir->by_inode) { if (index_byinode == UNKNOWN_INDEX) { /* Maybe use ops__find_entry_byinode ? Would be faster for large * arrays - but the bsearch wouldn't return an index, only a pointer. 
* */ for(index_byinode=dir->entry_count-1; index_byinode>=0; index_byinode--) if (dir->by_inode[index_byinode] == sts) break; BUG_ON(index_byinode == UNKNOWN_INDEX); } ops___move_array(dir->by_inode, index_byinode, dir->entry_count); i=1; } if (dir->by_name) { if (index_byname == UNKNOWN_INDEX) { /* Maybe use ops__find_entry_byname? Would do a binary search, but * using string compares. */ for(index_byname=dir->entry_count-1; index_byname>=0; index_byname--) if (dir->by_name[index_byname] == sts) break; BUG_ON(index_byname == UNKNOWN_INDEX); } ops___move_array(dir->by_name, index_byname, dir->entry_count); i=1; } STOPIF( ops__free_entry(&sts), NULL); ex: DEBUGP("entry count was %d; flag to remove is %d", dir->entry_count, i); if (i) dir->entry_count--; return i ? 0 : ENOENT; } /** -. * An entry is marked by having estat::to_be_ignored set; and such entries * are removed here. * * If \a fast_mode is set, the entries are get removed from the list are * not free()d, nor do the pointer arrays get resized. */ int ops__free_marked(struct estat *dir, int fast_mode) { struct estat **src, **dst; int i, new_count; int status; BUG_ON(!S_ISDIR(dir->st.mode)); status=0; IF_FREE(dir->by_name); src=dst=dir->by_inode; new_count=0; for(i=0; ientry_count; i++) { if (!(*src)->to_be_ignored) { *dst=*src; dst++; new_count++; } else { if (!fast_mode) STOPIF( ops__free_entry(src), NULL); } src++; } if (new_count != dir->entry_count) { if (!fast_mode) { /* resize by_inode - should never give NULL. */ STOPIF( hlp__realloc( &dir->by_inode, sizeof(*(dir->by_inode)) * (new_count+1) ), NULL); } dir->by_inode[new_count]=NULL; dir->entry_count=new_count; } ex: return status; } /** -. * Does not modify path. * * The \a flags parameter tells about the policy regarding tree walking. * * For \a add, \a unversion we need to create the given path with the * specified flags; in \a add it should exist, for unversion is needs not. 
* For \a diff / \a info we only walk the tree without creating or checking * for current status (info, repos/repos diff for removed files). * For \a prop_set / \a prop_get / \a prop_list we need an existing path, * which might be not versioned currently. * For \a revert we need to look in the tree, and find removed entries, * too. * In \a waa__partial_update() (status check with given subtrees) we create * the paths as necessary. If they do not exist we'd like to print them as * removed. * * So we need to know: * - Create paths or walk only (\a OPS__CREATE) * - Has the given path to exist? (\a OPS__FAIL_NOT_LIST) * - Should we update this entry, or all below? (OPS__ON_UPD_LIST) * - Which flags the newly created entries should get (in \a sts_flags) * */ int ops__traverse(struct estat *current, char *fullpath, int flags, int sts_flags, struct estat **ret) { int status; char *next_part; struct estat *sts; int quit; char *copy, *path; status=0; STOPIF( hlp__strdup( ©, fullpath), NULL); path=copy; quit=0; while (path) { next_part=(char*)ops___split_fnpart(path); BUG_ON(!path[0]); /* Check special cases. */ if (path[0] == '.' && path[1] == '\0') { /* This happens for the start of a wc-relative path: ./dir/file */ path=next_part; continue; } if (path[0] == '.' && path[1] == '.' && path[2] == '\0') { /* This shouldn't happen; the paths being worked on here should be * normalized. */ BUG("Path '%s' includes '..'!", fullpath); } /* Look in this directory for the wanted entry. * If there's an ignored entry, we'll take that, too. */ STOPIF( ops__find_entry_byname(current, path, &sts, 1), NULL); if (!sts) { /* If we may not create it, print the optional warning, and possibly * return an error. * Print no error message, as the caller may want to catch this. */ if (!(flags & OPS__CREATE)) { if (flags & OPS__FAIL_NOT_LIST) STOPIF_CODE_ERR( 1, ENOENT, "!The entry '%s' was not found.", fullpath); status=ENOENT; goto ex; } /* None found, make a new. 
*/ STOPIF( ops__allocate(1, &sts, NULL), NULL); STOPIF( hlp__strdup( &sts->name, path), NULL); if (flags & OPS__ON_UPD_LIST) STOPIF( waa__insert_entry_block(sts, 1), NULL); /* Fake a directory node. */ sts->st.mode=S_IFDIR | 0700; sts->st.size=0; sts->entry_count=0; sts->parent=current; /* Add that directory with the next commit. */ sts->flags=sts_flags | RF_ISNEW; STOPIF( ops__new_entries(current, 1, &sts), NULL); } current=sts; path=next_part; } *ret=current; ex: IF_FREE(copy); return status; } /** -. * * The parent directory should already be done, so that removal of whole * trees is done without doing unneeded \c lstat()s. * * Depending on \c o_chcheck a file might be checked for changes by a MD5 * comparision. * * Per default \c only_check_status is not set, and the data from \c * lstat() is written into \a sts. Some functions need the \b old values * and can set this flag; then only \c entry_status is modified. * * If \a output is not NULL, then it is overwritten, and \a sts->st is not * changed - independent of \c only_check_status. In case of a removed * entry \a *output is not changed. */ int ops__update_single_entry(struct estat *sts, struct sstat_t *output) { int status; struct sstat_t st; int i; char *fullpath; STOPIF( ops__build_path(&fullpath, sts), NULL); /* If we see that the parent has been removed, there's no need * to check this entry - the path will surely be invalid. */ if (sts->parent) if (sts->parent->entry_status & FS_REMOVED) { goto removed; } /* Check for current status */ status=hlp__lstat(fullpath, &st); if (status) { DEBUGP("lstat whines %d", status); /* only valid error is ENOENT - then this entry has been removed */ /* If we did STOPIF_CODE_ERR(status != ENOENT ...), then status * would be overwritten with the value of the comparison. 
*/ if (abs(status) != ENOENT) STOPIF(status, "cannot lstat(%s)", fullpath); removed: /* Re-set the values, if needed */ if (st.mode) memset(&st, 0, sizeof(st)); sts->entry_status=FS_REMOVED; /* Only ENOENT gets here, and that's ok. */ status=0; } else { /* Entry exists. Check for changes. */ sts->entry_status=ops__stat_to_action(sts, &st); /* May we print a '?' ? */ if ( ((opt__get_int(OPT__CHANGECHECK) & CHCHECK_FILE) && (sts->entry_status & FS_LIKELY)) || (opt__get_int(OPT__CHANGECHECK) & CHCHECK_ALLFILES) ) { /* If the type changed (symlink => file etc.) there's no 'likely' - * the entry *was* changed. * So if we get here, we can check either type - st or sts->st. */ if (S_ISREG(st.mode) || S_ISLNK(st.mode)) { /* make sure, one way or another */ STOPIF( cs__compare_file(sts, fullpath, &i), NULL); if (i>0) sts->entry_status= (sts->entry_status & ~ FS_LIKELY) | FS_CHANGED; else if (i==0) sts->entry_status= sts->entry_status & ~(FS_LIKELY | FS_CHANGED); } /* Directories will be checked later, on finishing their children; * devices have already been checked, and other types are not * allowed. */ } } /* Now we've compared we take the new values. * Better for display, needed for commit (current values) */ /* Before an update (and some other operations) we only set * sts->entry_status - to keep the old values intact. */ if (output) *output=st; else if (action->overwrite_sts_st) sts->st=st; DEBUGP("known %s: action=%X, flags=%X, mode=0%o, status=%d", fullpath, sts->entry_status, sts->flags, sts->st.mode, status); sts->local_mode_packed = MODE_T_to_PACKED(st.mode); ex: return status; } /** Set the estat::do_* bits, depending on the parent. * Should not be called for the root. * */ inline void ops___set_todo_bits(struct estat *sts) { /* For recursive operation: If we should do the parent completely, we do * the sub-entries, too. 
*/ if (opt_recursive>0) sts->do_userselected |= sts->parent->do_userselected; /* For semi-recursive operation: Do the child, if the parent was * wanted. */ if (opt_recursive>=0) sts->do_this_entry |= sts->parent->do_userselected | sts->do_userselected; } /** -. * Should not be called for the root. */ void ops__set_todo_bits(struct estat *sts) { /* We don't know any better yet. */ sts->do_filter_allows=1; sts->do_filter_allows_done=1; if (sts->parent) ops___set_todo_bits(sts); DEBUGP("user_sel,this=%d.%d parent=%d.%d", sts->do_userselected, sts->do_this_entry, sts->parent ? sts->parent->do_userselected : 0, sts->parent ? sts->parent->do_this_entry : 0); return; } /** -. * * Calls \c ops__set_to_handle_bits() and maybe \c * ops__update_single_entry(), and depending on the filter settings \c * sts->do_this_entry might be cleared. * */ int ops__update_filter_set_bits(struct estat *sts) { int status; struct sstat_t stat; status=0; if (sts->parent) ops__set_todo_bits(sts); if (sts->do_this_entry) { STOPIF( ops__update_single_entry(sts, &stat), NULL); if (ops__calc_filter_bit(sts)) { /* We'd have an invalid value if the entry is removed. */ if ((sts->entry_status & FS_REPLACED) != FS_REMOVED) if (action->overwrite_sts_st) sts->st = stat; } } DEBUGP("filter says %d", sts->do_filter_allows); ex: return status; } /** -. * * We have to preserve the \c parent pointer and the \c name of \a dest. * */ void ops__copy_single_entry(struct estat *src, struct estat *dest) { dest->st=src->st; dest->repos_rev=SVN_INVALID_REVNUM; /* parent is kept */ /* name is kept */ /* But, it being a non-committed entry, it has no URL yet. */ dest->url=NULL; if (S_ISDIR(dest->st.mode)) { #if 0 /* Currently unused. */ dest->by_inode=NULL; dest->by_name=NULL; dest->entry_count=0; dest->strings=NULL; dest->other_revs=0; dest->to_be_sorted=0; #endif } else { memcpy(dest->md5, src->md5, sizeof(dest->md5)); #if 0 { memset(dest->md5, 0, sizeof(dest->md5)); /* Currently unused. 
*/ dest->change_flag=CF_NOTCHANGED; dest->decoder=src->decoder; dest->has_orig_md5=src->has_orig_md5; } #endif } #if 0 /* The temporary area is mostly void, but to be on the safe side ... */ dest->child_index=0; dest->dir_pool=NULL; #endif dest->flags=RF_ISNEW | RF_COPY_SUB; /* Gets recalculated on next using */ dest->path_len=0; /* The entry is not marked as to-be-ignored ... that would change the * entry type, and we have to save it anyway. */ dest->entry_status=FS_NEW; dest->remote_status=FS_NEW; dest->cache_index=0; dest->decoder_is_correct=src->decoder_is_correct; dest->was_output=0; dest->do_userselected = dest->do_child_wanted = dest->do_this_entry = 0; dest->arg=NULL; } /** -. * \a only_A, \a both, and \a only_B are called, then \a for_every (if not * \c NULL). * * This builds and loops throught the sts::by_name lists, so modifying them * must be done carefully, to change only the elements already processed. * * Returning an error from any function stops the loop. * */ int ops__correlate_dirs(struct estat *dir_A, struct estat *dir_B, ops__correlate_fn1_t only_A, ops__correlate_fn2_t both, ops__correlate_fn1_t only_B, ops__correlate_fn2_t for_every) { int status, comp; struct estat **list_A, **list_B; status=0; DEBUGP("correlating %s and %s", dir_A->name, dir_B->name); /* We compare the sorted list of entries. */ STOPIF( dir__sortbyname(dir_A), NULL); STOPIF( dir__sortbyname(dir_B), NULL); list_A=dir_A->by_name; list_B=dir_B->by_name; while (*list_A) { if (!*list_B) goto a_only; comp=dir___f_sort_by_name( list_A, list_B ); DEBUGP("comp %s, %s => %d", (*list_A)->name, (*list_B)->name, comp); if (comp == 0) { /* Identical names */ if (both) STOPIF( both(*list_A, *list_B), NULL); if (for_every) STOPIF( for_every(*list_A, *list_B), NULL); list_A++; list_B++; } else if (comp > 0) { /* *list_B > *list_A; entry is additional in list_B. 
*/ if (only_B) STOPIF( only_B(*list_B, list_B), NULL); if (for_every) STOPIF( for_every(NULL, *list_B), NULL); list_B++; } else { a_only: /* *list_A < *list_B; so this entry does not exist in dir_B. */ if (only_A) STOPIF( only_A(*list_A, list_A), NULL); if (for_every) STOPIF( for_every(*list_A, NULL), NULL); list_A++; } } /* Do remaining list_B entries, if necessary. */ if (only_B || for_every) { while (*list_B) { if (only_B) STOPIF( only_B(*list_B, list_B), NULL); if (for_every) STOPIF( for_every(NULL, *list_B), NULL); list_B++; } } ex: return status; } /** -. * The specified stream gets rewound, read up to \a max bytes (sane default * for 0), and returned (zero-terminated) in \a *buffer allocated in \a * pool. * * The real length can be seen via \a real_len. * * If \a filename is given, the file is removed. * * If \a pool is \c NULL, the space is \c malloc()ed and must be \c free()d * by the caller. * */ /* mmap() might be a bit faster; but for securities' sake we put a \0 at * the end, which might not be possible with a readonly mapping (although * it should be, by using MAP_PRIVATE - but that isn't available with * apr_mmap_create(), at least with 1.2.12). */ int ops__read_special_entry(apr_file_t *a_stream, char **data, int max, ssize_t *real_len, char *filename, apr_pool_t *pool) { int status; apr_off_t special_len, bof; apr_size_t len_read; char *special_data; status=0; special_len=0; /* Remove temporary file. Can be done here because we still have the * handle open. */ if (filename) STOPIF_CODE_ERR( unlink(filename) == -1, errno, "Cannot remove temporary file \"%s\"", filename); /* Get length */ STOPIF( apr_file_seek(a_stream, APR_CUR, &special_len), NULL); /* Some arbitrary limit ... 
*/ if (!max) max=8192; STOPIF_CODE_ERR( special_len > max, E2BIG, "!The special entry \"%s\" is too long (%llu bytes, max %llu).\n" "Please contact the dev@ mailing list.", filename, (t_ull)special_len, (t_ull)max); /* Rewind */ bof=0; STOPIF( apr_file_seek(a_stream, APR_SET, &bof), NULL); if (pool) /* Aborts if no memory available. */ special_data= apr_palloc( pool, special_len+1); else STOPIF( hlp__alloc( &special_data, special_len+1), NULL); /* Read data. */ len_read=special_len; STOPIF( apr_file_read( a_stream, special_data, &len_read), NULL); STOPIF_CODE_ERR( len_read != special_len, APR_EOF, "Reading was cut off at byte %llu of %llu", (t_ull)len_read, (t_ull)special_len); special_data[len_read]=0; DEBUGP("got special value %s", special_data); if (real_len) *real_len=special_len; *data=special_data; ex: return status; } /** -. * */ int ops__are_children_interesting(struct estat *dir) { struct estat tmp; tmp.parent=dir; tmp.do_this_entry = tmp.do_userselected = tmp.do_child_wanted = 0; ops___set_todo_bits(&tmp); return tmp.do_this_entry; } /** -. * * This means applying the target URL, and storing the auto-properties. * * Optionally the property database can be returned in \a props. 
* */ int ops__apply_group(struct estat *sts, hash_t *props, apr_pool_t *pool) { int status; struct grouping_t *group; int own_pool; status=0; own_pool=0; if (props) *props=NULL; if (!sts->match_pattern) goto return_prop; group= sts->match_pattern->group_def; BUG_ON(!group); DEBUGP("applying %s to %s", sts->match_pattern->group_name, sts->name); if (group->auto_props) { if (!pool) { own_pool=1; STOPIF( apr_pool_create_ex(&pool, global_pool, NULL, NULL), NULL); } STOPIF( prp__set_from_aprhash(sts, group->auto_props, STORE_IN_FS, props, pool), NULL); sts->flags |= RF_PUSHPROPS; } if (!sts->url) sts->url=group->url; sts->to_be_ignored=group->is_ignore; sts->match_pattern=NULL; return_prop: if (props && !*props) STOPIF( prp__open_byestat( sts, GDBM_WRCREAT | HASH_REMEMBER_FILENAME, props), NULL); ex: if (own_pool) apr_pool_destroy(pool); return status; } /** -. */ int ops__make_shadow_entry(struct estat *sts, int flags) { int status; struct estat *copy; BUG_ON(sts->old); STOPIF( ops__allocate(1, ©, NULL), NULL); memcpy(copy, sts, sizeof(*copy)); if (flags == SHADOWED_BY_REMOTE) { sts->remote_status=FS_REPLACED; copy->remote_status=FS_REMOVED; } else if (flags == SHADOWED_BY_LOCAL) { sts->remote_status=FS_REMOVED; copy->remote_status=FS_REPLACED; } /* The by_inode and by_name arrays of the parent might point to the old * location; rather than searching and changing them, we simply copy the * old data, and clean the references in sts. */ copy->cache_index=0; sts->old=copy; ex: return status; } #ifndef ENABLE_RELEASE /** -. * We don't want that in a release build; but debug (and default) uses * \c DEBUGP(), so we might need this. */ void DEBUGP_dump_estat(struct estat *sts) { char *path; /* We don't want to return failures (like ENOMEM), because then this * function would have to be wrapped in STOPIF(), which would look ugly * compared to the other DEBUGP() calls. * So if we can't get the path we just use some part of the filename. 
*/ /* We could also use * char path[sts->path_len+2]; * ops__build_path2(...) * which cannot return an ENOMEM, but would always recompute the path * (and not use the cache). */ if (ops__build_path(&path, sts)) DEBUGP("*** Dump of ... %s/%s", sts->parent ? sts->parent->name : "/", sts->name); else DEBUGP("*** Dump of %s", path); DEBUGP("flags=%s", st__flags_string_fromint(sts->flags)); DEBUGP("entry_status=%s", st__status_string_fromint(sts->entry_status)); DEBUGP("remote_status=%s", st__status_string_fromint(sts->remote_status)); DEBUGP("types: st=%s, local=%s, new=%s, old=%s", st__type_string(sts->st.mode), st__type_string(PACKED_to_MODE_T(sts->local_mode_packed)), st__type_string(PACKED_to_MODE_T(sts->new_rev_mode_packed)), st__type_string(PACKED_to_MODE_T(sts->old_rev_mode_packed))); if (S_ISDIR(sts->st.mode)) DEBUGP("directory: %d children", sts->entry_count); else DEBUGP("non-dir: decoder=%s, md5=%s", sts->decoder, cs__md5tohex_buffered(sts->md5)); /* Devices are not seen that often; so we accept a bogus size output. */ DEBUGP("mode=0%o size=%llu uid=%u gid=%u inode=%llu", sts->st.mode & 07777, (t_ull)sts->st.size, sts->st.uid, sts->st.gid, (t_ull)sts->st.ino); DEBUGP("others:%s%s%s%s%s%s%s%s%s", sts->old ? " old" : "", sts->was_output ? " was_output" : "", sts->decoder_is_correct ? " decoder_ok" : "", sts->do_userselected ? " do_usersel" : "", sts->do_child_wanted ? " do_chld_w" : "", sts->do_this_entry ? " do_this" : "", sts->do_filter_allows ? " filter_allows" : "", sts->do_filter_allows_done ? " filter_allows_done" : "", sts->to_be_ignored ? " ignored" : ""); } #endif fsvs-1.2.6/src/cp_mv.h0000644000202400020240000000153511026634740013522 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
************************************************************************/ #ifndef __CP_MV_H #define __CP_MV_H #include "actions.h" /** \file * \ref cp and \ref mv actions header file. * */ /** For defining copyfrom relations. */ work_t cm__work; /** For automatically finding relations. */ work_t cm__detect; /** For removing copyfrom relations. */ work_t cm__uncopy; /** Returns the source of the given entry. */ int cm__get_source(struct estat *sts, char *name, char **src_name, svn_revnum_t *src_rev, int register_for_cleanup); #endif fsvs-1.2.6/src/props.h0000644000202400020240000000713312467104255013564 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __PROPS_H__ #define __PROPS_H__ /** \file * Property handling header file - \ref prop-set, \ref prop-get, \ref * prop-list. */ #include #include "global.h" #include "hash_ops.h" /** \name Opening a property file. * * The flags for this operations are defined in \c GDBM(3gdbm): * - GDBM_WRCREAT * - GDBM_READER * - GDBM_WRITER * - GDBM_NEWDB * */ /** @{ */ /** Open a property file, by WC-path. */ int prp__open_byname(char *wcfile, int gdbm_mode, hash_t *db); /** Open a property file, by struct estat. */ int prp__open_byestat(struct estat *sts, int gdbm_mode, hash_t *db); /** @} */ /** Set a property by name and data/datalen. */ int prp__set(hash_t db, const char *name, const char *data, int datalen); /** Set a property by svn_string_t. */ int prp__set_svnstr(hash_t db, const char *name, const svn_string_t *utf8_value); /** Bitmasks for prp__set_from_aprhash() operation. 
*/ enum prp__set_from_aprhash_e { DEFAULT=0, STORE_IN_FS=1, ONLY_KEEP_USERDEF=2, }; /** Writes the given set of properties of \a sts into its \ref prop file. * */ int prp__set_from_aprhash(struct estat *sts, apr_hash_t *props, enum prp__set_from_aprhash_e flags, hash_t *props_db, apr_pool_t *pool); /** Wrapper functions, if we need to have some compatibility layer. */ /** @{ */ /** Open a database, path specified through \a wcfile and \a name. */ int prp__db_open_byname(char *wcfile, int flags, char *name, hash_t *db); /** Get a value, addressed by a string; key length is calculated inclusive * the \c \\0. */ int prp__get(hash_t db, const char *keycp, datum *value); /** Store the value; basic function. */ int prp__store(hash_t db, datum key, datum value); /** Get first key. */ static inline int prp__first(hash_t db, datum *key) { int status; status=hsh__first(db, key); #ifdef ENABLE_DEBUG if (!status) BUG_ON(key->dptr[key->dsize-1] != 0, "Not terminated!"); #endif return status; } /** Get next key. */ static inline int prp__next(hash_t db, datum *key, const datum *oldkey) { int status; status=hsh__next(db, key, oldkey); #ifdef ENABLE_DEBUG if (!status) BUG_ON(key->dptr[key->dsize-1] != 0, "Not terminated!"); #endif return status; } /** Fetch a value. */ static inline int prp__fetch(hash_t db, datum key, datum *value) { int status; if (!db) return ENOENT; status=hsh__fetch(db, key, value); #ifdef ENABLE_DEBUG if (!status) BUG_ON(value->dptr[value->dsize-1] != 0, "Not terminated!"); #endif DEBUGP("read property %s=%s", key.dptr, value->dptr); return status; } /** Open, fetch, close a property hash corresponding to \a sts and \a name. * */ int prp__open_get_close(struct estat *sts, char *name, char **data, int *len); int prp__unlink_db_for_estat(struct estat *sts); /** @} */ /** Prop-get worker function. */ work_t prp__g_work; /** Prop-set worker function. */ work_t prp__s_work; /** Prop-list worker function. 
*/ work_t prp__l_work; /** Value string for to-be-removed properties. */ extern const char prp___to_be_removed_value[]; /** Test function for to-be-removed properties. */ static inline int prp__prop_will_be_removed(datum data) { return strcmp(data.dptr, prp___to_be_removed_value) == 0; } int prp__sts_has_no_properties(struct estat *sts, int *result); #endif fsvs-1.2.6/src/interface.h0000644000202400020240000001136611320631774014362 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __INTERFACE_H__ #define __INTERFACE_H__ /** \file * Interface to the outside. * */ /** \defgroup interface The interface to the outside world. * \ingroup compat * * Here the interfaces to the outside are defined - * environment variables and similar. */ /** @{ */ /** If this variable has a numeric value other than 0, the debuglevel is * set even before commandline parsing. */ #define FSVS_DEBUG_ENV "FSVS_DEBUGLEVEL" /** The diff program to use. * * It's arguments are similar to * \code * diff -u file1 --label1 file2 --label2 * \endcode * If you use another program, expect these parameters. * * An exit status of 1 is ignored; the meaning "file has changed" * is assumed. * */ #define DIFF_ENV "FSVS_DIFF" /** @} */ /** The default WAA path. */ #define DEFAULT_WAA_PATH "/var/spool/fsvs" /** The default CONF path. */ #define DEFAULT_CONF_PATH "/etc/fsvs" /** The default subversion config directory (eg for authentication data), * relative to $FSVS_CONF. */ #define DEFAULT_CONFIGDIR_SUB "/svn" /** The directory below $CONF/$WC and $CONF for the grouping definitions. 
* */ #define CONFIGDIR_GROUP "groups" /** \name List of environment variables used for a chroot jail. * Note that these are not \c \#ifdef - marked, as we'd like to use * off-the-shelf binaries from newer distributions without modifications! * */ /** @{ */ /** The file descriptor number where FSVS can find the "original", * "normal" root directory. */ #define CHROOTER_ROOT_ENV "FSVS_CHROOT_ROOT" /** Which libraries should be preloaded? Space-separated list. */ #define CHROOTER_LIBS_ENV "FSVS_CHROOT_LIBS" /** The old working directory file descriptor */ #define CHROOTER_CWD_ENV "FSVS_CHROOT_CWD" /** @} */ /** \defgroup exp_env Exported environment variables * \ingroup interface * Programs started by FSVS, like \ref o_diff or in the \ref * FSVS_PROP_COMMIT_PIPE "fsvs:commit-pipe", get some environment variables * set, to help them achieve their purpose. * * */ /** @{ */ /** The (relative) path of the current entry. */ #define FSVS_EXP_CURR_ENTRY "FSVS_CURRENT_ENTRY" /** The configuration directory for the current working copy. */ #define FSVS_EXP_WC_CONF "FSVS_WC_CONF" /** The current working copy root directory. */ #define FSVS_EXP_WC_ROOT "FSVS_WC_ROOT" /** The revision we're updating or reverting to. */ #define FSVS_EXP_TARGET_REVISION "FSVS_TARGET_REVISION" /** \addtogroup exp_env * * Apart from these \c $FSVS_CONF and \c $FSVS_WAA are always set. * * Others might be useful, but I'm waiting for a specific user telling her needs before implementing them. * - Base URL, and/or URL for current entry \n * For multi-URL only the topmost? Or all? * - Other filenames for merge and diff? * - \c BASE, \c HEAD and other revisions * * Do you need something? Just ask me. * @} */ /** \name Manber-parameters * * These should be written to a property for big files, * so that they can be easily read before fetching the file. * We need the same values for fetching as were used on storing - * else we cannot do some rsync-like update. 
* * \note Currently they are used only for checking whether a * file has changed locally. Here they should be written into * the \a md5s file. */ /** @{ */ /** How many bits must be zero in the CRC to define that location * as a block border. * See checksum.c for details. * * 16 bits give blocks of 64kB (on average) ... * we use 17 for 128kB. */ #define CS__APPROX_BLOCKSIZE_BITS (17) /** The bit mask for comparing. */ #define CS__MANBER_BITMASK ((1 << CS__APPROX_BLOCKSIZE_BITS)-1) /** The modulus. Leave at 32bit. */ #define CS__MANBER_MODULUS (-1) /** The prime number used for generation of the hash. */ #define CS__MANBER_PRIME (31) /** The number of bytes for the block comparison. * Must be a power of 2 for performance reasons. */ #define CS__MANBER_BACKTRACK (2*1024) #if (CS__MANBER_BACKTRACK-1) & CS__MANBER_BACKTRACK #error CS__MANBER_BACKTRACK must be a power of 2! #endif /** The minimum filesize, at or above which files get * tested in blocks. * Makes sense to have block size as minimum, but is not needed. */ #define CS__MIN_FILE_SIZE (256*1024) /** @} */ #ifdef HAVE_LCHOWN #define CHOWN_FUNC lchown #define CHOWN_BOOL 1 #else #define CHOWN_FUNC chown #define CHOWN_BOOL 0 #endif #ifdef HAVE_LUTIMES #define UTIMES_FUNC lutimes #define UTIMES_BOOL 1 #else #define UTIMES_FUNC utimes #define UTIMES_BOOL 0 #endif #endif fsvs-1.2.6/src/actions.h0000644000202400020240000001217111135025671014052 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __ACTION_H__ #define __ACTION_H__ #include "global.h" /** \file * Action handling header file. */ /** \anchor callbacks \name callbacks Action callbacks. 
*/ /** @{ */ /** Callback that gets called for each entry. * * Entries get read from the entry list in global [device, inode] order; in * the normal action callback (\ref actionlist_t::local_callback and \ref * actionlist_t::repos_feedback) the parent entries are handled \b after child * entries (but the parent \c struct \ref estat "estats" exist, of course), * so that the list of children is correct. * * * See also \ref waa__update_tree. * * The full (wc-based) path can be built as required by \ref * ops__build_path().*/ /* unused, wrong doc * As in the entry list file (\ref dir) there is a callback \ref * actionlist_t::early_entry that's done \b before the child entries; * Clearing \ref estat::do_this_entry and \ref estat::do_tree in this * callback will skip calling \ref actionlist_t::local_callback for this and * the child entries (see \ref ops__set_to_handle_bits()). */ typedef int (action_t)(struct estat *sts); /** Callback for initializing the action. */ typedef int (work_t)(struct estat *root, int argc, char *argv[]); /** One after all progress has been made. */ typedef int (action_uninit_t)(void); /** @} */ /** The action wrapper. */ action_t ac__dispatch; /** The always allowed action - printing general or specific help. */ work_t ac__Usage; /** For convenience: general help, and help for the current action. */ #define ac__Usage_dflt() do { ac__Usage(NULL, 0, NULL); } while (0) /** Print help for the current action. */ #define ac__Usage_this() do { ac__Usage(NULL, 1, (char**)action->name); } while (0) /** Definition of an \c action. */ struct actionlist_t { /** Array of names this action will be called on the command line. */ const char** name; /** The function doing the setup, tear down, and in-between - the * worker main function. * * See \ref callbacks. */ work_t *work; /** The output function for repository accesses. * Currently only used in cb__record_changes(). * * See \ref callbacks. */ action_t *repos_feedback; /** The local callback. 
* Called for each entry, just after it's been checked for changes. * Should give the user feedback about individual entries and what * happens with them. * * For directories this gets called when they're finished; so immediately * for empty directories, or after all children are loaded. * \note A removed directory is taken as empty (as no more elements are * here) - this is used in \ref revert so that revert gets called twice * (once for restoring the directory itself, and again after its * populated). * * See \ref callbacks. */ action_t *local_callback; /** The progress reporter needs a callback to clear the line after printing * the progress. */ action_uninit_t *local_uninit; /** A pointer to the verbose help text. */ char const *help_text; /** Flag for usage in the action handler itself. */ int i_val; /** Is this an import or export, ie do we need a WAA? * We don't cache properties, manber-hashes, etc., if is_import_export * is set. */ int is_import_export:1; /** This is set if it's a compare operation (remote-status). * The properties are parsed, but instead of writing them into the * \c struct \c estat they are compared, and \c entry_status set * accordingly. */ int is_compare:1; /** Whether we need fsvs:update-pipe cached. * Do we install files from the repository locally? Then we need to know * how to decode them. * We don't do that in every case, to avoid wasting memory. */ int needs_decoder:1; /** Whether the entries should be filtered on opt_filter. */ int only_opt_filter:1; /** Whether user properties should be stored in estat::user_prop while * running cb__record_changes(). */ int keep_user_prop:1; /** Makes ops__update_single_entry() keep the children of removed * directories. */ int keep_children:1; /** Says that we want the \c estat::st overwritten while looking for * local changes. */ int overwrite_sts_st:1; /** Whether waa__update_dir() may happen. * (It must not for updates, as we'd store local changes as "from * repository"). 
*/ int do_update_dir:1; /** Says that this is a read-only operation (like "status"). */ int is_readonly:1; }; /** Find the action structure by name. * * Returns in \c * \a action_p the action matching (a prefix of) \a cmd. * */ int act__find_action_by_name(const char *cmd, struct actionlist_t **action_p); /** Array of all known actions. */ extern struct actionlist_t action_list[]; /** Gets set to the \e current action after commandline parsing. */ extern struct actionlist_t *action; /** How many actions we know. */ extern const int action_list_count; #endif fsvs-1.2.6/src/build.h0000644000202400020240000000156111100577124013507 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2008 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __BUILD_H__ #define __BUILD_H__ #include "actions.h" #include "global.h" /** \file * \ref _build_new_list action header file. * * This action is not normally used; as it throws away data from the WAA * it is dangerous if simply called without \b exactly knowing the * implications. * * The only use is for debugging - all other disaster recovery is better done * via \c sync-repos. * */ /** Build action. */ work_t bld__work; /** Delay action. */ work_t delay__work; #endif fsvs-1.2.6/src/cp_mv.c0000644000202400020240000013135012043532166013513 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
************************************************************************/ #include #include #include "global.h" #include "cp_mv.h" #include "status.h" #include "est_ops.h" #include "url.h" #include "checksum.h" #include "options.h" #include "props.h" #include "cache.h" #include "helper.h" #include "waa.h" /** \file * \ref cp and \ref mv actions. * * Various thoughts ... * - Can we construct relations between 2 new files? * We'd just have to write the MD5 of the new files into the hash, then * we'd find the first file on commit of the 2nd file ... and we see that * the other one is new, too. \n * But see \ref commit_2_revs "creating 2 revisions on commit". * * */ /** * \addtogroup cmds * * \anchor mv * \section cp * * \code * fsvs cp [-r rev] SRC DEST * fsvs cp dump * fsvs cp load * \endcode * * The \c copy command marks \c DEST as a copy of \c SRC at revision \c * rev, so that on the next commit of \c DEST the corresponding source path * is sent as copy source. * * The default value for \c rev is \c BASE, ie. the revision the \c SRC * (locally) is at. * * Please note that this command works \b always on a directory \b * structure - if you say to copy a directory, the \b whole structure is * marked as copy. That means that if some entries below the copy are * missing, they are reported as removed from the copy on the next commit. * \n (Of course it is possible to mark files as copied, too; non-recursive * copies are not possible, but can be emulated by having parts of the * destination tree removed.) * * \note TODO: There will be differences in the exact usage - \c copy will * try to run the \c cp command, whereas \c copied will just remember the * relation. * * If this command are used without parameters, the currently defined * relations are printed; please keep in mind that the \b key is the * destination name, ie. the 2nd line of each pair! 
* * The input format for \c load is newline-separated - first a \c SRC line, * followed by a \c DEST line, then an line with just a dot (".") * as delimiter. If you've got filenames with newlines or other special * characters, you have to give the paths as arguments. * * Internally the paths are stored relative to the working copy base * directory, and they're printed that way, too. * * Later definitions are \b appended to the internal database; to undo * mistakes, use the \ref uncopy action. * * \note Important: User-defined properties like \ref * FSVS_PROP_COMMIT_PIPE "fsvs:commit-pipe" are \b not copied to the * destinations, because of space/time issues (traversing through entire * subtrees, copying a lot of property-files) and because it's not sure * that this is really wanted. \b TODO: option for copying properties? * * \todo -0 like for xargs? * * \todo Are different revision numbers for \c load necessary? Should \c * dump print the source revision number? * * \todo Copying from URLs means update from there * * \note As subversion currently treats a rename as copy+delete, the \ref * mv command is an alias to \ref cp. * * If you have a need to give the filenames \c dump or \c load as first * parameter for copyfrom relations, give some path, too, as in * "./dump". * * \note The source is internally stored as URL with revision number, so * that operations like these \code * $ fsvs cp a b * $ rm a/1 * $ fsvs ci a * $ fsvs ci b * \endcode * work - FSVS sends the old (too recent!) revision number as source, and * so the local filelist stays consistent with the repository. \n But it is * not implemented (yet) to give an URL as copyfrom source directly - we'd * have to fetch a list of entries (and possibly the data!) from the * repository. * * \todo Filter for dump (patterns?). * */ /** * \addtogroup cmds * * \section cpfd copyfrom-detect * * \code * fsvs copyfrom-detect [paths...] 
* \endcode * * This command tells FSVS to look through the new entries, and see * whether it can find some that seem to be copied from others already * known. \n * It will output a list with source and destination path and why it could * match. * * This is just for information purposes and doesn't change any FSVS state, * (TODO: unless some option/parameter is set). * * The list format is on purpose incompatible with the \c load * syntax, as the best match normally has to be taken manually. * * \todo some parameter that just prints the "best" match, and outputs the * correct format. * * If \ref glob_opt_verb "verbose" is used, an additional value giving the * percentage of matching blocks, and the count of possibly copied entries * is printed. * * Example: * \code * $ fsvs copyfrom-list -v * newfile1 * md5:oldfileA * newfile2 * md5:oldfileB * md5:oldfileC * md5:oldfileD * newfile3 * inode:oldfileI * manber=82.6:oldfileF * manber=74.2:oldfileG * manber=53.3:oldfileH * ... * 3 copyfrom relations found. * \endcode * * The abbreviations are: * * *
\e md5 * The \b MD5 of the new file is identical to that of one or more already * committed files; there is no percentage. * *
\e inode * The \b device/inode number is identical to the given known entry; this * could mean that the old entry has been renamed or hardlinked. * \b Note: Not all filesystems have persistent inode numbers (eg. NFS) - * so depending on your filesystems this might not be a good indicator! * *
\e name * The entry has the same name as another entry. * *
\e manber * Analysing files of similar size shows some percentage of * (variable-sized) common blocks (ignoring the order of the * blocks). * *
\e dirlist * The new directory has similar files to the old directory.\n * The percentage is (number_of_common_entries)/(files_in_dir1 + * files_in_dir2 - number_of_common_entries). * *
* * \note \b manber matching is not implemented yet. * * \note If too many possible matches for an entry are found, not all are * printed; only an indicator ... is shown at the end. * * */ /** * \addtogroup cmds * * \section uncp * * \code * fsvs uncopy DEST [DEST ...] * \endcode * * The \c uncopy command removes a \c copyfrom mark from the destination * entry. This will make the entry unknown again, and reported as \c New on * the next invocations. * * Only the base of a copy can be un-copied; if a directory structure was * copied, and the given entry is just implicitly copied, this command will * return an error. * * This is not folded in \ref revert, because it's not clear whether \c * revert on copied, changed entries should restore the original copyfrom * data or remove the copy attribute; by using another command this is no * longer ambiguous. * * Example: * \code * $ fsvs copy SourceFile DestFile * # Whoops, was wrong! * $ fsvs uncopy DestFile * \endcode * */ /* Or should for dirlist just the raw data be showed - common_files, * files_in_new_dir? */ /* for internal use only, not visible * * \section cp_mv_data Storing and reading the needed data * * For copy/move detection we need fast access to other files with the same * or similar data. * - To find identical files we just take a GDBM database, indexed by the * MD5, and having a (\c \\0 separated) list of filenames. * Why GDBM? * - For partial commits we need to remove/use parts of the data; a * textfile would have to be completely re-written. * - We need the list of copies/moves identified by the user. * - This should be quickly editable * - No big blobs to stay after a commit (empty db structure?) * - Using an file/symlink in the WAA for the new entry seems bad. We'd * have to try to find such an file/symlink for each committed entry. * - But might be fast by just doing readlink()? = a single syscall * - The number of copy-entries is typically small. 
* - Easy to remove * - Uses inodes, though * - Easily readable - just points to source * - GDBM? * - easy to update and delete * - might uses some space * - a single entry * - not as easy to read * - Text-file is out, because of random access for partial commits. * - Maybe we should write the manber hash of the first two blocks there, * too? -- No, manber-hashes would be done on all files at once. * * * Facts: * - we have to copy directory structures *after* waa__input_tree(), but * before waa__partial_update(). * - While running waa__input_tree() the source tree might still be in * work * - While running waa__partial_update() the "old" data of the copied * entries might already be destroyed * - But here we don't know the new entries yet! * - We have to do *all* entries there * - As we do not yet know which part of the tree we'll be working with * - We must not save these temporary entries * - Either mark FT_IGNORE * - Or don't load the copies for actions calling waa__output_tree(). * - But what about the property functions? They need access to copied * entries. * - Can do the entries as RF_ADD, as now. * * So a "copy" does copy all entries for the list, too; but that means that * a bit more data has to be written out. * * */ /* As the temporary databases (just indizes for detection) are good only * for a single run, we can easily store the address directly. For the real * copy-from db we have to use the path. * * Temporary storage: * value.dptr=sts; * value.dsize=sizeof(sts); * Persistent: * value.dptr=path; * value.dsize=sts->path_len; * * I don't think we want to keep these files up-to-date ... would mean * constant runtime overhead. */ /** Maximum number of entries that are stored. * The -1 is for overflow detection \c "...". */ #define MAX_DUPL_ENTRIES (HASH__LIST_MAX -1) #if 0 /** Files smaller than this are not automatically bound to some ancestor; * their data is not unique enough. 
*/ #define MIN_FILE_SIZE (32) #endif /** How many entries could be correlated. */ int copydetect_count; /** Structure for candidate retrieval. */ struct cm___candidate_t { /** Candidate entry */ struct estat *sts; /** Bitmask to tell which matching criteria apply. */ int matches_where; /** Count, or for percent display, a number from 0 to 1000. */ int match_count; }; /** Function and type declarations for entry-to-hash-key conversions. * @{ */ typedef datum (cm___to_datum_t)(const struct estat *sts); cm___to_datum_t cm___md5_datum; cm___to_datum_t cm___inode_datum; cm___to_datum_t cm___name_datum; /** @} */ /** Structure for storing simple ways of file matching. * * This is the predeclaration. */ struct cm___match_t; /** Enters the given entry into the database */ typedef int (cm___register_fn)(struct estat *, struct cm___match_t *); /** Queries the database for the given entry. * * Output is (the address of) an array of cm___candidates_t, and the number * of elements. */ typedef int (cm___get_list_fn)(struct estat *, struct cm___match_t *, struct cm___candidate_t **, int *count); /** Format function for verbose output. * Formats the candidate \a cand in match \a match into a buffer, and * returns this buffer. */ typedef char* (cm___format_fn)(struct cm___match_t * match, struct cm___candidate_t *cand); /** Inserts into hash tables. */ cm___register_fn cm___hash_register; /** Queries the database for the given entry. */ cm___get_list_fn cm___hash_list; /** Match directories by their children. */ cm___get_list_fn cm___match_children; /** Outputs percent of match. */ cm___format_fn cm___output_pct; /** -. */ struct cm___match_t { /** Name for this way of matching */ char name[8]; /** Which entry type is allowed? */ mode_t entry_type; /** Whether this can be avoided by an option. */ int is_expensive:1; /** Whether this match is allowed. 
*/ int is_enabled:1; /** Callback function for inserting elements */ cm___register_fn *insert; /** Callback function for querying elements */ cm___get_list_fn *get_list; /** Callback function to format the amount of similarity. */ cm___format_fn *format; /** For simple, GDBM-based database matches */ /** How to get a key from an entry */ cm___to_datum_t *to_key; /** Filename for this database. */ char filename[8]; /** Database handle */ hash_t db; /** Last queried key. * Needed if a single get_list call isn't sufficient (TODO). */ datum key; }; /** Enumeration for (some) matching criteria */ enum cm___match_e { CM___NAME_F=0, CM___NAME_D, CM___DIRLIST, }; /** Array with ways for simple matches. * * We keep file and directory matching separated; a file cannot be the * copyfrom source of a directory, and vice-versa. * * The \e important match types are at the start, as they're directly * accessed, too. */ struct cm___match_t cm___match_array[]= { [CM___NAME_F] = { .name="name", .to_key=cm___name_datum, .insert=cm___hash_register, .get_list=cm___hash_list, .entry_type=S_IFREG, .filename=WAA__FILE_NAME_EXT}, [CM___NAME_D] = { .name="name", .to_key=cm___name_datum, .insert=cm___hash_register, .get_list=cm___hash_list, .entry_type=S_IFDIR, .filename=WAA__DIR_NAME_EXT}, [CM___DIRLIST] = { .name="dirlist", .get_list=cm___match_children, .format=cm___output_pct, .entry_type=S_IFDIR, }, { .name="md5", .to_key=cm___md5_datum, .is_expensive=1, .insert=cm___hash_register, .get_list=cm___hash_list, .entry_type=S_IFREG, .filename=WAA__FILE_MD5s_EXT}, { .name="inode", .to_key=cm___inode_datum, .insert=cm___hash_register, .get_list=cm___hash_list, .entry_type=S_IFDIR, .filename=WAA__FILE_INODE_EXT}, { .name="inode", .to_key=cm___inode_datum, .insert=cm___hash_register, .get_list=cm___hash_list, .entry_type=S_IFREG, .filename=WAA__DIR_INODE_EXT}, }; #define CM___MATCH_NUM (sizeof(cm___match_array)/sizeof(cm___match_array[0])) /** Gets a \a datum from a struct estat::md5. 
*/ datum cm___md5_datum(const struct estat *sts) { datum d; d.dsize=APR_MD5_DIGESTSIZE*2+1; d.dptr=cs__md5tohex_buffered(sts->md5); return d; } /** Gets a \a datum from the name of an entry; the \\0 gets included (for * easier dumping). */ datum cm___name_datum(const struct estat *sts) { datum d; d.dptr=sts->name; /* We have only the full path length stored. */ d.dsize=strlen(d.dptr)+1; return d; } /** Gets a \a datum from the filesystem addressing - device and inode. */ datum cm___inode_datum(const struct estat *sts) { static struct { ino_t ino; dev_t dev; } tmp; datum d; tmp.ino=sts->st.ino; tmp.dev=sts->st.dev; d.dptr=(char*)&tmp; d.dsize=sizeof(tmp); return d; } /** Compare function for cm___candidate_t. */ static int cm___cand_compare(const void *_a, const void *_b) { const struct cm___candidate_t *a=_a; const struct cm___candidate_t *b=_b; return a->sts - b->sts; } /** Compare function for cm___candidate_t. */ static int cm___cand_comp_count(const void *_a, const void *_b) { const struct cm___candidate_t *a=_a; const struct cm___candidate_t *b=_b; return a->match_count - b->match_count; } /** -. */ int cm___hash_register(struct estat *sts, struct cm___match_t *match) { int status; status=hsh__insert_pointer( match->db, (match->to_key)(sts), sts); /* If there is no more space available ... just ignore it. */ if (status == EFBIG) status=0; return status; } static int common; int both(struct estat *a, struct estat *b) { common++; return 0; } /** -. * * The big question is - should this work recursively? Would mean that the * topmost directory would be descended, and the results had to be cached. * */ int cm___match_children(struct estat *sts, struct cm___match_t *match, struct cm___candidate_t **list, int *found) { int status; /* We take a fair bit more, to get *all* (or at least most) possible * matches. 
*/ static struct cm___candidate_t similar_dirs[MAX_DUPL_ENTRIES*4]; struct cm___candidate_t *cur, tmp_cand={0}; size_t simil_dir_count; struct estat **children, *curr; struct estat **others, *other_dir; int other_count, i; datum key; struct cm___match_t *name_match; status=0; DEBUGP("child matching for %s", sts->name); /* No children => cannot be matched */ if (!sts->entry_count) goto ex; simil_dir_count=0; children=sts->by_inode; while (*children) { curr=*children; /* Find entries with the same name. Depending on the type of the entry * we have to look in one of the two hashes. */ if (S_ISDIR(curr->st.mode)) name_match=cm___match_array+CM___NAME_D; else if (S_ISREG(curr->st.mode)) name_match=cm___match_array+CM___NAME_F; else goto next_child; key=(name_match->to_key)(curr); status=hsh__list_get(name_match->db, key, &key, &others, &other_count); /* If there are too many entries with the same name, we ignore this * hint. */ if (status != ENOENT && other_count && other_countparent; cur=lsearch(&tmp_cand, similar_dirs, &simil_dir_count, sizeof(similar_dirs[0]), cm___cand_compare); cur->match_count++; DEBUGP("dir %s has count %d", cur->sts->name, cur->match_count); BUG_ON(simil_dir_count > sizeof(similar_dirs)/sizeof(similar_dirs[0])); } } next_child: children++; } /* Now do the comparisions. */ for(i=0; ientry_count + other_dir->entry_count - common); } /* Now sort, and return a few. */ qsort( similar_dirs, simil_dir_count, sizeof(similar_dirs[0]), cm___cand_comp_count); *found=simil_dir_count > HASH__LIST_MAX ? HASH__LIST_MAX : simil_dir_count; *list=similar_dirs; ex: return status; } /** -. 
* */ int cm___hash_list(struct estat *sts, struct cm___match_t *match, struct cm___candidate_t **output, int *found) { int status; static struct cm___candidate_t arr[MAX_DUPL_ENTRIES]; struct estat **list; int i; match->key=(match->to_key)(sts); status=hsh__list_get(match->db, match->key, &match->key, &list, found); if (status == 0) { for(i=0; i<*found; i++) { /** The other members are touched by upper layers, so we have to * re-initialize them. */ memset(arr+i, 0, sizeof(*arr)); arr[i].sts=list[i]; } *output=arr; } return status; } /** Puts cm___candidate_t::match_count formatted into \a buffer. */ char* cm___output_pct(struct cm___match_t *match, struct cm___candidate_t *cand) { static char buffer[8]; BUG_ON(cand->match_count > 1000 || cand->match_count < 0); sprintf(buffer, "=%d.%1d%%", cand->match_count/10, cand->match_count % 10); return buffer; } /** Inserts the given entry in all allowed matching databases. */ int cm___register_entry(struct estat *sts) { int status; int i; struct cm___match_t *match; status=0; if (!(sts->entry_status & FS_NEW)) { for(i=0; iis_enabled && match->insert && (sts->st.mode & S_IFMT) == match->entry_type ) { STOPIF( (match->insert)(sts, match), NULL); DEBUGP("inserted %s for %s", sts->name, match->name); } } } ex: return status; } /** Shows possible copyfrom sources for the given entry. * */ static int cm___match(struct estat *entry) { int status; char *path, *formatted; int i, count, have_match, j, overflows; struct estat *sts; struct cm___match_t *match; struct cm___candidate_t candidates[HASH__LIST_MAX*CM___MATCH_NUM]; struct cm___candidate_t *cur, *list; size_t candidate_count; FILE *output=stdout; /* #error doesn't work with sizeof() ? * But doesn't matter much, this gets removed by the compiler. 
*/ BUG_ON(sizeof(candidates[0].matches_where) *4 < CM___MATCH_NUM, "Wrong datatype chosen for matches_where."); formatted=NULL; status=0; candidate_count=0; overflows=0; path=NULL; /* Down below status will get the value ENOENT from the hsh__list_get() * lookups; we change it back to 0 shortly before leaving. */ for(i=0; ist.mode & S_IFMT) != match->entry_type) continue; /* \todo Loop if too many for a single call. */ status=match->get_list(entry, match, &list, &count); /* ENOENT = nothing to see */ if (status == ENOENT) continue; STOPIF(status, NULL); if (count > MAX_DUPL_ENTRIES) { /* We show one less than we store, so we have the overflow * information. */ overflows++; count=MAX_DUPL_ENTRIES; } for(j=0; j sizeof(candidates)/sizeof(candidates[0])); cur->matches_where |= 1 << i; /* Copy dirlist value */ if (i == CM___DIRLIST) cur->match_count=list[j].match_count; DEBUGP("got %s for %s => 0x%X", cur->sts->name, match->name, cur->matches_where); } } status=0; if (candidate_count) { copydetect_count++; STOPIF( ops__build_path(&path, entry), NULL); STOPIF( hlp__format_path(entry, path, &formatted), NULL); /* Print header line for this file. */ STOPIF_CODE_EPIPE( fprintf(output, "%s\n", formatted), NULL); /* Output list of matches */ for(j=0; jname, output), NULL); if (opt__is_verbose()>0 && match->format) STOPIF_CODE_EPIPE( fputs( match->format(match, candidates+j), output), NULL); } } STOPIF( ops__build_path(&path, sts), NULL); STOPIF( hlp__format_path(sts, path, &formatted), NULL); STOPIF_CODE_EPIPE( fprintf(output, ":%s\n", formatted), NULL); } if (overflows) STOPIF_CODE_EPIPE( fputs(" ...\n", output), NULL); } else { /* cache might be overwritten again when we're here. 
*/ STOPIF( ops__build_path(&path, entry), NULL); if (opt__is_verbose() > 0) { STOPIF( hlp__format_path(entry, path, &formatted), NULL); STOPIF_CODE_EPIPE( fprintf(output, "- No copyfrom relation found for %s\n", formatted), NULL); } else DEBUGP("No sources found for %s", path); } STOPIF_CODE_EPIPE( fflush(output), NULL); ex: return status; } int cm__find_dir_source(struct estat *dir) { int status; status=0; STOPIF( cm___match( dir ), NULL); ex: return status; } int cm__find_file_source(struct estat *file) { int status; char *path; status=0; STOPIF( ops__build_path(&path, file), NULL); DEBUGP("finding source of %s", file->name); STOPIF( cs__compare_file(file, path, NULL), NULL); /* TODO: EPIPE and similar for output */ STOPIF( cm___match( file ), NULL); ex: return status; } /** After loading known entries try to find some match for every new entry. * */ int cm__find_copied(struct estat *root) { int status; struct estat *sts, **child; status=0; child=root->by_inode; if (!child) goto ex; while (*child) { sts=*child; /* Should we try to associate the directory after all children have been * done? We could simply take a look which parent the children's sources * point to ... * * Otherwise, if there's some easy way to see the source of a directory, * we could maybe save some searching for all children.... */ if (sts->entry_status & FS_NEW) { switch (sts->st.mode & S_IFMT) { case S_IFDIR: STOPIF( cm__find_dir_source(sts), NULL); break; case S_IFLNK: case S_IFREG: STOPIF( cm__find_file_source(sts), NULL); break; default: DEBUGP("Don't handle entry %s", sts->name); } } if (S_ISDIR(sts->st.mode) && (sts->entry_status & (FS_CHILD_CHANGED | FS_CHANGED)) ) STOPIF( cm__find_copied(sts), NULL); child++; } ex: return status; } /** -. * */ int cm__detect(struct estat *root, int argc, char *argv[]) { int status, st2; char **normalized; int i; struct cm___match_t *match; hash_t hash; /* Operate recursively. 
*/ opt_recursive++; /* But do not allow to get current MD5s - we need the data from the * repository. */ opt__set_int(OPT__CHANGECHECK, PRIO_MUSTHAVE, CHCHECK_NONE); STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); /** \todo Do we really need to load the URLs here? They're needed for * associating the entries - but maybe we should do that two-way: * - just read \c intnum , and store it again * - or process to (struct url_t*). * * Well, reading the URLs doesn't cost that much ... */ STOPIF( url__load_list(NULL, 0), NULL); for(i=0; iis_enabled= !match->is_expensive || opt__get_int(OPT__COPYFROM_EXP); if (!match->filename[0]) continue; DEBUGP("open hash for %s as %s", match->name, match->filename); /* Create database file for WC root. */ STOPIF( hsh__new(wc_path, match->filename, HASH_TEMPORARY, & match->db), NULL); } /* We read all entries, and show some progress. */ status=waa__read_or_build_tree(root, argc, normalized, argv, cm___register_entry, 1); if (status == -ENOENT) STOPIF(status, "!No committed working copy found."); STOPIF(status, NULL); copydetect_count=0; STOPIF( cm__find_copied(root), NULL); if (!copydetect_count) STOPIF_CODE_EPIPE( printf("No copyfrom relations found.\n"), NULL); else if (opt__is_verbose() > 0) STOPIF_CODE_EPIPE( printf("%d copyfrom relation%s found.\n", copydetect_count, copydetect_count == 1 ? 
"" : "s"), NULL); ex: for(i=0; i= buflen); *string=buffer; ex: return status; } /** Returns the absolute path * */ int cm___absolute_path(char *path, char **output) { static struct cache_t *cache; int status, len; char *cp; STOPIF( cch__new_cache(&cache, 8), NULL); STOPIF( cch__add(cache, 0, NULL, // wc_path_len + 1 + strlen(path) + 1, &cp), NULL); start_path_len + 1 + strlen(path) + 1, &cp), NULL); DEBUGP("norm from: %s", path); hlp__pathcopy(cp, &len, path, NULL); DEBUGP("norm to: %s", cp); BUG_ON(len > cache->entries[cache->lru]->len); *output=cp; ex: return status; } /** Checks whether a path is below \c wc_path, and returns the relative * part. * * If that isn't possible (because \a path is not below \c wc_path), * \c EINVAL is returned. * The case \c path==wc_path is not allowed, either. */ inline int cm___not_below_wcpath(char *path, char **out) { if (strncmp(path, wc_path, wc_path_len) != 0 || path[wc_path_len] != PATH_SEPARATOR) return EINVAL; *out=path+wc_path_len+1; return 0; } /** Dump a list of copyfrom relations to the stream. * * TODO: filter by wildcards (?) */ int cm___dump_list(FILE *output, int argc, char *normalized[]) { int status; hash_t db; datum key, value; int have; char *path; svn_revnum_t rev; /* TODO: Use some filter, eg. by pathnames. */ db=NULL; /* Create database file for WC root. */ status=hsh__new(wc_path, WAA__COPYFROM_EXT, GDBM_READER, &db); if (status==ENOENT) { status=0; goto no_copyfrom; } have=0; status=hsh__first(db, &key); while (status == 0) { STOPIF( hsh__fetch(db, key, &value), NULL); /* The . at the end is suppressed; therefore we print it from the * second dataset onwards. 
*/ if (have) status=fputs(".\n", output); STOPIF( cm___string_to_rev_path( value.dptr, &path, &rev), NULL); status |= fprintf(output, "%s\n%s\n", path, key.dptr); IF_FREE(value.dptr); STOPIF_CODE_ERR( status < 0, -EPIPE, "output error"); status=hsh__next(db, &key, &key); have++; } if (!have) { no_copyfrom: fprintf(output, "No copyfrom information was written.\n"); } else if (opt__is_verbose() > 0) fprintf(output, "%d copyfrom relation%s.\n", have, have == 1 ? "" : "s"); ex: if (db) STOPIF( hsh__close(db, status), NULL); return status; } /** Make the copy in the tree started at \a root. * * The destination must not already exist in the tree; it can exist in the * filesystem. * * If \a revision is not \c 0 (which corresponds to \c BASE), the correct * list of entries must be taken from the corresponding repository. * * Uninitialization is done via \c root==NULL. * * If the flag \a paths_are_wc_relative is set, the paths \a cp_src and \a * cp_dest are taken as-is. * Else they're are converted to wc-relative paths by making them absolute * (eventually using \ref start_path as anchor), and cutting the wc-root * off. */ int cm___make_copy(struct estat *root, char *cp_src, svn_revnum_t revision, char *cp_dest, int paths_are_wc_relative) { int status; static const char err[]="!The %s path \"%s\" is not below the wc base."; struct estat *src, *dest; static hash_t db=NULL; char *abs_src, *abs_dest; char *wc_src, *wc_dest; char *buffer, *url; if (!root) { STOPIF( hsh__close(db, 0), NULL); goto ex; } /* That we shuffle the characters back and forth can be excused because * - either we are cmdline triggered, in which case we have the full task * starting overhead, and don't do it here again, and * - if we're doing a list of entries, we have to do it at least here. 
*/ if (paths_are_wc_relative) { wc_dest=cp_dest; wc_src=cp_src; } else { STOPIF( cm___absolute_path(cp_dest, &abs_dest), NULL); STOPIF( cm___absolute_path(cp_src, &abs_src), NULL); STOPIF( cm___not_below_wcpath(abs_dest, &wc_dest), err, "destination", abs_dest); STOPIF( cm___not_below_wcpath(abs_src, &wc_src), err, "source", abs_src); } STOPIF( ops__traverse(root, cp_src, 0, 0, &src), NULL); /* TODO: Make copying copied entries possible. * But as we only know the URL, we'd either have to do a checkout, or try * to parse back to the original entry. */ STOPIF_CODE_ERR( src->flags & RF___IS_COPY, EINVAL, "!Copied entries must be committed before using them as copyfrom source."); /* The directories above must be added; the entries below get RF_COPY_SUB * set (by waa__copy_entries), and this entry gets overridden to * RF_COPY_BASE below. */ STOPIF( ops__traverse(root, cp_dest, OPS__CREATE, RF_ADD, &dest), NULL); STOPIF_CODE_ERR( !(dest->flags & RF_ISNEW), EINVAL, "!The destination is already known - must be a new entry."); if (!db) STOPIF( hsh__new(wc_path, WAA__COPYFROM_EXT, GDBM_WRCREAT, &db), NULL); if (revision) { BUG_ON(1, "fetch list of entries from the repository"); } else { STOPIF( waa__copy_entries(src, dest), NULL); revision=src->repos_rev; } /* Mark as base entry for copy; the RF_ADD flag was removed by * copy_entries above, but the entry really is *new*. */ dest->flags |= RF_COPY_BASE; dest->flags &= ~RF_COPY_SUB; STOPIF( url__full_url( src, &url), NULL); STOPIF( cm___rev_path_to_string(url, revision, &buffer), NULL); STOPIF( hsh__store_charp(db, wc_dest, buffer), NULL); ex: return status; } /** Sets all entries that are just implicitly copied to ignored. * Explicitly added entries (because of \ref add, or \ref prop-set) are * kept. * * Returns a \c 0 or \c 1, with \c 1 saying that \b all entries below are * ignored, and so whether \a cur can (perhaps) be completely ignored, too. 
* */ int cm___ignore_impl_copied(struct estat *cur) { struct estat **sts; int all_ign; all_ign=1; cur->flags &= ~RF_COPY_SUB; if (cur->flags & (RF_ADD | RF_PUSHPROPS)) all_ign=0; if (ops__has_children(cur)) { sts=cur->by_inode; while (*sts) { all_ign &= cm___ignore_impl_copied(*sts); sts++; } } if (all_ign) cur->to_be_ignored=1; else /* We need that because of its children, and we have to check. */ cur->flags |= RF_ADD | RF_CHECK; DEBUGP("%s: all_ignore=%d", cur->name, all_ign); return all_ign; } /** -. * */ int cm__uncopy(struct estat *root, int argc, char *argv[]) { int status; char **normalized; struct estat *dest; /* Do only the selected elements. */ opt_recursive=-1; if (!argc) ac__Usage_this(); STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); STOPIF( url__load_nonempty_list(NULL, 0), NULL); /* Load the current data, without updating */ status=waa__input_tree(root, NULL, NULL); if (status == ENOENT) STOPIF( EINVAL, "!No working copy could be found."); else STOPIF( status, NULL); while (*normalized) { DEBUGP("uncopy %s %s", *normalized, normalized[1]); STOPIF( ops__traverse(root, *normalized, OPS__FAIL_NOT_LIST, 0, &dest), "!The entry \"%s\" is not known.", *normalized); STOPIF_CODE_ERR( !(dest->flags & RF_COPY_BASE), EINVAL, "!The entry \"%s\" is not a copy base.", *normalized); /* Directly copied, unchanged entry. * Make it unknown - remove copy relation (ie. mark hash value for * deletion), and remove entry from local list. */ STOPIF( cm__get_source(dest, NULL, NULL, NULL, 1), NULL); dest->flags &= ~RF_COPY_BASE; /* That removes all not explicitly added entries from this subtree. */ cm___ignore_impl_copied(dest); normalized++; } STOPIF( waa__output_tree(root), NULL); /* Purge. */ STOPIF( cm__get_source(NULL, NULL, NULL, NULL, 0), NULL); ex: return status; } /** -. 
* */ int cm__work(struct estat *root, int argc, char *argv[]) { int status; char **normalized; int count; FILE *input=stdin; char *src, *dest, *cp; int is_dump, is_load; svn_revnum_t revision; status=0; is_load=is_dump=0; /* We have to do the parameter checking in two halfs, because we must not * use "dump" or "load" as working copy path. So we first check what to do, * eventually remove these strings from the parameters, and then look for * the wc base. */ /* If there's \b no parameter given, we default to dump. */ if (argc==0) is_dump=1; else if (strcmp(argv[0], parm_dump) == 0) { is_dump=1; argv++; argc--; } else if (strcmp(argv[0], parm_load) == 0) { is_load=1; argv++; argc--; } STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); if (is_dump) { STOPIF( cm___dump_list(stdout, argc, normalized), NULL); /* To avoid the indentation */ goto ex; } switch (opt_target_revisions_given) { case 0: /* Default is \c BASE. */ revision=0; break; case 1: revision=opt_target_revision; default: STOPIF( EINVAL, "!Only a single revision number may be given."); } STOPIF( url__load_nonempty_list(NULL, 0), NULL); /* Load the current data, without updating; so st.mode equals * st.local_mode_packed and so on. */ status=waa__input_tree(root, NULL, NULL); if (status == -ENOENT) STOPIF(status, "!No entries are currently known, " "so you can't define copy or move relations yet.\n"); STOPIF(status, NULL); hlp__string_from_filep(NULL, NULL, NULL, SFF_RESET_LINENUM); if (is_load) { /* Load copyfrom data. 
*/ count=0; while (1) { status=hlp__string_from_filep(input, &cp, NULL, 0); if (status == EOF) { status=0; break; } STOPIF( status, "Failed to read copyfrom source"); STOPIF_CODE_ERR( !*cp, EINVAL, "!Copyfrom source must not be empty."); STOPIF( hlp__strdup( &src, cp), NULL); status=hlp__string_from_filep(input, &cp, NULL, 0); STOPIF_CODE_ERR( status == EOF, EINVAL, "!Expected a target specification, got EOF!"); STOPIF( status, "Failed to read copyfrom destination"); STOPIF( hlp__strdup( &dest, cp), NULL); /* Get the empty line */ status=hlp__string_from_filep(input, &cp, NULL, SFF_WHITESPACE); if (status == EOF) DEBUGP("delimiter line missing - EOF"); else if (status == 0 && cp[0] == '.' && cp[1] == 0) DEBUGP("delimiter line ok"); else { STOPIF(status, "Cannot read delimiter line"); /* status == 0 ? not empty. */ STOPIF(EINVAL, "Expected delimiter line - got %s", cp); } DEBUGP("read %s => %s", src, dest); /* These paths were given relative to the cwd, which is changed now, as * we're in the wc base. Calculate correct names. */ STOPIF( cm___make_copy(root, src, revision, dest, 0), NULL); count++; free(dest); free(src); } if (opt__is_verbose() >= 0) printf("%d copyfrom relation%s loaded.\n", count, count==1 ? "" : "s"); } else { STOPIF_CODE_ERR(argc != 2, EINVAL, "!At least source and destination, " "or \"dump\" resp. \"load\" must be given."); /* Create database file for WC root. */ STOPIF( cm___make_copy(root, normalized[0], revision, normalized[1], 1), "Storing \"%s\" as source of \"%s\" failed.", normalized[0], normalized[1]); } STOPIF( cm___make_copy(NULL, NULL, 0, NULL, 0), NULL); STOPIF( waa__output_tree(root), NULL); ex: return status; } /** Get the source of an entry with \c RF_COPY_BASE set. * See cm__get_source() for details. 
* */ int cm___get_base_source(struct estat *sts, char *name, char **src_url, svn_revnum_t *src_rev, int alloc_extra, int register_for_cleanup) { int status; datum key, value; static hash_t hash; static int init=0; char *url; value.dptr=NULL; status=0; if (src_url) *src_url=NULL; if (src_rev) *src_rev=SVN_INVALID_REVNUM; if (!sts) { /* uninit */ STOPIF( hsh__close(hash, register_for_cleanup), NULL); hash=NULL; init=0; goto ex; } if (!init) { /* We cannot simply use !hash as condition; if there is no database with * copyfrom information, we'd try to open it *every* time we're asked for * a source, which is clearly not optimal for performance. * So we use an static integer. */ init=1; /* In case there's a cleanup at the end we have to open read/write. */ status=hsh__new(wc_path, WAA__COPYFROM_EXT, GDBM_WRITER | HASH_REMEMBER_FILENAME, &hash); /* If we got an ENOENT here, hash==NULL; so we'll re-set the * *parameters below and return. */ if (status != ENOENT) STOPIF( status, NULL); } /* Normal proceedings, this is a direct target of a copy definition. */ if (!name) STOPIF( ops__build_path( &name, sts), NULL); if (name[0]=='.' && name[1]==PATH_SEPARATOR) name+=2; key.dptr=name; key.dsize=strlen(name)+1; status=hsh__fetch(hash, key, &value); if (status) { DEBUGP("no source for %s found", name); goto ex; } if (register_for_cleanup) STOPIF( hsh__register_delete(hash, key), NULL); /* Extract the revision number. */ STOPIF( cm___string_to_rev_path( value.dptr, &url, src_rev), NULL); if (src_url) { BUG_ON(!url); status=strlen(url); /* In case the caller wants to do something with this buffer, we return * more. We need at least the additional \0; and we give a few byte * extra, gratis, free for nothing (and that's cutting my own throat)! * */ STOPIF( hlp__strnalloc( status + 1 +alloc_extra + 4, src_url, url), NULL); status=0; } ex: IF_FREE(value.dptr); return status; } /** Recursively creating the URL. 
* As most of the parameters are constant, we could store them statically * ... don't know whether it would make much difference, this function * doesn't get called very often. * \a length_to_add is increased while going up the tree; \a eobuffer gets * handed back down. */ int cm___get_sub_source_rek(struct estat *cur, int length_to_add, char **dest_buffer, svn_revnum_t *src_rev, char **eobuffer) { int status; struct estat *copied; int len; /* Get source of parent. * Just because this entry should be removed from the copyfrom database * that isn't automatically true for the corresponding parent. */ copied=cur->parent; BUG_ON(!copied, "Copy-sub but no base?"); len=strlen(cur->name); length_to_add+=len+1; if (copied->flags & RF_COPY_BASE) { /* Silent error return. */ status=cm___get_base_source(copied, NULL, dest_buffer, src_rev, length_to_add, 0); if (status) goto ex; *eobuffer=*dest_buffer+strlen(*dest_buffer); DEBUGP("after base eob-5=%s", *eobuffer-5); } else { /* Maybe better do (sts->path_len - copied->path_len))? * Would be faster. */ status=cm___get_sub_source_rek(copied, length_to_add, dest_buffer, src_rev, eobuffer); if (status) goto ex; } /* Now we have the parent's URL ... put cur->name after it. */ /* Not PATH_SEPARATOR, it's an URL and not a pathname. */ **eobuffer = '/'; strcpy( *eobuffer +1, cur->name ); *eobuffer += len+1; DEBUGP("sub source of %s is %s", cur->name, *dest_buffer); ex: return status; } /** Get the source of an entry with \c RF_COPY_SUB set. * See cm__get_source() for details. * * This function needs no cleanup. 
* */ int cm___get_sub_source(struct estat *sts, char *name, char **src_url, svn_revnum_t *src_rev) { int status; char *eob; /* As we only store the URL in the hash database, we have to proceed as * follows: * - Look which parent is the copy source, * - Get its URL * - Append the path after that to the URL of the copy source: * root / dir1 / dir2 / / dir3 / * and * root / dir1 / dir3 / * Disadvantage: * - we have to traverse the entries, and make the estat::by_name * arrays for all intermediate nodes. * * We do that as a recursive sub-function, to make bookkeeping easier. */ STOPIF( cm___get_sub_source_rek(sts, 0, src_url, src_rev, &eob), NULL); ex: return status; } /** -. * * Wrapper around cm___get_base_source() and cm___get_sub_source(). * * If \c *src_url is needed, it is allocated and must be \c free()ed after * use. * * If \a name is not given, it has to be calculated. * * Both \a src_name and \a src_rev are optional. * These are always set; if no source is defined, they're set to \c NULL, * \c NULL and \c SVN_INVALID_REVNUM. * * Uninitializing should be done via calling with \c sts==NULL; in this * case the \a register_for_cleanup value is used as success flag. * * If no source could be found, \c ENOENT is returned. */ int cm__get_source(struct estat *sts, char *name, char **src_url, svn_revnum_t *src_rev, int register_for_cleanup) { int status; if (!sts) { status=cm___get_base_source(NULL, NULL, NULL, NULL, 0, 0); goto ex; } if (sts->flags & RF_COPY_BASE) { status= cm___get_base_source(sts, name, src_url, src_rev, 0, register_for_cleanup); } else if (sts->flags & RF_COPY_SUB) { status= cm___get_sub_source(sts, name, src_url, src_rev); } else { status=ENOENT; goto ex; } if (src_url) DEBUGP("source of %s is %s", sts->name, *src_url); if (status) { /* That's a bug ... the bit is set, but no source was found? * Could some stale entry cause that? Don't error out now; perhaps at a * later time. 
*/ DEBUGP("bit set, no source!"); /* BUG_ON(1,"bit set, no source!"); */ goto ex; } ex: return status; } fsvs-1.2.6/src/props.c0000644000202400020240000005427112152033633013554 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ /** \file * Properties handling - \ref prop-get, \ref prop-set, \ref prop-list. * * Deleted properties are marked locally be a special value; callers need * to check by using prp__prop_will_be_removed(). * * \todo --xml, --raw, --dump switches? * * \todo should \ref prop-get and \ref prop-list use UTF-8 or local * encoding? Currently the names and values are dumped as-is, ie. UTF-8. * */ #include #include #include #include #include #include #include "global.h" #include "waa.h" #include "url.h" #include "helper.h" #include "actions.h" #include "hash_ops.h" #include "props.h" #include "update.h" #include "ignore.h" #include "est_ops.h" #include "add_unvers.h" #include "warnings.h" /** \defgroup props Property handling * \ingroup compati * * We take and give arbitrary properties to the subversion layers; a few * are used directly, see \ref s_p_n "special property names" for details. * * All other properties are stored with a terminating NUL by the * hash layer; see hsh__new(). * * */ /** \addtogroup cmds * * \section prop-get * * \code * fsvs prop-get PROPERTY-NAME PATH... * \endcode * * Prints the data of the given property to \c STDOUT. * * \note Be careful! This command will dump the property as it is, * ie. with any special characters! If there are escape sequences or binary * data in the property, your terminal might get messed up! 
\n * If you want a safe way to look at the properties, use prop-list with the * \c -v parameter. */ /* TODO: If you use \c - as the value, the value is read from \c STDIN. */ /** \addtogroup cmds * * \section prop-set * * \code * fsvs prop-set [-u URLNAME] PROPERTY-NAME VALUE PATH... * \endcode * * This command sets an arbitrary property value for the given path(s). * * \note Some property prefixes are reserved; currently everything starting * with svn: throws a (fatal) warning, and fsvs: is * already used, too. See \ref s_p_n. * * If you're using a multi-URL setup, and the entry you'd like to work on * should be pinned to a specific URL, you can use the \c -u parameter; * this is like the \ref add "add" command, see there for more details. * * */ /** \addtogroup cmds * * \section prop-del * * \code * fsvs prop-del PROPERTY-NAME PATH... * \endcode * * This command removes a property for the given path(s). * * See also \ref prop-set. * * */ /** \addtogroup cmds * * \section prop-list * * \code * fsvs prop-list [-v] PATH... * \endcode * * Lists the names of all properties for the given entry. \n * With \c -v, the value is printed as well; special characters will be * translated, as arbitrary binary sequences could interfere with your * terminal settings. * * If you need raw output, post a patch for \c --raw, or write a loop with * \ref prop-get "prop-get". * * */ /** \defgroup s_p_n Special property names * \ingroup userdoc * * \section fsvs_props Special FSVS properties. * * These are used \b only by \c FSVS; \c subversion doesn't know them. * @{ */ /** The common prefix. */ #define FSVS_PROP_PREFIX "fsvs:" /** The name for the commit-pipe property. * * If this property is set for a file, this file gets filtered by the given * command on its way to the repository. This is mostly used for backups, * to protect data. * * To make that easier to understand, here's an example. 
* You're versioning your \c etc: * \code * cd /etc * fsvs urls * \endcode * That means that \c /etc/shadow, \c /etc/ssh/ssh_host_key and so on would * all get transmitted to the repository. * Now we could say that if the machine crashes hard, a changed ssh-key is * the least of our worries - so we simply exclude it from backup. * \code * fsvs ignore './ssh/ssh_host_*key' * \endcode * But the users' passwords and similar should not be lost - so we use \c * gpg to encrypt them on backup. You generate a key, whose private key * gets kept in a secure place, but not (only) on this machine; * because if the machine gets damaged, the backups could no longer be * decrypted. * \note If the key is on this machine, and it gets hacked, your backups * might be read! * \code * gpg --import-key ..... * fsvs propset fsvs:commit-pipe 'gpg -er ' shadow * \endcode * You might want/need to set an update-pipe, too; see \ref * FSVS_PROP_UPDATE_PIPE for details. * * The only thing left is to take the first backup: * \code * fsvs commit * \endcode * * \note Currently only files can use this property. Would it make sense * for devices or symlinks too? Currently not, as the only way to send * these into the repository is changing the major/minor number - which is * not possible with normal files.\n If we instead sent the whole data, we * could encrypt a filesystem into the repository - but that would get no * delta-transfers, and deltification only if not CBC ...\n Sending the * fsvs generated string "cdev:x:x" for encryption wouldn't help; so if * such special files must be processed, we'd might need to make a \e raw * pipe - which does no interpreting.\n Ideas welcome. * * \note Encrypted data cannot be deltified, so the few marked files will * take their full space in the repository. (Although \c gpg compresses the * files before encryption, so it won't be \b that bad.) * * You might be interested in * \ref exp_env "exported environment variables", too. 
* * \note Another idea is to ignore files that are not readable by everyone; * see \ref ign_mod "ignore pattern modifiers" for details. * */ #define FSVS_PROP_COMMIT_PIPE FSVS_PROP_PREFIX "commit-pipe" /** The name of the update-pipe property. * * This is the reverse thing to \ref FSVS_PROP_COMMIT_PIPE; it's used in * the same way. * * Extending the example before: * \code * fsvs propset fsvs:commit-pipe 'gpg -er ' shadow * fsvs propset fsvs:update-pipe 'gpg -d' shadow * \endcode * * \note This command is used for \ref revert, \ref diff, and \ref export, * too. * * */ #define FSVS_PROP_UPDATE_PIPE FSVS_PROP_PREFIX "update-pipe" /** Local install commandline. * * \note This is not yet implemented. This list is more or less just a kind * of brainstorming. If you need this feature, tell us at * dev@fsvs.tigris.org - you'll get it. * * This is used after the temporary file (which had possibly used \ref * FSVS_PROP_UPDATE_PIPE) has been written; the normal, internal fsvs * operation is approximately this: * - File gets piped through \ref FSVS_PROP_UPDATE_PIPE into a temporary * file, which was created with mask \c 0700. * - chmod $m $tmp - set the stored access mode. * - chown $u.$g $tmp || chown $U.$G $tmp - set user and group * by the stored strings, and if that fails, by the uid and gid. * - touch -t$t $tmp - set the stored access mode. * - mv $tmp $dest rename to destination name. * * You could get a more or less equivalent operation by using * \code * fsvs propset fsvs:update-pipe \ * '/usr/bin/install -g$g -o$u -m$m $tmp $dest' \ * [paths] * \endcode * * The environment gets prepared as outlined above - you get the variables * - \c $g and \c $G (group name and gid), * - \c $u and \c $U (owner name and uid), * - \c $m (octal mode, like \c 0777), * - \c $t (mtime in form yyyymmddHHMM.SS - like used with GNU touch(1)), * - \c $tmp (name of temporary file) and * - \c $dest (destination name) * set. 
* * After the given program completed * - $tmp gets deleted (\c ENOENT is not seen as an error, in case your * install program moved the file), and * - the destination path gets queried to store the meta-data of the (now * assumed to be \e non-modified) node. * */ #define FSVS_PROP_INSTALL_CMD FSVS_PROP_PREFIX "install" /** The MD5 of the original (un-encoded) data. * * Used for encoded entries, see \ref FSVS_PROP_COMMIT_PIPE. * * If we do a sync-repos (or update), we need the cleartext-MD5 to know * whether the entry has changed; this entry holds it. * * \todo Do we need some kind of SALT here, to avoid plaintext guessing? */ #define FSVS_PROP_ORIG_MD5 FSVS_PROP_PREFIX "original-md5" /** @} */ /** \name propNames Meta-data property names. * * Depending on the subversion sources there may be some of these already * defined - especially if the header files are from the meta-data branches. * These would override the defaults - but lets hope that they'll always * be compatible! */ /** @{ */ #ifndef SVN_PROP_TEXT_TIME #define SVN_PROP_TEXT_TIME SVN_PROP_PREFIX "text-time" #endif #ifndef SVN_PROP_OWNER #define SVN_PROP_OWNER SVN_PROP_PREFIX "owner" #endif #ifndef SVN_PROP_GROUP #define SVN_PROP_GROUP SVN_PROP_PREFIX "group" #endif #ifndef SVN_PROP_UNIX_MODE #define SVN_PROP_UNIX_MODE SVN_PROP_PREFIX "unix-mode" #endif /** @} */ /** \section svn_props Property names from the subversion name-space * * \c FSVS has a number of reserved property names, where it stores the * meta-data and other per-entry data in the repository. * * \section svn_props Meta-data of entries * * Such names are already in use in the \e mtime and \e meta-data branches * of subversion; we use the values defined in \c svn_props.h (if any), or * use the originally used values to be compatible. * * These start all with the string defined in \c SVN_PROP_PREFIX, which is * \c svn: . */ /** @{ */ /** Modification time - \c svn:text-time. */ const char propname_mtime[]=SVN_PROP_TEXT_TIME, /** -. 
*/ propname_owner[]=SVN_PROP_OWNER, /** -. */ propname_group[]=SVN_PROP_GROUP, /** -. */ propname_origmd5[]=FSVS_PROP_ORIG_MD5, /** -. */ propname_umode[]=SVN_PROP_UNIX_MODE, /** -. Subversion defines that for symlinks; we use that for devices, * too. */ propname_special[]=SVN_PROP_SPECIAL, /** -. */ propval_special []=SVN_PROP_SPECIAL_VALUE, /** -. This will get the local file as \c STDIN, and its output goes to the * repository. * See \ref FSVS_PROP_COMMIT_PIPE. */ propval_commitpipe[]=FSVS_PROP_COMMIT_PIPE, /** -. This will get the repository file as \c STDIN, and its output goes * to a local temporary file, which gets installed. See \ref * FSVS_PROP_UPDATE_PIPE. * */ propval_updatepipe[]=FSVS_PROP_UPDATE_PIPE, /** -. */ propval_orig_md5 []=FSVS_PROP_ORIG_MD5; /** @} */ /** -. * * I thought about using "constant prefix.$random" => "$propertyname" for * them - but it's more work than simply ignoring them before listing. * * And as they're not widely used, it's easier this way. */ const char prp___to_be_removed_value[]= "FSVS:INTERNAL-\nto-be-removed\n-" "\x8f\xc1\xa6\xe5\x86\x0a\x01\x72\x54\x89\x25\x23\x03\xc3\xfa\x75"; /** -. * Just a wrapper for the normal property operation. * * Must be silent for \c ENOENT, so that fsvs pl * doesn't give an * error. */ int prp__open_byname(char *wcfile, int gdbm_mode, hash_t *db) { int status; status=hsh__new(wcfile, WAA__PROP_EXT, gdbm_mode, db); if (status != ENOENT) STOPIF(status, "Opening property file for %s", wcfile); ex: return status; } /** -. * Returns ENOENT silently. * */ int prp__open_byestat(struct estat *sts, int gdbm_mode, hash_t *db) { int status; char *fn; STOPIF( ops__build_path(&fn, sts), NULL); status=prp__open_byname(fn, gdbm_mode, db); if (status != ENOENT) STOPIF(status, NULL); ex: return status; } /** -. * * If \a datalen is -1, \c strlen(data) is used. 
*/ int prp__set(hash_t db, const char *name, const char *data, int datalen) { int status; datum key, value; key.dptr=(char*)name; key.dsize=strlen(name)+1; if (data) { value.dptr=(char*)data; value.dsize=datalen; if (datalen == -1) value.dsize= *data ? strlen(data)+1 : 0; #ifdef ENABLE_DEBUG else BUG_ON(value.dptr[value.dsize-1] != 0, "Not terminated!"); #endif } else { value.dptr=NULL; value.dsize=0; } STOPIF( prp__store(db, key, value), NULL); ex: return status; } /** -. * Convenience function. * The svn_string_t has the number of characters used, whereas we store the * \c \\0 at the end, too. */ int prp__set_svnstr(hash_t db, const char *name, const svn_string_t *utf8_value) { return prp__set(db, name, utf8_value->data, utf8_value->len+1); } /** -. * */ int prp__store(hash_t db, datum key, datum value) { int status; DEBUGP("storing property %s=%s", key.dptr, value.dptr); STOPIF( hsh__store(db, key, value), NULL); ex: return status; } /** -. * Wrapper for prp__fetch(). */ int prp__get(hash_t db, const char *keycp, datum *value) { static datum key; key.dptr=keycp; key.dsize=strlen(keycp)+1; return prp__fetch(db, key, value); } /** -. * The meta-data of the entry is overwritten with the data coming from the * repository; its \ref estat::remote_status is set. * If \a props_db is not NULL, the still opened property database is * returned. * */ int prp__set_from_aprhash(struct estat *sts, apr_hash_t *props, enum prp__set_from_aprhash_e flags, hash_t *props_db, apr_pool_t *pool) { int status; apr_hash_index_t *hi; char *prop_key; apr_ssize_t prop_key_len; svn_string_t *prop_val; hash_t db; int to_store, count; void *k, *v; status=0; count=0; /* The old behaviour was to always open the database file. If no * user-specified properties are given, old properties were removed that * way. 
* But gdbm has the problem that on gdbm_close() a fsync() is done - even * if nothing was written; this means that for every updated file we * create a new property database, write nothing in it, do a fsync(), * close it, and delete it again - which costs some time. * * Debian bug #514704. * * (Removing of old properties is needed because we'd only know in * cb__record_changes() that properties get removed; in revert we only * have the new list. * TODO: Merge local and remote changes.) */ /* We remember the filename, so that empty hashes get removed on close. * */ db=NULL; hi=apr_hash_first(pool, props); if (flags & STORE_IN_FS) { /* If we want to write the data to disk, but there is nothing to write * (and the caller doesn't need the DB), just remove the file. See * above. */ if (!hi && !props_db) { STOPIF( prp__unlink_db_for_estat(sts), NULL); goto ex; } STOPIF( prp__open_byestat(sts, GDBM_NEWDB | HASH_REMEMBER_FILENAME, &db), NULL); } for (; hi; hi = apr_hash_next(hi)) { /* As the name/key is a (char*), we don't need its length. */ /* Is there a cleaner way than this (cast or compiler warning)?? * subversion defines a "const void * key" and casts that to * whatever needed in subsequent calls - which isn't pretty, too. */ k=&prop_key; v=&prop_val; apr_hash_this(hi, k, &prop_key_len, v); to_store=0; STOPIF( up__parse_prop(sts, prop_key, prop_val, &to_store, pool), NULL); if (to_store) { if (db) { /** \todo - store in utf-8? local encoding? * What if it's binary??? Better do no translation, ie store as * UTF-8. */ STOPIF( prp__set_svnstr(db, prop_key, prop_val), NULL); } count++; } else { /* If already used it's no longer needed. */ if (flags & ONLY_KEEP_USERDEF) apr_hash_set(props, prop_key, prop_key_len, NULL); } } DEBUGP("%d properties stored", count); if (props_db) *props_db=db; else STOPIF( hsh__close(db, status), NULL); ex: return status; } /** -. 
* */ int prp__g_work(struct estat *root, int argc, char *argv[]) { int status, st2; datum key, value; hash_t db; FILE *output; char **normalized; status=0; output=stdout; if (argc<2) ac__Usage_this(); key.dptr=*(argv++); key.dsize=strlen(key.dptr)+1; argc--; STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); for(; *argv; argv++) { db=NULL; status=prp__open_byname( *normalized, GDBM_READER, &db); if (!status) status=prp__fetch(db, key, &value); if (status == ENOENT) { DEBUGP("No such property"); } else if (status) { /* Any other error means trouble. */ STOPIF( status, NULL); } else if (value.dptr && !prp__prop_will_be_removed(value)) { STOPIF_CODE_EPIPE( fputs(value.dptr, output), NULL); STOPIF_CODE_EPIPE( fputc('\n', output), NULL); } if (db) STOPIF( hsh__close(db, status), NULL); db=NULL; } status=0; ex: if (db) { st2=hsh__close(db, status); db=NULL; if (!status && st2) STOPIF( st2, NULL); } return status; } /** -. * * Depending on action->i_val properties are removed or added. * */ int prp__s_work(struct estat *root, int argc, char *argv[]) { int status, st2; datum key, value, rv; hash_t db; char **normalized; struct estat *sts; int change; status=0; if (argc<2) ac__Usage_this(); /* Check name for special values. */ if (svn_prop_is_svn_prop(*argv)) STOPIF( wa__warn( WRN__PROP_NAME_RESERVED, EINVAL, "This is a reserved property name and should not be used." 
), NULL ); key.dptr=*(argv++); key.dsize=strlen(key.dptr)+1; argc--; if (action->i_val == FS_REMOVED) { value.dptr=(char*)prp___to_be_removed_value; /* + \0 */ value.dsize=strlen(prp___to_be_removed_value)+1; } else { value.dptr=*(argv++); value.dsize=strlen(value.dptr)+1; argc--; if (argc<1) ac__Usage_this(); } STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); STOPIF( au__prepare_for_added(), NULL); STOPIF( waa__input_tree(root, NULL, NULL), NULL); for(; *normalized; normalized++) { STOPIF( ops__traverse(root, *normalized, OPS__CREATE | OPS__FAIL_NOT_LIST, RF_ADD, &sts), NULL); if (sts->flags & RF_ISNEW) { /* Get group. */ STOPIF( ign__is_ignore(sts, &change), NULL); STOPIF( ops__apply_group(sts, &db, NULL), NULL); if (!sts->url) sts->url=current_url; STOPIF( hlp__lstat( *normalized, & sts->st), "!'%s' can not be queried", *normalized); /* Such entries must be set as added, if needed - else they wouldn't be * seen as new. */ sts->flags |= RF_ADD; } else STOPIF( prp__open_byestat(sts, GDBM_WRCREAT, &db), NULL); /* Check if modified. */ change=0; status=prp__fetch(db, key, &rv); if (action->i_val == FS_REMOVED) { if (status == ENOENT) DEBUGP("%s on %s didnt exist anyway", key.dptr, *normalized); else change++; } else { if (status == ENOENT) change++; else { change = (rv.dsize != value.dsize) || memcmp(rv.dptr, value.dptr, value.dsize) != 0; DEBUGP("%s on %s change? %d", key.dptr, *normalized, change); } } if (change) { STOPIF( prp__store(db, key, value), NULL); sts->flags |= RF_PUSHPROPS; } STOPIF( hsh__close(db, status), NULL); db=NULL; } STOPIF( waa__output_tree(root), NULL); ex: if (db) { st2=hsh__close(db, status); db=NULL; if (!status && st2) STOPIF( st2, NULL); } return status; } /** -. * \a data and \a len are optional output parameters, ie. may be \c NULL. * if \a data is used, it \b must be free()d. * * Returns \c ENOENT silently. 
*/ int prp__open_get_close(struct estat *sts, char *name, char **data, int *len) { int status; hash_t props; datum value; props=NULL; status=prp__open_byestat(sts, GDBM_READER, &props); if (status == ENOENT) goto ex; STOPIF(status, NULL); status=prp__get(props, name, &value); if (status == ENOENT) goto ex; STOPIF(status, NULL); if (len) *len=value.dsize; if (data) *data=value.dptr; else IF_FREE(value.dptr); ex: if (props) hsh__close(props, status); return status; } /** -. * */ int prp__l_work(struct estat *root, int argc, char *argv[]) { int status, count; int many_files; char indent[5]=" "; hash_t db; FILE *output; datum key, data; char **normalized; status=0; db=NULL; if (!argc) ac__Usage_this(); STOPIF( waa__find_common_base(argc, argv, &normalized), NULL); output=stdout; many_files= argc>1; if (!many_files) *indent=0; for(; *normalized; normalized++) { status=prp__open_byname( *normalized, GDBM_READER, &db); if (status == ENOENT) goto noprops; if (status) STOPIF(status, "Cannot open properties file for '%s'", *normalized); count=0; status=prp__first(db, &key); while (status == 0) { DEBUGP("got key with len=%d: %.30s", key.dsize, key.dptr); STOPIF( prp__fetch(db, key, &data), NULL); if (prp__prop_will_be_removed(data)) { /* This property will be removed on next commit. */ } else { count++; if (count==1 && many_files) STOPIF_CODE_EPIPE( printf("Properties of %s:\n", *normalized), NULL); STOPIF_CODE_EPIPE( fputs(indent, output), NULL); /* The key and value are defined to have a \0 at the end. * This should not be printed. 
*/ STOPIF( hlp__safe_print(output, key.dptr, key.dsize-1), NULL); if (opt__is_verbose() > 0) { STOPIF_CODE_EPIPE( fputc('=',output), NULL); STOPIF( hlp__safe_print(output, data.dptr, data.dsize-1), NULL); free(data.dptr); } STOPIF_CODE_EPIPE( fputc('\n', output), NULL); } status=prp__next(db, &key, &key); } if (count == 0) { noprops: printf("%s has no properties.\n", *normalized); status=0; continue; } STOPIF( hsh__close(db, status), NULL); db=NULL; } ex: hsh__close(db, status); return status; } /** -. */ int prp__unlink_db_for_estat(struct estat *sts) { int status; char *cp, *eos, *path; STOPIF( ops__build_path(&path, sts), NULL); STOPIF( waa__get_waa_directory(path, &cp, &eos, NULL, waa__get_gwd_flag(WAA__PROP_EXT)), NULL); strcpy(eos, WAA__PROP_EXT); status= unlink(cp) == -1 ? errno : 0; if (status == ENOENT) status=0; else STOPIF(status, "deleting properties of %s (%s)", path, cp); ex: return status; } /** -. */ int prp__sts_has_no_properties(struct estat *sts, int *result) { hash_t db; int status, rv; datum key; rv = prp__open_byestat( sts, GDBM_READER, &db); if (rv == ENOENT) goto done; STOPIF(rv, NULL); rv = prp__first(db, &key); STOPIF( hsh__close(db, 0), NULL); done: *result = (rv == ENOENT); ex: return status; } fsvs-1.2.6/src/Makefile.in0000644000202400020240000001761212152033551014307 0ustar marekmarek########################################################################### # Copyright (C) 2005-2009 Philipp Marek. # # # # This program is free software; you can redistribute it and/or modify # # it under the terms of the GNU General Public License version 2 as # # published by the Free Software Foundation. 
# ########################################################################### ################################ Definitions ################################ DIR := /usr/share/doc HEADURL := "$URL: http://fsvs.tigris.org/svn/fsvs/tags/fsvs-1.2.6/fsvs/src/Makefile.in $" HEADREV := "$Revision: 2467 $" VERSION = $(shell perl -e '($$r) = (q( $(HEADREV) ) =~ m:(\d+):); $$t= q( $(HEADURL) ) =~ m:/tags/([^/]+): ? $$1 : "trunk"; print "$$t:$$r\n";' ) CFLAGS := @CFLAGS@ CFLAGS += -Wall -funsigned-char -Os -DFSVS_VERSION='"$(VERSION)"' LDFLAGS := @LDFLAGS@ FSVS_LDFLAGS = $(LDFLAGS) -lsvn_subr-1 -lsvn_delta-1 -lsvn_ra-1 -lpcre -lgdbm -ldl EXTRALIBS := @EXTRALIBS@ WAA_CHARS?= @WAA_WC_MD5_CHARS@ ifdef RPATH LDFLAGS += -Wl,-rpath,$(RPATH) endif ifeq (@ENABLE_DEBUG@, 1) CFLAGS += -DDEBUG -g LDFLAGS += -g ifeq (@ENABLE_GCOV@, 1) CFLAGS += -fprofile-arcs -ftest-coverage LDFLAGS += -fprofile-arcs endif endif # CFLAGS += -m64 -Wpadded # LDFLAGS += -m64 C_FILES := $(wildcard *.c) H_FILES := $(wildcard *.h) D_FILES := $(C_FILES:%.c=.%.d) DEST := fsvs ################################ Targets ################################### ifeq (@CHROOTER_JAIL@, ) all: deps tags check-version check-dox $(DEST) lsDEST else all: tools/fsvs-chrooter endif check-version: config.h fsvs.c @dev/check-version-output.pl $^ check-dox: options.c dox/options.dox @dev/check-option-docs.pl $^ tags: $(C_FILES) $(wildcard *.h) @echo " $@" @-ctags $^ @echo ":au BufNewFile,BufRead *.c syntax keyword Constant" $(shell grep -v "^!" 
< $@ | cut -f1 | grep _) > .vimrc .IGNORE: tags clean: rm -f *.o *.s $(D_FILES) $(DEST) 2> /dev/null || true lsDEST: $(DEST) @ls -la $< version: @echo $(VERSION) version-nnl: @perl -e '$$_=shift; s/\s+$$//; print;' $(VERSION) .SILENT: version.nnl version .PHONY: version-nnl version ################################ Distribution ############################### bindir = @bindir@ exec_prefix= @exec_prefix@ prefix = @prefix@ mandir = @mandir@ install: mkdir -p /etc/fsvs /var/spool/fsvs $(bindir) /etc/fsvs/svn/auth/svn.{simple,ssl.server,ssl.client-passphrase} install -m 0755 $(DEST) $(DESTDIR)/$(bindir) # install -m 0644 ../doc/fsvs.1 $(DESTDIR)/(mandir) # No automatic rebuild (?) #../doc/USAGE: $(C_FILES) $(H_FILES) #.PHONY: ../doc/USAGE DOXDIR=../../doxygen/html/ MANDIR=../../doxygen/man/man1/ MANDEST=../doc/ DOXFLAG=../../doxygen/html/index.html $(DOXFLAG): ( cat doxygen-data/Doxyfile-man ; echo PROJECT_NUMBER=$(VERSION)) | doxygen - ( cat doxygen-data/Doxyfile ; echo PROJECT_NUMBER=$(VERSION)) | doxygen - # Change the /§* to the correct /* cd $(DOXDIR) && perl -i.bak -pe '1 while s#([/*])\xc2?\xa7([/*])#\1\2#;' *.html cd $(MANDIR) && perl -i.bak -pe '1 while s#([/*])\xc2?\xa7([/*])#\1\2#;' *.? rm $(DOXDIR)/*.bak $(DOXDIR)/html-doc.zip || true cd $(DOXDIR)/.. && zip -rq9 html-doc.zip html -x 'html/.svn/*' && tar -cf html-doc.tar --exclude .svn html && bzip2 -vkf9 html-doc.tar && gzip -vf9 html-doc.tar $(DOXDIR)/group__cmds.html: $(DOXFLAG) touch $@ $(DOXDIR)/group__ignpat.html: $(DOXFLAG) touch $@ # Fix for badly generated man page (Doxygen) # Some other idea? Is there some other workaround? 
$(MANDEST)/fsvs.1: $(MANDIR)/cmds.1 tools/man-repair.pl $@ "FSVS - fast versioning tool" < $< $(MANDEST)/fsvs-howto-backup.5: $(MANDIR)/howto_backup.1 tools/man-repair.pl $@ "FSVS - Backup HOWTO" < $< $(MANDEST)/fsvs-howto-master_local.5: $(MANDIR)/howto_master_local.1 tools/man-repair.pl $@ "FSVS - Master/Local HOWTO" < $< $(MANDEST)/fsvs-options.5: $(MANDIR)/options.1 tools/man-repair.pl $@ "FSVS - Options and configfile" < $< $(MANDEST)/fsvs-url-format.5: $(MANDIR)/url_format.1 tools/man-repair.pl $@ "FSVS - URL format" < $< $(MANDEST)/fsvs-groups.5: $(MANDIR)/groups_spec.1 tools/man-repair.pl $@ "FSVS - Group definitions" < $< $(MANDEST)/fsvs-ignore-patterns.5: $(MANDIR)/ignpat.1 tools/man-repair.pl $@ "FSVS - Ignore definitions" < $< ../doc/USAGE: $(DOXDIR)/group__cmds.html dev/dox2txt.pl $< > $@ ../doc/IGNORING: $(DOXDIR)/group__ignpat.html dev/dox2txt.pl $< > $@ doc.g-c: ../doc/USAGE # Generate static text strings ( cat $< ; echo "end" ) | dev/make_doc.pl > $@ docs: $(DOXFLAG) ../doc/USAGE ../doc/IGNORING doc.g-c docs: $(MANDEST)/fsvs.1 $(MANDEST)/fsvs-options.5 docs: $(MANDEST)/fsvs-url-format.5 $(MANDEST)/fsvs-groups.5 docs: $(MANDEST)/fsvs-howto-backup.5 $(MANDEST)/fsvs-howto-master_local.5 .PHONY: docs $(DOXFLAG) ################################ Rules ###################################### %.o: %.c @echo " CC $<" @$(CC) $(CFLAGS) -c -o $@ $< # if the Makefile has changed, the output will (at least sometimes) # change, too. 
$(DEST): $(C_FILES:%.c=%.o) @echo " Link $@" @$(CC) $(FSVS_LDFLAGS) $(LDLIBS) $(LIBS) $(EXTRALIBS) -o $@ $^ ifeq (@ENABLE_RELEASE@, 1) -strip $@ endif # For debugging: generate preprocessed, generate assembler %.s: %.c $(CC) $(CFLAGS) -S -fverbose-asm -o $@ $< || true %.P : %.c $(CC) $(CFLAGS) -E -o $@ $< ############################### Dependencies ################################ deps: $(D_FILES) .%.d: %.c @echo " deps for $<" @$(CC) $(INCS) -MM $< | perl -pe 's#\bdoc.g-c\b##' > $@ # $(CC) $(INCS) -MM -MF $@ $< include $(D_FILES) tools/fsvs-chrooter: tools/fsvs-chrooter.c tools/fsvs-chrooter: interface.h config.h ############################### GCov Usage ################################ ifeq (@ENABLE_GCOV@, 1) GCOV_FILES := $(C_FILES:%.c=%.c.gcov) GCOV_SMRY_FILES := $(GCOV_FILES:%.gcov=%.gcov.smry) GCOV_DATA := $(C_FILES:%.c=%.gcda) $(C_FILES:%.c=%.gcno) gcov: $(GCOV_FILES) @dev/gcov-summary.pl $(GCOV_SMRY_FILES) %.c.gcov: %.c @gcov -f $< > $<.gcov.smry # -b -c gcov-clean: rm -f *.gcov *.gcov.smry *.gcda 2> /dev/null || true gcov-unused-funcs: grep -B1 ":0.00%" *.gcov.smry .PHONY: gcov gcov-clean endif ################################ Statistics ################################# diffstat: svk diff | diffstat count: @echo "sum of lines: "`cat $(C_FILES) $(H_FILES) | wc -l -` @echo "sum w/o comments, {, }, empty lines: "`perl -e 'undef $$/; while (<>) { 1 while s#//.*##; 1 while s#/\\*[\\x00-\\xff]*?\\*/##; 1 while s#\s*[{}]\s*##; $$c++ while s#[\r\n]+# #; }; sub END { print $$c,"\n" } ' $(C_FILES) $(H_FILES)` revcount: count @last_rev=$(shell svk info | grep Revision | cut -d" " -f2) ; echo "number of edits up to revision $$last_rev:" ; for r in `seq 2 $$last_rev` ; do svk diff -r`expr $$r - 1`:$$r /svn2/trunk ; done | perl -pe 's#\ssrc/# #g;' | diffstat structs: $(DEST) @for a in `perl -ne 'print $$1,"\n" if m#^\s*struct\s+(\w+)\s+{\s*$$#' $(C_FILES) $(H_FILES)` ; do printf "%-30s " "struct $$a" ; gdb --batch -ex "printf \"\t%6d\", sizeof(struct $$a)" $(DEST) | 
cut -f2 -d= ; done 2>&1 | sort -k3 -n .PHONY: revcount count diffstat ################################ Testing #################################### run-tests: $(DEST) WAA_CHARS=$(WAA_CHARS) $(MAKE) -C ../tests BINARY=$(shell pwd)/$(DEST) $(TESTS) ifeq (@ENABLE_GCOV@, 1) # I don't know why, but gcov wants to open the .gcda and .gcno # files Read-Write. I filed a bug report for this. # If the tests are run as root (which is currently necessary because # of the devices and device-tests), the normal user who compiled # the sources will not be allowed to open this files ... # # Not all files have code .. and so not all files (of the generated list) # will exist; therefore "true". -@chmod 777 $(GCOV_DATA) > /dev/null 2>&1 endif ext-tests: $(DEST) dev/permutate-all-tests .PHONY: run-tests ext-tests ################################ -- THE END -- ############################## ## vi: ts=8 sw=8 fsvs-1.2.6/src/options.h0000644000202400020240000002314411556526402014114 0ustar marekmarek/************************************************************************ * Copyright (C) 2007-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __SETTINGS_H__ #define __SETTINGS_H__ #include "global.h" #include "actions.h" /** \file * Functions dealing with user settings. */ /** \name A list of supported settings/options. * @{ */ enum opt__settings_e { /* Output options */ /** Option how paths should be shown. See also \ref opt_paths_t and \ref * o_opt_path. */ OPT__PATH=0, /** The maximum number of revisions on \c log output. * See \ref o_logmax. */ OPT__LOG_MAXREV, /** The option bits for log output. * See \ref o_logoutput. */ OPT__LOG_OUTPUT, /** Whether to pipe to colordiff. * Currently yes/no/auto; possibly path/"auto"/"no"? * See \ref o_colordiff. 
*/ OPT__COLORDIFF, /** Should directory listings be sorted? * See \ref o_dir_sort. */ OPT__DIR_SORT, /** Should the status output be colored? * See \ref o_colordiff*/ OPT__STATUS_COLOR, /** Stop on change. * See \ref o_stop_change*/ OPT__STOP_ON_CHANGE, /** Exclude directories where only the mtime metadata changed, see \ref o_dir_exclude_mtime. */ OPT__DIR_EXCLUDE_MTIME, /** The filter mask as given with \ref o_filter "-f". */ OPT__FILTER, /** Whichs change checks to perform, \ref o_chcheck. */ OPT__CHANGECHECK, /** Whether all removed entries should be printed as removed, or only the * base directory. */ OPT__ALL_REMOVED, /** Verbosity flags, see \ref o_verbose. */ OPT__VERBOSE, /** Path for debug output. * See \ref glob_opt_deb. */ OPT__DEBUG_OUTPUT, /** Size of debug buffer. * See \ref glob_opt_deb. */ OPT__DEBUG_BUFFER, /** Show grouping statistics. * See \ref o_group_stats. */ OPT__GROUP_STATS, /* merge/diff options */ /** How conflicts on update should be handled. * See \ref o_conflict. */ OPT__CONFLICT, /** Default options for the merge program. * See \ref o_merge. */ OPT__MERGE_OPT, /** Name of the merge binary to be used. * See \ref o_merge. */ OPT__MERGE_PRG, /** Which program should be called. * See \ref o_diff. */ OPT__DIFF_PRG, /** Default options for the diff program. * See \ref o_diff. */ OPT__DIFF_OPT, /** Extra options for the diff program. * See \ref o_diff. */ OPT__DIFF_EXTRA, /** Set warning levels. * See \ref o_warnings */ OPT__WARNINGS, /** WAA root directory; per definition no PATH_SEPARATOR at the end. * See \ref o_softroot. */ OPT__SOFTROOT, /** Which URL to commit to. * See \ref o_commit_to. */ OPT__COMMIT_TO, /** Whether an empty commit message is allowed. * See \ref o_empty_msg.*/ OPT__EMPTY_MESSAGE, /** Whether the base directory in the repos should be created, if it does * not already. * See \ref o_mkdir_base. */ OPT__MKDIR_BASE, /** The author for commit. * See \ref o_author. 
*/ OPT__AUTHOR, /** Whether commits without changes should be done. * See \ref o_empty_commit. */ OPT__EMPTY_COMMIT, /** Should commit wait for the next full second? * If shells would export \c $- we could do an \c auto value as well. * See \ref o_delay. */ OPT__DELAY, /** Do expensive copyfrom checks? * See \ref o_copyfrom_exp */ OPT__COPYFROM_EXP, /** Set a global password, for anonymous co/ci. * See \ref o_passwd. */ OPT__PASSWD, /** The base path of the WAA. * See \ref o_waa. */ OPT__WAA_PATH, /** The base path of the configuration area. * See \ref o_conf. */ OPT__CONF_PATH, /** The config directory to use. * See \ref o_configdir. */ OPT__CONFIG_DIR, /** End of enum marker. */ OPT__COUNT }; /** @} */ /** \name List of priority levels for settings loading. * @{ */ enum opt__prio_e { /** Default value in program. */ PRIO_DEFAULT=0, /** Value from \c /etc/fsvs/config, or at least from \c $FSVS_CONF/config. * */ PRIO_ETC_FILE, /** Value read from ~/.fsvs/config. */ PRIO_USER_FILE, /** Value read from \c $FSVS_CONF/$wc_dir/Config. */ PRIO_ETC_WC, /** Value read from \c ~/$wc_dir/Config. */ PRIO_USER_WC, /** Value read from environment variable. */ PRIO_ENV, /** Value assumed from external state, but overrideable. * Example: colors for log output; should not be printed when redirected * into a file, except if explicitly told so on the command line. */ PRIO_PRE_CMDLINE, /** Value given on commandline. */ PRIO_CMDLINE, /** Internal requirement. */ PRIO_MUSTHAVE, }; /* Pre-declaration. */ struct opt__list_t; /** An option string parsing function. */ typedef int (opt___parse_t)(struct opt__list_t *, char *, enum opt__prio_e); /** An option entry. * */ struct opt__list_t { /** Name of the option. */ char name[24]; /** Function to convert the string into a value. */ opt___parse_t *parse; /** Arbitrary parameter for the function. */ const void *parm; /** Result, if it's a string. */ const char *cp_val; /** Result, if it's an int. For a string its length is stored. 
*/ int i_val; /** At which priority it has been written yet. */ enum opt__prio_e prio; }; /** The list of all options. * Must be accessible. */ extern struct opt__list_t opt__list[OPT__COUNT]; /** Read the integer value of an option. */ static inline __attribute__((const)) FASTCALL int opt__get_int(enum opt__settings_e which) { return opt__list[which].i_val; } /** Read the string value of an option. */ static inline __attribute__((const)) FASTCALL const char* opt__get_string(enum opt__settings_e which) { return opt__list[which].cp_val; } /** Get the priority for an option. */ static inline __attribute__((const)) FASTCALL enum opt__prio_e opt__get_prio(enum opt__settings_e which) { return opt__list[which].prio; } /** Set the integer value of an option. */ static inline FASTCALL void opt__set_int(enum opt__settings_e which, enum opt__prio_e prio, int val) { if (opt__list[which].prio <= prio) { opt__list[which].i_val=val; opt__list[which].prio=prio; } } /** Set the string value of an option. * Will not get modified, unless a reader changes data. * */ static inline FASTCALL void opt__set_string(enum opt__settings_e which, enum opt__prio_e prio, char *stg) { if (opt__list[which].prio <= prio) { opt__list[which].cp_val=stg; opt__list[which].prio=prio; } } /** Parse the string for the option. */ int opt__parse_option(enum opt__settings_e which, enum opt__prio_e prio, char *string); /** Find the option, and parse the string. */ int opt__parse(char *key, char *value, enum opt__prio_e prio, int quiet_errors); /** Load options from the environment. */ int opt__load_env(char **env); /** Maximum length of a line in a settings file. */ #define OPT__MAX_LINE_LEN (512) /** Load options from a file. * Will use hlp__vpathcopy(), with parameters swapped (\a prio first). */ int opt__load_settings(char *path, char *name, enum opt__prio_e prio); /** Returns \c 0 if the \a string is an \b off value (like \c off, \c * false, or \c no). 
*/ int opt__doesnt_say_off(const char *string); /** Return the variable name from an option. */ char *opt__variable_from_option(enum opt__settings_e which); /** \name Specific data for single options. * @{ */ /** \name Printing of paths. * See \ref o_opt_path. * @{ */ /** Path-printing enumeration */ enum opt_paths_t { /** \ref pd_wcroot */ PATH_WCRELATIVE=0, /** \ref pd_parm */ PATH_PARMRELATIVE, /** \ref pd_absolute*/ PATH_ABSOLUTE, /** \ref pd_env */ PATH_CACHEDENVIRON, /** \ref pd_env */ PATH_FULLENVIRON, }; /** @} */ /** \name List of constants for \ref o_delay option. * @{ */ enum opt__delay_e { DELAY_CHECKOUT = 1 << 0, DELAY_COMMIT = 1 << 1, DELAY_UPDATE = 1 << 2, DELAY_REVERT = 1 << 3, }; /** @} */ /** \name List of constants for \ref o_chcheck option. * @{ */ enum opt__chcheck_e { CHCHECK_NONE = 0, CHCHECK_FILE = 1 << 0, CHCHECK_DIRS = 1 << 1, CHCHECK_ALLFILES = 1 << 2, }; /** @} */ /** \name List of constants for \ref o_verbose option. * They are ordered for use by "-v"; the later don't matter that much, * though. * @{ */ enum opt__verbosity_e { VERBOSITY_VERYQUIET = 0, VERBOSITY_QUIET = 1 << 0, VERBOSITY_SHOWCHG = 1 << 1, VERBOSITY_SHOWSIZE = 1 << 2, VERBOSITY_SHOWNAME = 1 << 3, VERBOSITY_DEFAULT = VERBOSITY_SHOWCHG | VERBOSITY_SHOWSIZE | VERBOSITY_SHOWNAME, /* We cannot easily include helper.h for * hlp__rightmost_0_bit(VERBOSITY_DEFAULT) * so we have to cheat it - which works, as we know the bitmasks set on * this value. */ VERBOSITY_DEFAULT_v = VERBOSITY_DEFAULT | (VERBOSITY_DEFAULT << 1), VERBOSITY_SHOWTIME = 1 << 4, VERBOSITY_COPYFROM = 1 << 5, VERBOSITY_TOP_URL = 1 << 6, VERBOSITY_GROUP = 1 << 7, VERBOSITY_ALL_URLS = 1 << 8, VERBOSITY_STACKTRACE= 1 << 16, }; /** @} */ static inline unsigned opt__verbosity() { /* Don't compare signed, because with "all" the numbers appear negative. * */ return (unsigned)opt__get_int(OPT__VERBOSE); } /** Greater than zero if additional details are wanted, or negative for * quiet operation. 
*/ static inline int opt__is_verbose(void) { if (opt__verbosity() > VERBOSITY_DEFAULT) return +1; if (opt__verbosity() < VERBOSITY_DEFAULT) return -1; return 0; } /** \name List of constants for \ref o_conflict option. * @{ */ enum opt__conflict_e { CONFLICT_STOP=0, CONFLICT_LOCAL, CONFLICT_REMOTE, CONFLICT_BOTH, CONFLICT_MERGE, }; /** @} */ /** Filter value to print \b all entries. */ #define FILTER__ALL (-1) /** Generic yes/no/auto config values. * @{ */ #define OPT__YES (1) #define OPT__NO (0) /** @} */ /** @} */ /** For showing/changing options. */ work_t opt__work; #endif fsvs-1.2.6/src/actions.c0000644000202400020240000000525511142237705014053 0ustar marekmarek/************************************************************************ * Copyright (C) 2005-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include #include "actions.h" #include "est_ops.h" #include "global.h" #include "checksum.h" #include "options.h" #include "waa.h" /** \file * Common functions for action (name) handling. */ /** This wrapper-callback for the current action callback calculates the * path and fills in the \c entry_type for the current \a sts, if * necessary. */ int ac__dispatch(struct estat *sts) { int status; status=0; if (!action->local_callback) goto ex; /* We cannot really test the type here; on update we might only know that * it's a special file, but not which type exactly. */ #if 0 BUG_ON(!( S_ISDIR(sts->st.mode) || S_ISREG(sts->st.mode) || S_ISCHR(sts->st.mode) || S_ISBLK(sts->st.mode) || S_ISLNK(sts->st.mode) ), "%s has mode 0%o", sts->name, sts->st.mode); #endif if (ops__allowed_by_filter(sts) || (sts->entry_status & FS_CHILD_CHANGED)) { /* If * - we want to see all entries, * - there's no parent that could be removed ("." 
is always there), or * - the parent still exists, * we print the entry. */ if (opt__get_int(OPT__ALL_REMOVED) || !sts->parent || (sts->parent->entry_status & FS_REPLACED)!=FS_REMOVED) STOPIF( action->local_callback(sts), NULL); } else DEBUGP("%s is not the entry you're looking for", sts->name); ex: return status; } /** Given a string \a cmd, return the corresponding action entry. * Used by commandline parsing - finding the current action, and * which help text to show. */ int act__find_action_by_name(const char *cmd, struct actionlist_t **action_p) { int i, status; struct actionlist_t *action_v; int match_nr; char const* const* cp; size_t len; status=0; len=strlen(cmd); match_nr=0; action_v=action_list; for (i=action_list_count-1; i >=0; i--) { cp=action_list[i].name; while (*cp) { if (strncmp(*cp, cmd, len) == 0) { action_v=action_list+i; /* If it's am exact match, we're done. * Needed for "co" (checkout) vs. "commit". */ if (len == strlen(*cp)) goto done; match_nr++; break; } cp++; } } STOPIF_CODE_ERR( match_nr <1, ENOENT, "!Action \"%s\" not found. Try \"help\".", cmd); STOPIF_CODE_ERR( match_nr >=2, EINVAL, "!Action \"%s\" is ambiguous. Try \"help\".", cmd); done: *action_p=action_v; ex: return status; } fsvs-1.2.6/src/build.c0000644000202400020240000000717211264677022013517 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #include #include "global.h" #include "waa.h" #include "helper.h" #include "options.h" #include "url.h" #include "build.h" /** \file * \ref _build_new_list action file. * * */ /** \addtogroup cmds * * \section _build_new_list _build_new_list * * This is used mainly for debugging. 
* It traverses the filesystem and builds a new entries file. * In production it should not be used; as neither URLs nor the revision of * the entries is known, information is lost by calling this function! * * Look at \ref sync-repos. */ /** Traverse the filesystem, build a tree, and store it as WC. * Doesn't do anything with the repository. */ int bld__work(struct estat *root, int argc, char *argv[]) { int status; STOPIF( waa__find_base(root, &argc, &argv), NULL); STOPIF( url__load_list(NULL, 0), NULL); /* If there are any URLs, we use the lowest-priority. * Any sync-repos will correct that. */ current_url=urllist[urllist_count-1]; root->do_userselected = 1; opt_recursive=1; STOPIF( waa__build_tree(root), NULL); DEBUGP("build tree, now saving"); STOPIF( waa__output_tree(root), NULL); ex: return status; } /** \addtogroup cmds * \section delay * * This command delays execution until time has passed at least to the next * second after writing the data files used by FSVS (\ref dir "dir" and * \ref urls "urls"). * * This command is for use in scripts; where previously the \ref delay * "delay" option was used, this can be substituted by the given command * followed by the \c delay command. * * The advantage against the \ref o_delay "delay" option is that read-only * commands can be used in the meantime. * * An example: * \code * fsvs commit /etc/X11 -m "Backup of X11" * ... read-only commands, like "status" * fsvs delay /etc/X11 * ... read-write commands, like "commit" * \endcode * * The optional path can point to any path in the WC. * * In the testing framework it is used to save a bit of time; in normal * operation, where FSVS commands are not so tightly packed, it is normally * preferable to use the \ref o_delay "delay" option. * */ /** Waits until the \c dir and \c Urls files have been modified in the * past, ie their timestamp is lower than the current time (rounded to * seconds.) 
*/ int delay__work(struct estat *root, int argc, char *argv[]) { int status; int i; time_t last; struct sstat_t st; char *filename, *eos; char *list[]= { WAA__DIR_EXT, WAA__URLLIST_EXT }; STOPIF( waa__find_base(root, &argc, &argv), NULL); if (opt__is_verbose() > 0) printf("Waiting on WC root \"%s\"\n", wc_path); last=0; for(i=0; i last) last=st.mtim.tv_sec; } } DEBUGP("waiting until %llu", (t_ull)last); opt__set_int(OPT__DELAY, PRIO_MUSTHAVE, -1); STOPIF( hlp__delay(last, 1), NULL); ex: return status; } fsvs-1.2.6/src/add_unvers.h0000644000202400020240000000136711202211121014527 0ustar marekmarek/************************************************************************ * Copyright (C) 2006-2009 Philipp Marek. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. ************************************************************************/ #ifndef __ADD_UNVERS_H__ #define __ADD_UNVERS_H__ #include "actions.h" /** \file * \ref add and \ref unversion action header file. * */ /** For adding/unversioning files. */ work_t au__work; /** Worker function. */ action_t au__action; /** In case we need to handle new entries we might have to assign an URL to * them. */ int au__prepare_for_added(void); #endif fsvs-1.2.6/configure.in0000644000202400020240000002332712043532166013771 0ustar marekmarek# -*- Autoconf -*- # Process this file with autoconf to produce a configure script. AC_PREREQ(2.60) AC_INIT(fsvs, [esyscmd(make --quiet --no-print-directory -f Makefile.in version-nnl 2>/dev/null)], http://fsvs.tigris.org/) AC_GNU_SOURCE # if [[ "x$cache_file" == /dev/null ]] # then # cache_file=config.cache # fi # AC_CACHE_LOAD AC_CONFIG_SRCDIR([src/actions.c]) AC_CONFIG_HEADERS([src/config.h]) AC_MSG_NOTICE([*** Now configuring FSVS ]AC_PACKAGE_VERSION[ ***]) # Checks for programs. 
AC_PROG_CC AC_PROG_CPP ##################################### Header files INCDIRS="/usr/local/include /usr/include /openpkg/include " # The subversion headers do a #include , so the APR libraries # *have* to be directly specified. # Furthermore there's apr-1/ as directory name, depending on apr version. # Is there something like this available for subversion? AC_ARG_WITH(aprinc, AC_HELP_STRING([--with-aprinc=PATH], [Specify an include directory for the APR headers.]), [ INCDIRS="$INCDIRS $withval" ], [ if APR=`apr-1-config --includedir || apr-config --includedir` then INCDIRS="$INCDIRS $APR" fi ]) AC_ARG_WITH(svninc, AC_HELP_STRING([--with-svninc=PATH], [Specify an include directory for the subversion headers.]), [ INCDIRS="$INCDIRS $withval" ]) AC_ARG_WITH(svninc, AC_HELP_STRING([--with-svninc=PATH], [Specify an include directory for the subversion headers.]), [ INCDIRS="$INCDIRS $withval" ]) AC_ARG_WITH(waa_md5, AC_HELP_STRING([--with-waa_md5=NUMBER], [Specifies how many hex characters of the MD5 of the working copy root should be used to address the data in the WAA. This may be increased if you have a lot of different working copies on a single machine. The default is 0; useful values are 0, and from 6 to 32.]), [ # The shell gives an error on numeric comparision with a non-numeric # value. # We allow from 3 characters on, although it might not make much # sense. WAA_WC_MD5_CHARS=`perl -e '$_=0+shift; print $_+0 if $_==0 || ($_>3 && $_<=16)' "$withval"` if [[ "$WAA_WC_MD5_CHARS" = "" ]] then AC_MSG_ERROR([[The given value for --with-waa_md5 is invalid.]]) fi ], [ WAA_WC_MD5_CHARS=0 ]) AC_DEFINE_UNQUOTED(WAA_WC_MD5_CHARS, $WAA_WC_MD5_CHARS, [Number of bytes for WAA addressing is $WAA_WC_MD5_CHARS.]) AC_SUBST(WAA_WC_MD5_CHARS) CFLAGS="$CFLAGS -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64" for dir in $INCDIRS do # using -I would result in the files being _non_ system include # directories, ie. they'd clutter the dependency files. # That's why -idirafter is used. 
CFLAGS="$CFLAGS -idirafter $dir" done AC_DEFINE_UNQUOTED(CFLAGS, [$CFLAGS]) AC_SUBST(CFLAGS) AC_MSG_NOTICE(["CFLAGS=$CFLAGS"]) ##################################### Linker LIBDIRS="/usr/local/lib /openpkg/lib" AC_ARG_WITH(aprlib, AC_HELP_STRING([--with-aprlib=PATH], [Specify a directory containing APR libraries.]), [ LIBDIRS="$LIBDIRS $withval" ]) AC_ARG_WITH(svnlib, AC_HELP_STRING([--with-svnlib=PATH], [Specify a directory containing subversion libraries.]), [ LIBDIRS="$LIBDIRS $withval" ]) for dir in $LIBDIRS do LDFLAGS="$LDFLAGS -L$dir" done AC_DEFINE_UNQUOTED(LDFLAGS, [$LDFLAGS]) AC_SUBST(LDFLAGS) AC_MSG_NOTICE(["LDFLAGS=$LDFLAGS"]) EXTRALIBS="-laprutil-1 -lapr-1" if [[ `uname -s` = "SunOS" ]] then # Solaris 10, thanks Peter. EXTRALIBS="-lsocket -lnsl $EXTRALIBS" fi if [[ `uname -s` = "Darwin" ]] then # OSX 10.6 - thanks, Florian. EXTRALIBS="-liconv $EXTRALIBS" have_fmemopen=no fi AC_DEFINE_UNQUOTED(EXTRALIBS, [$EXTRALIBS]) AC_SUBST(EXTRALIBS) ##################################### Checks # Checks for libraries. AC_CHECK_LIB([pcre], [pcre_compile], [], [AC_MSG_FAILURE([Sorry, can't find PCRE.])]) AC_CHECK_LIB([aprutil-1], [apr_md5_init], [], [AC_MSG_FAILURE([Sorry, can't find APR.])]) AC_CHECK_LIB([svn_delta-1], [svn_txdelta_apply], [], [AC_MSG_FAILURE([Sorry, can't find subversion.])]) AC_CHECK_LIB([svn_ra-1], [svn_ra_initialize], [], [AC_MSG_FAILURE([Sorry, can't find subversion.])]) AC_CHECK_LIB([gdbm], [gdbm_firstkey], [], [AC_MSG_FAILURE([Sorry, can't find gdbm.])]) # Checks for header files. 
AC_HEADER_STDC AC_CHECK_HEADERS([fcntl.h stddef.h stdlib.h string.h sys/time.h unistd.h pcre.h ], [], [AC_MSG_FAILURE([Needed header file not found.])]) #apr_file_io.h subversion-1/svn_md5.h]) AC_HEADER_DIRENT AC_CHECK_MEMBERS([struct stat.st_mtim]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [[ #include ]], [[ VALGRIND_MAKE_MEM_DEFINED(0, 2); ]] )], [have_valgrind=yes], [have_valgrind=no]) if test x$have_valgrind = xyes ; then AC_DEFINE(HAVE_VALGRIND, 1, compatible valgrind version found) else AC_MSG_NOTICE([No compatible valgrind version.]) fi # Check whether S_IFMT is dense, ie. a single block of binary ones. # If it isn't, the bitcount wouldn't tell the needed bits to represent the # data. # If S_IFMT is dense, the increment results in a single carry bit. # Checked via changing /usr/include/bits/stat.h. AC_RUN_IFELSE([AC_LANG_SOURCE([ #include "src/preproc.h" int main(int argc, char **args) { if (_BITCOUNT( (S_IFMT >> MODE_T_SHIFT_BITS) + 1) == 1) return 0; else return 1; } ])], [AC_MSG_NOTICE([S_IFMT is ok.])], [AC_MSG_FAILURE([You have a sparse S_IFMT. Please tell the dev@ mailing list.])]) AC_CHECK_HEADERS([linux/kdev_t.h]) AC_ARG_ENABLE(dev-fake, AC_HELP_STRING([--enable-dev-fake], [Include fake definitions for MAJOR(), MINOR() and MKDEV(). 
Needed if none found.]), [AC_DEFINE([ENABLE_DEV_FAKE]) ENABLE_DEV_FAKE=1], []) AC_SUBST(ENABLE_DEV_FAKE) AC_ARG_ENABLE(debug, AC_HELP_STRING([--enable-debug], [compile some extra debug checks in (valgrind, gdb) (default is no)]), [AC_DEFINE([ENABLE_DEBUG]) ENABLE_DEBUG=1], []) AC_SUBST(ENABLE_DEBUG) AC_ARG_ENABLE(gcov, AC_HELP_STRING([--enable-gcov], [whether to compile with instrumentation for gcov (default is no) (needs --enable-debug)]), [AC_DEFINE([ENABLE_GCOV]) ENABLE_GCOV=1], []) AC_DEFINE([ENABLE_GCOV]) AC_SUBST(ENABLE_GCOV) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [[ #include ]], [[ int i=O_DIRECTORY; ]] )], [have_o_directory=yes], [have_o_directory=no]) if test x$have_o_directory = xyes ; then AC_DEFINE(HAVE_O_DIRECTORY, 1, O_DIRECTORY found) fi AC_SUBST(HAVE_O_DIRECTORY) AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include ]], [[ char **environ; int main(void) { return environ == NULL; } ]] )], [need_environ_extern=no], [need_environ_extern=yes]) if test x$need_environ_extern = xyes ; then AC_DEFINE(NEED_ENVIRON_EXTERN, 1, "char **environ" needs "extern") fi AC_SUBST(NEED_ENVIRON_EXTERN) if test x$have_fmemopen = x then AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include ]], [[ int main(int argc, char *args[]) { return fmemopen(args[0], 2, args[1]) == NULL; } ]] )], [have_fmemopen=yes], [have_fmemopen=no]) fi if test x$have_fmemopen = xyes then AC_DEFINE(HAVE_FMEMOPEN, 1, [fmemopen() found]) else AC_MSG_WARN([fmemopen() not found. 
debug_buffer option not available.]) fi AC_SUBST(HAVE_FMEMOPEN) if locale -a > /dev/null 2>&1 then AC_DEFINE([HAVE_LOCALES],[1]) fi AC_SUBST(HAVE_LOCALES) AC_ARG_WITH(chroot, AC_HELP_STRING([--with-chroot=PATH], [Specify a chroot environment for the fsvs-chrooter helper.]), [ if test "$withval" = "yes" ; then AC_MSG_ERROR([--with-chroot requires an argument.]) else CHROOTER_JAIL=$withval AC_DEFINE_UNQUOTED(CHROOTER_JAIL, "$CHROOTER_JAIL", [The path of a chroot jail.]) fi ]) AC_SUBST(CHROOTER_JAIL) AC_ARG_ENABLE(release, AC_HELP_STRING([--enable-release], [whether to compile without debug messages. Makes image smaller (to about half size), but makes -d and -D inoperative. (Default is no)]), [AC_DEFINE([ENABLE_RELEASE]) ENABLE_RELEASE=1], []) AC_SUBST(ENABLE_RELEASE) if [[ "$ENABLE_RELEASE$ENABLE_DEBUG" = "11" ]] then AC_MSG_ERROR([[--enable-debug and --enable-release are incompatibel. Use one or the other.]]) fi AC_CHECK_FUNCS([getdents64]) AC_CHECK_HEADERS([linux/types.h]) AC_CHECK_HEADERS([linux/unistd.h]) AC_CHECK_TYPES([comparison_fn_t]) AC_SYS_LARGEFILE # Checks for typedefs, structures, and compiler characteristics. AC_C_CONST AC_C_INLINE AC_CHECK_MEMBERS([struct stat.st_rdev]) AC_HEADER_TIME AC_STRUCT_TM AC_DEFINE([HAS_FASTCALL]) AC_SUBST(HAS_FASTCALL) # Only i386 (32bit) has fastcall. if [[ `uname -m` = i?86 ]] then HAS_FASTCALL=1 fi AC_TYPE_UINT32_T AC_SUBST(HAVE_UINT32_T) # See config.h for an explanation. if [[ "$ac_cv_c_uint32_t" = "yes" ]] then ac_cv_c_uint32_t=uint32_t fi AC_DEFINE_UNQUOTED(AC_CV_C_UINT32_T, [$ac_cv_c_uint32_t]) AC_TYPE_UINT64_T AC_SUBST(HAVE_UINT64_T) if [[ "$ac_cv_c_uint64_t" = "yes" ]] then ac_cv_c_uint64_t=uint64_t fi AC_DEFINE_UNQUOTED(AC_CV_C_UINT64_T, [$ac_cv_c_uint64_t]) # Checks for library functions. 
AC_FUNC_CHOWN AC_FUNC_FORK AC_FUNC_MALLOC AC_FUNC_MEMCMP AC_FUNC_MMAP AC_FUNC_REALLOC AC_TYPE_SIGNAL AC_FUNC_VPRINTF AC_CHECK_FUNCS([fchdir getcwd gettimeofday memmove memset mkdir munmap rmdir strchr strdup strerror strrchr strtoul strtoull alphasort dirfd lchown lutimes strsep]) # AC_CACHE_SAVE AC_CONFIG_FILES([src/Makefile tests/Makefile]) AC_OUTPUT # Cause a recompile touch src/config.h if [ [ "$ac_cv_header_linux_kdev_t_h" = "no" -a "x$ENABLE_DEV_FAKE" = "x" ] ] then AC_MSG_WARN([ * MAJOR(), MINOR() and MAKEDEV() definitions not found. * Fake a definition, but that could make problems for ignore patterns * and commits/updates of device nodes, so these will be disabled. * Please contact dev@fsvs.tigris.org for help, or, if you know your * systems' way, to report the correct header name. * * If you *really* need to use device compares, and have *no* other way, * you could try using the --enable-dev-fake option on ./configure.]) fi # vi: ts=3 sw=3 fsvs-1.2.6/doc/0000755000202400020240000000000012554717232012224 5ustar marekmarekfsvs-1.2.6/doc/develop/0000755000202400020240000000000012554717232013662 5ustar marekmarekfsvs-1.2.6/doc/develop/UTF80000644000202400020240000000216610437241125014327 0ustar marekmarek UTF8 in FSVS ------------ Some points which trouble me a bit, and some random thoughts; everything connected with UTF-8: - Properties we get from the repository might be easiest stored locally as UTF8, if we don't do anything with them (eg. svn:entry). - In which properties can be non-ASCII-characters? Does someone define user/group names in UTF-8? Can eg. xattr have Unicode characters in them? Does that happen in practical usage? - The currently used properties should be safe. I've never heard from non-ASCII groups or users, and the mtime should always be in the same format. - I thought whether I should just do *everything* in UTF-8. But that is a performance trade off; on a simple "fsvs status" we'd have to all filenames from the waa-directory. 
It may not be much work, but if it's not necessary ... - I'd like to have the subversion headers to define a utf8_char *, which would (with gcc) be handled distinct from a normal char * ... (see linux kernel, include/linux/types.h: #define __bitwise ...) But that won't happen, as there's already too much software which relies on the current definitions. fsvs-1.2.6/doc/fsvs-ssl-setup0000644000202400020240000001112011436764405015062 0ustar marekmarekRepository Access with SSL Client Certificate (passwordless) ============================================================ This small guide explains the creation of a svn repository, that is accessible via https and client certificate authentication. Using client certificate authentication you won't neither need to supply a password on access nor you have to worry to store your password on that machine. Prerequisites: The basic configuration for access of to a repository via http is explained in http://svnbook.red-bean.com/en/1.5/svn-book.html#svn.serverconfig.httpd The steps are: a) install webdav and svn support b) configure apache2 to point to the repository c) setup of basic authentication For https access the additional steps are neccessary: a) enable ssl module for the webserver b) install ssl certificate and authority c) for passwordless access install the host key (pkcs12) If the repository is open to public it is recommended to get a certificate / host key from from an external ca-authority. Otherwise self-signed keys can be used. Creating self-signed keys ========================= Creation of self-signed keys can be done with the openssl-toolkit. It contains a script CA.pl to perform ca/certificate creation. Within Ubuntu/Debian the script can be found in /usr/lib/ssl/misc. CA.pl has a few options: $ CA.pl -h usage: CA -newcert|-newreq|-newreq-nodes|-newca|-sign|-verify usage: CA -signcert certfile keyfile|-newcert|-newreq|-newca|-sign|-verify To create a new authority use $ CA.pl -newca First a key is created. 
Afterwards a few questions about locality and company information will be asked. The ca-certificate and index files for ca-management are stored in ./default of the current directory. Creating the certificate is done via $ CA.pl -newcert This creates a new certificate. Both ca-authority, certificate and key will be used on the server where the repository is installed. Additionally a host certificate is created for the individual hosts to access the repository. $ CA.pl -newcert For use with subversion/fsvs the key needs first be converted to pkcs12. $ openssl pkcs12 -in newcert.pem -export -out $(hostname).p12 Replace $(hostname) with the hostname of your server. Installation of SSL certificate for SVN repository ================================================== A certificate .pem file contains both, the x509 certificate and the key. Before installation of the .pem file the password of the key should be removed. Otherwise on bootup the server will prompt for the password which is not convenient in HA environments. Of course the password should be removed from the servers' ssl certificate, in trusted environments, only. This command removes the password from a pem file. $ openssl rsa -in newcert.pem -out server.pem On Debian/Ubuntu, the ca-authority and the certificate should be placed in the /etc/ssl folder. The authority file should be moved to /etc/ssl/certs. The certificate that contains the key should be moved to /etc/ssl/private. Folders are created with installation of the openssl package. Configuration of CA-Authority and Certificate ============================================= The SSL configuration part for the apache server: SSLKeyFile /etc/ssl/private/newkey.pem SSLCertificate /etc/ssl/private/newkey.pem SSLAuthorityFile /etc/ssl/certs/ca.crt SSLCipherSuite HIGH:MEDIUM SSLVerifyClient require SSLVerifyDepth 1 SSLRequireSSL # ... 
SVN related config Setup Authentication ==================== Authentication is not necessary because we relay on the Client Certificate. Only issue left, is that the name of users who perform checkins will not be shown in commit messages. For this way one can use anonymous authentication. First check if module is enabled $ a2enmod authn_anon Global configuration for an host with fsvs-client: /etc/fsvs/svn/servers: [groups] fsvs = fsvs.repository.host [fsvs] ssl-client-cert-file = /etc/ssl/private/myhost.p12 ssl-client-cert-password = mysecretpass [global] ssl-authority-files = /etc/ssl/default/cacert.pem store-plaintext-passwords=yes The global svn access configuration takes place by default in /etc/fsvs/svn/servers. This can be changed on compile time with DEFAULT_CONFIGDIR_SUB in interface.h The configuration for the authentication credentials is stored in ~/.subversion. If the folder does not exists it will be created. Be aware that the initial creation tooks place with root privileges so if another svn client, running with user-only privileges, needs write access back this access should be restored e.g. via: $ chown -R username: ~/subversion. fsvs-1.2.6/doc/fsvs-options.50000644000202400020240000007135111346140057014764 0ustar marekmarek.TH "FSVS - Options and configfile" 5 "11 Mar 2010" "Version trunk:2424" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME Further options for FSVS. \- .PP List of settings that modify FSVS' behaviour. List of settings that modify FSVS' behaviour. FSVS understands some options that modify its behaviour in various small ways. .SH "Overview" .PP .SS "This document" This document lists all available options in FSVS, in an \fBfull listing\fP and in \fBgroups\fP. .PP Furthermore you can see their \fBrelative priorities\fP and some \fBexamples\fP. 
.SS "Semantic groups" .PD 0 .IP "\(bu" 2 \fBOutput settings and entry filtering\fP .IP "\(bu" 2 \fBDiffing and merging on update\fP .IP "\(bu" 2 \fBOptions for commit\fP .IP "\(bu" 2 \fBPerformance and tuning related options\fP .IP "\(bu" 2 \fBBase configuration\fP .IP "\(bu" 2 \fBDebugging and diagnosing\fP .PP .SS "Sorted list of options" FSVS currently knows: .PD 0 .IP "\(bu" 2 \fCall_removed\fP - \fBTrimming the list of deleted entries\fP .IP "\(bu" 2 \fCauthor\fP - \fBAuthor\fP .IP "\(bu" 2 \fCchange_check\fP - \fBChange detection\fP .IP "\(bu" 2 \fCcolordiff\fP - \fBUsing colordiff\fP .IP "\(bu" 2 \fCcommit_to\fP - \fBDestination URL for commit\fP .IP "\(bu" 2 \fCconflict\fP - \fBHow to resolve conflicts on update\fP .IP "\(bu" 2 \fCconf\fP - \fBPath definitions for the config and WAA area\fP. .IP "\(bu" 2 \fCconfig_dir\fP - \fBConfiguration directory for the subversion libraries\fP. .IP "\(bu" 2 \fCcopyfrom_exp\fP - \fBAvoiding expensive compares on \fBcopyfrom-detect\fP\fP .IP "\(bu" 2 \fCdebug_output\fP - \fBDestination for debug output\fP .IP "\(bu" 2 \fCdebug_buffer\fP - \fBUsing a debug buffer\fP .IP "\(bu" 2 \fCdelay\fP - \fBWaiting for a time change after working copy operations\fP .IP "\(bu" 2 \fCdiff_prg\fP, \fCdiff_opt\fP, \fCdiff_extra\fP - \fBOptions relating to the 'diff' action\fP .IP "\(bu" 2 \fCdir_exclude_mtime\fP - \fBIgnore mtime-metadata changes for directories\fP .IP "\(bu" 2 \fCdir_sort\fP - \fBDirectory sorting\fP .IP "\(bu" 2 \fCempty_commit\fP - \fBDoing empty commits\fP .IP "\(bu" 2 \fCempty_message\fP - \fBAvoid commits without a commit message\fP .IP "\(bu" 2 \fCfilter\fP - \fBFiltering entries\fP, but see \fB-f\fP. .IP "\(bu" 2 \fCgroup_stats\fP - \fBGetting grouping/ignore statistics\fP. 
.IP "\(bu" 2 \fClimit\fP - \fB'fsvs log' revision limit\fP .IP "\(bu" 2 \fClog_output\fP - \fB'fsvs log' output format\fP .IP "\(bu" 2 \fCmerge_prg\fP, \fCmerge_opt\fP - \fBOptions regarding the 'merge' program\fP .IP "\(bu" 2 \fCmkdir_base\fP - \fBCreating directories in the repository above the URL\fP .IP "\(bu" 2 \fCpath\fP - \fBDisplaying paths\fP .IP "\(bu" 2 \fCsoftroot\fP - \fBUsing an alternate root directory\fP .IP "\(bu" 2 \fCstat_color\fP - \fBStatus output coloring\fP .IP "\(bu" 2 \fCstop_change\fP - \fBChecking for changes in a script\fP .IP "\(bu" 2 \fCverbose\fP - \fBVerbosity flags\fP .IP "\(bu" 2 \fCwarning\fP - \fBSetting warning behaviour\fP, but see \fB-W\fP. .IP "\(bu" 2 \fCwaa\fP - \fBwaa\fP. .PP .SS "Priorities for option setting" The priorities are .PD 0 .IP "\(bu" 2 Command line \fI\fP(highest) .IP "\(bu" 2 Environment variables. These are named as \fCFSVS_\fP\fI{upper-case option name}\fP. .IP "\(bu" 2 \fC$HOME/.fsvs/wc-dir/config\fP .IP "\(bu" 2 \fC$FSVS_CONF/wc-dir/config\fP .IP "\(bu" 2 \fC$HOME/.fsvs/config\fP .IP "\(bu" 2 \fC$FSVS_CONF/config\fP .IP "\(bu" 2 Default value, compiled in \fI\fP(lowest) .PP .PP \fBNote:\fP .RS 4 The \fC$HOME-dependent\fP configuration files are not implemented currently. Volunteers? .RE .PP Furthermore there are 'intelligent' run-time dependent settings, like turning off colour output when the output is redirected. Their priority is just below the command line - so they can always be overridden if necessary. .SS "Examples" Using the commandline: .PP .nf fsvs -o path=environment fsvs -opath=environment .fi .PP Using environment variables: .PP .nf FSVS_PATH=absolute fsvs st .fi .PP A configuration file, from \fC$FSVS_CONF/config\fP or in a WC-specific path below \fC$FSVS_CONF\fP: .PP .nf # FSVS configuration file path=wcroot .fi .PP .SH "Output settings and entry filtering" .PP .SS "Trimming the list of deleted entries" If you remove a directory, all entries below are implicitly known to be deleted, too. 
To make the \fBstatus\fP output shorter there's the \fCall_removed\fP option which, if set to \fCno\fP, will cause children of removed entries to be omitted. .PP Example for the config file: .PP .nf all_removed=no .fi .PP .SS "Ignore mtime-metadata changes for directories" When this option is enabled, directories where only the mtime changed are not reported on \fBstatus\fP anymore. .PP This is useful in situations where temporary files are created in directories, eg. by text editors. (Example: \fCVIM\fP swapfiles when no \fCdirectory\fP option is configured). .PP Example for the config file: .PP .nf dir_exclude_mtime=yes .fi .PP .SS "Directory sorting" If you'd like to have the output of \fBstatus\fP sorted, you can use the option \fCdir_sort=yes\fP. FSVS will do a run through the tree, to read the status of the entries, and then go through it again, but sorted by name. .PP \fBNote:\fP .RS 4 If FSVS aborts with an error during \fBstatus\fP output, you might want to turn this option off again, to see where FSVS stops; the easiest way is on the command line with \fC-odir_sort=no\fP. .RE .PP .SS "Filtering entries" Please see the command line parameter for \fB-f\fP, which is identical. .PP .PP .nf fsvs -o filter=mtime .fi .PP .SS "'fsvs log' revision limit" There are some defaults for the number of revisions that are shown on a \fC'fsvs log'\fP command: .PD 0 .IP "\(bu" 2 2 revisions given (\fC-rX:Y\fP): \fCabs\fP(X-Y)+1, ie. all revisions in that range. .IP "\(bu" 2 1 revision given: exactly that one. .IP "\(bu" 2 no revisions given: from \fCHEAD\fP to 1, with a maximum of 100. .PP .PP As this option can only be used to set an upper limit of revisions, it makes most sense for the no-revision-arguments case. 
.SS "'fsvs log' output format" You can modify aspects of the \fBfsvs log\fP output format by setting the \fClog_output\fP option to a combination of these flags: .PD 0 .IP "\(bu" 2 \fCcolor:\fP This uses color in the output, similar to \fCcg-log\fP (\fCcogito-log\fP); the header and separator lines are highlighted. .PP \fBNote:\fP .RS 4 This uses ANSI escape sequences, and tries to restore the default color; if you know how to do that better (and more compatible), please tell the developer mailing list. .RE .PP .IP "\(bu" 2 \fCindent:\fP Additionally you can shift the log message itself a space to the right, to make the borders clearer. .PP .PP Furthermore the value \fCnormal\fP is available; this turns off all special handling. .PP \fBNote:\fP .RS 4 If you start such an option, the value is reset; so if you specify \fClog_output=color\fP,indent in the global config file, and use \fClog_output=color\fP on the commandline, only colors are used. This is different to the \fBFiltering entries\fP option, which is cumulating. .RE .PP .SS "Displaying paths" You can specify how paths printed by FSVS should look like; this is used for the entry status output of the various actions, and for the diff header lines. .PP There are several possible settings, of which one can be chosen via the \fCpath\fP option. .PP .PD 0 .IP "\(bu" 2 \fCwcroot\fP .br This is the old, traditional FSVS setting, where all paths are printed relative to the working copy root. .PP .IP "\(bu" 2 \fCparameter\fP .br With this setting FSVS works like most other programs - it uses the first best-matching parameter given by the user, and appends the rest of the path. .br This is the new default. .PP \fBNote:\fP .RS 4 Internally FSVS still first parses all arguments, and then does a single run through the entries. So if some entry matches more than one parameter, it is printed using the first match. .RE .PP .IP "\(bu" 2 \fCabsolute\fP .br All paths are printed in absolute form. 
This is useful if you want to paste them into other consoles without worrying whether the current directory matches, or for using them in pipelines. .PP .PP The next two are nearly identical to \fCabsolute\fP, but the beginning of paths are substituted by environment variables. This makes sense if you want the advantage of full paths, but have some of them abbreviated. .PD 0 .IP "\(bu" 2 \fCenvironment\fP .br Match variables to directories after reading the known entries, and use this cached information. This is faster, but might miss the best case if new entries are found (which would not be checked against possible longer hits). .br Furthermore, as this works via associating environment variables to entries, the environment variables must at least match the working copy base - shorter paths won't be substituted. .IP "\(bu" 2 \fCfull-environment\fP .br Check for matches just before printing the path. .br This is slower, but finds the best fit. .PP \fBNote:\fP .RS 4 The string of the environment variables must match a directory name; the filename is always printed literally, and partial string matches are not allowed. Feedback wanted. .PP Only environment variables whose names start with \fCWC\fP are used for substitution, to avoid using variables like \fC$PWD\fP, \fC$OLDPWD\fP, \fC$HOME\fP and similar which might differ between sessions. Maybe the allowed prefixes for the environment variables should be settable in the configuration. Opinions to the users mailing list, please. .RE .PP .PP .PP Example, with \fC/\fP as working copy base: .PP .nf $ cd /etc $ fsvs -o path=wcroot st .mC. 1001 ./etc/X11/xorg.conf $ fsvs -o path=absolute st .mC. 1001 /etc/X11/xorg.conf $ fsvs -o path=parameters st .mC. 1001 X11/xorg.conf $ fsvs -o path=parameters st . .mC. 1001 ./X11/xorg.conf $ fsvs -o path=parameters st / .mC. 1001 /etc/X11/xorg.conf $ fsvs -o path=parameters st X11 .mC. 1001 X11/xorg.conf $ fsvs -o path=parameters st ../dev/.. .mC. 
1001 ../dev/../etc/X11/xorg.conf $ fsvs -o path=parameters st X11 ../etc .mC. 1001 X11/xorg.conf $ fsvs -o path=parameters st ../etc X11 .mC. 1001 ../etc/X11/xorg.conf $ fsvs -o path=environ st .mC. 1001 ./etc/X11/xorg.conf $ WCBAR=/etc fsvs -o path=environ st .mC. 1001 $WCBAR/X11/xorg.conf $ WCBAR=/etc fsvs -o path=environ st / .mC. 1001 $WCBAR/X11/xorg.conf $ WCBAR=/e fsvs -o path=environ st .mC. 1001 /etc/X11/xorg.conf $ WCBAR=/etc WCFOO=/etc/X11 fsvs -o path=environ st .mC. 1001 $WCFOO/xorg.conf $ touch /etc/X11/xinit/xinitrc $ fsvs -o path=parameters st .mC. 1001 X11/xorg.conf .m.? 1001 X11/xinit/xinitrc $ fsvs -o path=parameters st X11 /etc/X11/xinit .mC. 1001 X11/xorg.conf .m.? 1001 /etc/X11/xinit/xinitrc .fi .PP .PP \fBNote:\fP .RS 4 At least for the command line options the strings can be abbreviated, as long as they're still identifiable. Please use the full strings in the configuration file, to avoid having problems in future versions when more options are available. .RE .PP .SS "Status output coloring" FSVS can colorize the output of the status lines; removed entries will be printed in red, new ones in green, and otherwise changed in blue. Unchanged (for \fC-v\fP) will be given in the default color. .PP For this you can set \fCstat_color=yes\fP; this is turned \fCoff\fP per default. .PP As with the other colorizing options this gets turned \fCoff\fP automatically if the output is not on a tty; on the command line you can override this, though. .SS "Checking for changes in a script" If you want to use FSVS in scripts, you might simply want to know whether anything was changed. .PP In this case use the \fCstop_change\fP option, possibly combined with \fBFiltering entries\fP; this gives you no output on \fCSTDOUT\fP, but an error code on the first change seen: .PP .nf fsvs -o stop_change=yes st /etc if fsvs status -o stop_change=yes -o filter=text /etc/init.d then echo No change found ... else echo Changes seen. 
fi .fi .PP .SS "Verbosity flags" If you want a bit more control about the data you're getting you can use some specific flags for the \fCverbose\fP options. .PP .PD 0 .IP "\(bu" 2 \fCnone\fP,veryquiet - reset the bitmask, don't display anything. .IP "\(bu" 2 \fCquiet\fP - only a few output lines. .IP "\(bu" 2 \fCchanges\fP - the characters showing what has changed for an entry. .IP "\(bu" 2 \fCsize\fP - the size for files, or the textual description (like \fC'dir'\fP). .IP "\(bu" 2 \fCpath\fP - the path of the file, formatted according to \fBthe path option\fP. .IP "\(bu" 2 \fCdefault\fP - The default value, ie. \fCchanges\fP, \fCsize\fP and \fCname\fP. .IP "\(bu" 2 \fCmeta\fP - One more than the default so it can be used via a single \fC'-v'\fP, it marks that the mtime and owner/group changes get reported as two characters.If \fC'-v'\fP is used to achieve that, even entries without changes are reported, unless overridden by \fBFiltering entries\fP. .IP "\(bu" 2 \fCurl\fP - Displays the entries' top priority URL .IP "\(bu" 2 \fCcopyfrom\fP - Displays the URL this entry has been copied from (see \fBcopy\fP). .IP "\(bu" 2 \fCgroup\fP - The group this entry belongs to, see \fBgroup\fP .IP "\(bu" 2 \fCurls\fP - Displays all known URLs of this entry .IP "\(bu" 2 \fCstacktrace\fP - Print the full stacktrace when reporting errors; useful for debugging. .IP "\(bu" 2 \fCall\fP - Sets all flags. Mostly useful for debugging. .PP .PP Please note that if you want to display \fBfewer\fP items than per default, you'll have to clear the bitmask first, like this: .PP .nf fsvs status -o verbose=none,changes,path .fi .PP .SH "Diffing and merging on update" .PP .SS "Options relating to the 'diff' action" The diff is not done internally in FSVS, but some other program is called, to get the highest flexibility. .PP There are several option values: .PD 0 .IP "\(bu" 2 \fCdiff_prg\fP: The executable name, default \fC'diff'\fP. 
.IP "\(bu" 2 \fCdiff_opt\fP: The default options, default \fC'-pu'\fP. .IP "\(bu" 2 \fCdiff_extra\fP: Extra options, no default. .PP .PP The call is done as .PP .nf $diff_prg $diff_opt $file1 --label '$label1' $file2 --label '$label2' $diff_extra .fi .PP .PP \fBNote:\fP .RS 4 In \fCdiff_opt\fP you should only use command line flags without parameters; in \fCdiff_extra\fP you can encode a single flag with parameter (like \fC'-U5'\fP). If you need more flexibility, write a shell script and pass its name as \fCdiff_prg\fP. .RE .PP Advanced users might be interested in \fBexported environment variables\fP, too; with their help you can eg. start different \fCdiff\fP programs depending on the filename. .SS "Using colordiff" If you have \fCcolordiff\fP installed on your system, you might be interested in the \fCcolordiff\fP option. .PP It can take one of these values: .PD 0 .IP "\(bu" 2 \fCno\fP, \fCoff\fP or \fCfalse:\fP Don't use \fCcolordiff\fP. .IP "\(bu" 2 empty (default value): Try to use \fCcolordiff\fP as executable, but don't throw an error if it can't be started; just pipe the data as-is to \fCSTDOUT\fP. (\fIAuto\fP mode.) .IP "\(bu" 2 anything else: Pipe the output of the \fCdiff\fP program (see \fBOptions relating to the 'diff' action\fP) to the given executable. .PP .PP Please note that if \fCSTDOUT\fP is not a tty (eg. is redirected into a file), this option must be given on the command line to take effect. .SS "How to resolve conflicts on update" If you start an update, but one of the entries that was changed in the repository is changed locally too, you get a conflict. .PP There are some ways to resolve a conflict: .PD 0 .IP "\(bu" 2 \fClocal\fP - Just take the local entry, ignore the repository. .IP "\(bu" 2 \fCremote\fP - Overwrite any local change with the remote version. .PP .IP "\(bu" 2 \fCboth\fP - Keep the local modifications in the file renamed to \fC\fIfilename\fP.mine\fP, and save the repository version as \fC\fIfilename\fP.r\fIXXX\fP\fP, ie. 
put the revision number after the filename. .PP The conflict must be solved manually, and the solution made known to FSVS via the \fBresolve\fP command. .PP \fBNote:\fP .RS 4 As there's no known \fIgood\fP version after this renaming, a zero byte file gets created. .br Any \fBresolve\fP or \fBrevert\fP command would make that current, and the changes that are kept in \fC\fIfilename\fP.mine\fP would be lost! .br You should only \fBrevert\fP to the last repository version, ie. the data of \fC\fIfilename\fP.r\fIXXX\fP\fP. .RE .PP .IP "\(bu" 2 \fCmerge\fP - Call the program \fCmerge\fP with the common ancestor, the local and the remote version. .PP If it is a clean merge, no further work is necessary; else you'll get the (partly) merged file, and the two other versions just like with the \fCboth\fP variant, and (again) have to tell FSVS that the conflict is solved, by using the \fBresolve\fP command. .PP .PP \fBNote:\fP .RS 4 As in the subversion command line client \fCsvn\fP the auxiliary files are seen as new, although that might change in the future (so that they automatically get ignored). .RE .PP .SS "Options regarding the 'merge' program" Like with \fBdiff\fP, the \fCmerge\fP operation is not done internally in FSVS. .PP To have better control .PD 0 .IP "\(bu" 2 \fCmerge_prg\fP: The executable name, default \fC'merge'\fP. .IP "\(bu" 2 \fCmerge_opt\fP: The default options, default \fC'-A'\fP. .PP .PP The option \fC'-p'\fP is always used: .PP .nf $merge_prg $merge_opt -p $file1 $common $file2 .fi .PP .SH "Options for commit" .PP .SS "Author" You can specify an author to be used on commit. This option has a special behaviour; if the first character of the value is an \fC'$'\fP, the value is replaced by the environment variable named. 
.PP Empty strings are ignored; that allows an \fC/etc/fsvs/config\fP like this: .PP .nf author=unknown author=$LOGNAME author=$SUDO_USER .fi .PP where the last non-empty value is taken; and if your \fC\fP.authorized_keys has lines like .PP .nf environment='FSVS_AUTHOR=some_user' ssh-rsa ... .fi .PP that would override the config values. .PP \fBNote:\fP .RS 4 Your \fCsshd_config\fP needs the \fCPermitUserEnvironment\fP setting; you can also take a look at the \fCAcceptEnv\fP and \fCSendEnv\fP documentation. .RE .PP .SS "Destination URL for commit" If you defined multiple URLs for your working copy, FSVS needs to know which URL to commit to. .PP For this you would set \fCcommit_to\fP to the \fBname\fP of the URL; see this example: .PP .nf fsvs urls N:master,P:10,http://... N:local,P:20,file:///... fsvs ci /etc/passwd -m 'New user defined' -ocommit_to=local .fi .PP .SS "Doing empty commits" In the default settings FSVS will happily create empty commits, ie. revisions without any changed entry. These just have a revision number, an author and a timestamp; this is nice if FSVS is run via CRON, and you want to see when FSVS gets run. .PP If you would like to avoid such revisions, set this option to \fCno\fP; then such commits will be avoided. .PP Example: .PP .nf fsvs commit -o empty_commit=no -m 'cron' /etc .fi .PP .SS "Avoid commits without a commit message" If you don't like the behaviour that FSVS does commits with an empty message received from \fC$EDITOR\fP (eg if you found out that you don't want to commit after all), you can change this option to \fCno\fP; then FSVS won't allow empty commit messages. .PP Example for the config file: .PP .nf empty_message=no .fi .PP .SS "Creating directories in the repository above the URL" If you want to keep some data versioned, the first commit is normally the creation of the base directories \fBabove\fP the given URL (to keep that data separate from the other repository data). .PP Previously this had to be done manually, ie. 
with a \fCsvn mkdir $URL --parents\fP or similar command. .br With the \fCmkdir_base\fP option you can tell FSVS to create directories as needed; this is mostly useful on the first commit. .PP .PP .nf fsvs urls ... fsvs group 'group:ignore,./**' fsvs ci -m 'First post!' -o mkdir_base=yes .fi .PP .SS "Waiting for a time change after working copy operations" If you're using FSVS in automated systems, you might see that changes that happen in the same second as a commit are not seen with \fBstatus\fP later; this is because the timestamp granularity of FSVS is 1 second. .PP For backward compatibility the default value is \fCno\fP (don't delay). You can set it to any combination of .PD 0 .IP "\(bu" 2 \fCcommit\fP, .IP "\(bu" 2 \fCupdate\fP, .IP "\(bu" 2 \fCrevert\fP and/or .IP "\(bu" 2 \fCcheckout\fP; .PP for \fCyes\fP all of these actions are delayed until the clock seconds change. .PP Example how to set that option via an environment variable: .PP .nf export FSVS_DELAY=commit,revert .fi .PP .SH "Performance and tuning related options" .PP .SS "Change detection" This option allows you to specify the trade-off between speed and accuracy. .PP A file with a changed size can immediately be known as changed; but if only the modification time is changed, this is not so easy. Per default FSVS does a MD5 check on the file in this case; if you don't want that, or if you want to do the checksum calculation for \fBevery\fP file (in case a file has changed, but its mtime not), you can use this option to change FSVS' behaviour. .PP On the command line there's a shortcut for that: for every \fC'-C'\fP another check in this option is chosen. .PP The recognized specifications are .PP \fCnone\fP - Resets the check bitmask to 'no checks'. .br \fCfile_mtime\fP - Check files for modifications (via MD5) and directories for new entries, if the mtime is different - default. .br \fCdir\fP - Check all directories for new entries, regardless of the timestamp. 
\fCallfiles\fP - Check \fBall\fP files with MD5 for changes (\fCtripwire\fP -like operation). .br \fCfull\fP - All available checks. .PP You can give multiple options; they're accumulated unless overridden by \fCnone\fP. .PP .nf fsvs -o change_check=allfiles status .fi .PP .PP \fBNote:\fP .RS 4 \fIcommit\fP and \fIupdate\fP set additionally the \fCdir\fP option, to avoid missing new files. .RE .PP .SS "Avoiding expensive compares on \\ref cpfd 'copyfrom-detect'" If you've got big files that are seen as new, doing the MD5 comparison can be time consuming. So there's the option \fCcopyfrom_exp\fP (for \fI'expensive'\fP), which takes the usual \fCyes\fP (default) and \fCno\fP arguments. .PP .PP .nf fsvs copyfrom-detect -o copyfrom_exp=no some_directory .fi .PP .SS "Getting grouping/ignore statistics" If you need to ignore many entries of your working copy, you might find that the ignore pattern matching takes some valuable time. .br In order to optimize the order of your patterns you can specify this option to print the number of tests and matches for each pattern. .PP .PP .nf $ fsvs status -o group_stats=yes -q Grouping statistics (tested, matched, groupname, pattern): 4705 80 ignore group:ignore,. .fi .PP .PP For optimizing you'll want to put often matching patterns at the front (to make them match sooner, and avoid unnecessary tests); but if you are using other groups than \fCignore\fP (like \fCtake\fP), you will have to take care to keep the patterns within a group together. .PP Please note that the first line shows how many entries were tested, and that the next lines differ by the number of matched entries for the current line, as all entries being tested against some pattern get tested for the next too, \fBunless they match the current pattern\fP. .PP This option is available for \fBstatus\fP and the \fBignore test\fP commands. 
.SH "Base configuration" .PP .SS "Path definitions for the config and WAA area" .PP The paths given here are used to store the persistent configuration data needed by FSVS; please see \fBFiles used by fsvs\fP and \fBPriorities for option setting\fP for more details, and the \fBUsing an alternate root directory\fP parameter as well as the \fBRecovery for a non-booting system\fP for further discussion. .PP .PP .nf FSVS_CONF=/home/user/.fsvs-conf fsvs -o waa=/home/user/.fsvs-waa st .fi .PP .PP \fBNote:\fP .RS 4 Please note that these paths can be given \fBonly\fP as environment variables (\fC$FSVS_CONF\fP resp. \fC$FSVS_WAA\fP) or as command line parameter; settings in config files are ignored. .RE .PP .SS "Configuration directory for the subversion libraries" This path specifies where the subversion libraries should take their configuration data from; the most important aspect of that is authentication data, especially for certificate authentication. .PP The default value is \fC$FSVS_CONF/svn/\fP. .PP \fC/etc/fsvs/config\fP could have eg. .PP .nf config_dir=/root/.subversion .fi .PP .PP Please note that this directory can hold an \fCauth\fP directory, and the \fCservers\fP and \fCconfig\fP files. .SS "Using an alternate root directory" This is a path that is prepended to \fC$FSVS_WAA\fP and \fC$FSVS_CONF\fP (or their default values, see \fBFiles used by fsvs\fP), if they do not already start with it, and it is cut off for the directory-name MD5 calculation. .PP When is that needed? Imagine that you've booted from some Live-CD like Knoppix; if you want to setup or restore a non-working system, you'd have to transfer all files needed by the FSVS binary to it, and then start in some kind of \fCchroot\fP environment. .PP With this parameter you can tell FSVS that it should load its libraries from the current filesystem, but use the given path as root directory for its administrative data. 
.PP This is used for recovery; see the example in \fBRecovery for a non-booting system\fP. .PP So how does this work? .PD 0 .IP "\(bu" 2 The internal data paths derived from \fC$FSVS_WAA\fP and \fC$FSVS_CONF\fP use the value given for \fCsoftroot\fP as a base directory, if they do not already start with it. .br (If that creates a conflict for you, eg. in that you want to use \fC/var\fP as the \fCsoftroot\fP, and your \fC$FSVS_WAA\fP should be \fC/var/fsvs\fP, you can make the string comparison fail by using \fC/./var\fP for either path.) .PP .IP "\(bu" 2 When a directory name for \fC$FSVS_CONF\fP or \fC$FSVS_WAA\fP is derived from some file path, the part matching \fCsoftroot\fP is cut off, so that the generated names match the situation after rebooting. .PP .PP Previously you'd have to \fBexport\fP your data back to the filesystem and call \fBurls\fP \fC'fsvs urls'\fP and FSVS \fBsync-repos\fP again, to get the WAA data back. .PP \fBNote:\fP .RS 4 A plain \fCchroot()\fP would not work, as some needed programs (eg. the decoder for update, see \fBSpecial property names\fP) would not be available. .PP The easy way to understand \fCsoftroot\fP is: If you want to do a \fCchroot()\fP into the given directory (or boot with it as \fC/\fP), you'll want this set. .PP As this value is used for finding the correct working copy root (by trying to find a \fBconf-path\fP), it cannot be set from a per-wc config file. Only the environment, global configuration or command line parameter make sense. .RE .PP .SH "Debugging and diagnosing" .PP The next two options could be set in the global configuration file, to automatically get the last debug messages when an error happens. .PP To provide an easy way to get on-line debugging again, \fCdebug_output\fP and \fCdebug_buffer\fP are both reset to non-redirected, on-line output, if more than a single \fC-d\fP is specified on the command line, like this: .PP .nf fsvs commit -m '...' 
-d -d filenames .fi .PP .PP In this case you'll get a message telling you about that. .SS "Destination for debug output" You can specify the debug output destination with the option \fCdebug_output\fP. This can be a simple filename (which gets truncated on open), or, if it starts with a \fC|\fP, a command that the output gets piped into. .PP If the destination cannot be opened (or none is given), debug output goes to \fCSTDOUT\fP (for easier tracing via \fCless\fP). .PP Example: .PP .nf fsvs -o debug_output=/tmp/debug.out -d st /etc .fi .PP .PP \fBNote:\fP .RS 4 That string is taken only once - at the first debug output line. So you have to use the correct order of parameters: \fC-o debug_output=... -d\fP. .RE .PP An example: writing the last 200 lines of debug output into a file. .PP .nf fsvs -o debug_output='| tail -200 > /tmp/debug.log' -d .... .fi .PP .SS "Using a debug buffer" With the \fCdebug_buffer\fP option you can specify the size of a buffer (in kB) that is used to capture the output, and which gets printed automatically if an error occurs. .PP This must be done \fBbefore\fP debugging starts, like with the \fBdebug_output\fP specification. .PP .PP .nf fsvs -o debug_buffer=128 ... .fi .PP .PP \fBNote:\fP .RS 4 If this option is specified in the configuration file or via the environment, only the buffer is allocated; if it is used on the command line, debugging is automatically turned on, too. .RE .PP .SS "Setting warning behaviour" Please see the command line parameter \fB-W\fP, which is identical. .PP .PP .nf fsvs -o warning=diff-status=ignore .fi .PP .SH "Author" .PP Generated automatically by Doxygen for fsvs from the source code. 
fsvs-1.2.6/doc/IGNORING0000644000202400020240000000000011264677317013357 0ustar marekmarekfsvs-1.2.6/doc/fsvs-url-format.50000644000202400020240000000705011346140057015354 0ustar marekmarek.TH "FSVS - URL format" 5 "11 Mar 2010" "Version trunk:2424" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME Format of URLs \- .PP FSVS can use more than one URL; the given URLs are \fIoverlaid\fP according to their priority. FSVS can use more than one URL; the given URLs are \fIoverlaid\fP according to their priority. For easier managing they get a name, and can optionally take a target revision. .PP Such an \fIextended URL\fP has the form .PP .nf ['name:'{name},]['target:'{t-rev},]['prio:'{prio},]URL .fi .PP where URL is a standard URL known by subversion -- something like \fChttp://....\fP, \fCsvn://...\fP or \fCsvn+ssh://...\fP. .PP The arguments before the URL are optional and can be in any order; the URL must be last. .PP Example: .PP .nf name:perl,prio:5,svn://... .fi .PP or, using abbreviations, .PP .nf N:perl,P:5,T:324,svn://... .fi .PP .PP Please mind that the full syntax is in lower case, whereas the abbreviations are capitalized! .br Internally the \fC:\fP is looked for, and if the part before this character is a known keyword, it is used. .br As soon as we find an unknown keyword we treat it as an URL, ie. stop processing. .PP The priority is in reverse numeric order - the lower the number, the higher the priority. (See \fC\fBurl__current_has_precedence()\fP\fP ) .SH "Why a priority?" .PP When we have to overlay several URLs, we have to know \fBwhich\fP URL takes precedence - in case the same entry is in more than one. \fB(Which is \fBnot\fP recommended!)\fP .SH "Why a name?" .PP We need a name, so that the user can say \fB'commit all outstanding changes to the repository at URL x'\fP, without having to remember the full URL. After all, this URL should already be known, as there's a list of URLs to update from. 
.PP You should only use alphanumeric characters and the underscore here; or, in other words, \fC\\w\fP or \fC[a-zA-Z0-9_]\fP. (Whitespace, comma and semicolon get used as separators.) .SH "What can I do with the target revision?" .PP Using the target revision you can tell fsvs that it should use the given revision number as destination revision - so update would go there, but not further. Please note that the given revision number overrides the \fC-r\fP parameter; this sets the destination for all URLs. .PP The default target is \fCHEAD\fP. .PP \fBNote:\fP .RS 4 In subversion you can enter \fCURL@revision\fP - this syntax may be implemented in fsvs too. (But it has the problem, that as soon as you have a \fC@\fP in the URL, you \fBmust\fP give the target revision every time!) .RE .PP .SH "There's an additional internal number - why that?" .PP This internal number is not for use by the user. .br It is just used to have a unique identifier for an URL, without using the full string. .PP On my system the package names are on average 12.3 characters long (1024 packages with 12629 bytes, including newline): .PP .nf COLUMNS=200 dpkg-query -l | cut -c5- | cut -f1 -d' ' | wc .fi .PP .PP So if we store an \fIid\fP of the url instead of the name, we have approx. 4 bytes per entry (length of strings of numbers from 1 to 1024). Whereas using the name needs 12.3 characters, that's a difference of 8.3 per entry. .PP Multiplied with 150 000 entries we get about 1MB difference in filesize of the dir-file. Not really small ... .br And using the whole URL would inflate that much more. .PP Currently we use about 92 bytes per entry. So we'd (unnecessarily) increase the size by about 10%. .PP That's why there's an \fBurl_t::internal_number\fP. .SH "Author" .PP Generated automatically by Doxygen for fsvs from the source code. fsvs-1.2.6/doc/FAQ0000644000202400020240000000263111145217065012552 0ustar marekmarekQ: What does fsvs mean? A: Fast System VerSioning Q: How do you pronounce it? 
A: [fisvis] Q: Why are the listings not sorted? A: Because of speed considerations the program does the files in hard disk-order, ie. in the order they're on the hard disk. Doing the run and output later would leave the user without feedback for some time. Q: What meta-data is versioned? A: Currently modification time, user, group, and permissions are saved. Q: What kind of files are versioned? A: Files, directories, device nodes (block and char), symbolic links. Sockets and pipes are normally regenerated upon opening and are therefore not stored. Q: I don't like XYZ. A: Unified patches are welcome. Q: Why is it called fsvs and not X? A: I've had several great ideas myself, but after discarding SYSV (SYStem Versioning) and SUBS (SUbversion Backup System) I just searched for a unique string to describe this project. Q: Can I use some subdirectory of my repository instead of the root? A: Of course. You can use the normal subversion structures /trunk, /tags, /branches - you just have to create them and point your working copy there. So you do svn mkdir $URL/branches $URL/tags $URL/trunk and use fsvs init $URL/trunk to use the trunk, or fsvs export $URL/tags/tag2 to export your "tag2". Note: There's no way currently to "switch" between directories, although there might/should. fsvs-1.2.6/doc/fsvs-howto-master_local.50000644000202400020240000002522611346140057017074 0ustar marekmarek.TH "FSVS - Master/Local HOWTO" 5 "11 Mar 2010" "Version trunk:2424" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME HOWTO: Master/Local repositories \- .PP This HOWTO describes how to use a single working copy with multiple repositories. This HOWTO describes how to use a single working copy with multiple repositories. Please read the \fBHOWTO: Backup\fP first, to know about basic steps using FSVS. .SH "Rationale" .PP If you manage a lot of machines with similar or identical software, you might notice that it's a bit of work keeping them all up-to-date. 
Sure, automating distribution via rsync or similar is easy; but then you get identical machines, or you have to play with lots of exclude patterns to keep the needed differences. .PP Here another way is presented; and even if you don't want to use FSVS for distributing your files, the ideas presented here might help you keep your machines under control. .SH "Preparation, repository layout" .PP In this document the basic assumption is that there is a group of (more or less identical) machines, that share most of their filesystems. .PP Some planning should be done beforehand; while the ideas presented here might suffice for simple versioning, your setup can require a bit of thinking ahead. .PP This example uses some distinct repositories, to achieve a bit more clarity; of course these can simply be different paths in a single repository (see \fBUsing a single repository\fP for an example configuration). .PP Repository in URL \fCbase:\fP .PP .nf trunk/ bin/ ls true lib/ libc6.so modules/ sbin/ mkfs usr/ local/ bin/ sbin/ tags/ branches/ .fi .PP .PP Repository in URL \fCmachine1\fP (similar for machine2): .PP .nf trunk/ etc/ HOSTNAME adjtime network/ interfaces passwd resolv.conf shadow var/ log/ auth.log messages tags/ branches/ .fi .PP .SS "User data versioning" If you want to keep the user data versioned, too, a idea might be to start a new working copy in \fBevery\fP home directory; this way .IP "\(bu" 2 the system- and (several) user-commits can be run in parallel, .IP "\(bu" 2 the intermediate \fChome\fP directory in the repository is not needed, and .IP "\(bu" 2 you get a bit more isolation (against FSVS failures, out-of-space errors and similar). .IP "\(bu" 2 Furthermore FSVS can work with smaller file sets, which helps performance a bit (less dentries to cache at once, less memory used, etc.). 
.PP .PP .PP .nf A/ Andrew/ .bashrc .ssh/ .kde/ Alexander/ .bashrc .ssh/ .kde/ B/ Bertram/ .fi .PP .PP A cronjob could simply loop over the directories in \fC/home\fP, and call fsvs for each one; giving a target URL name is not necessary if every home-directory is its own working copy. .PP \fBNote:\fP .RS 4 URL names can include a forward slash \fC/\fP in their name, so you might give the URLs names like \fChome/Andrew\fP - although that should not be needed, if every home directory is a distinct working copy. .RE .PP .SH "Using master/local repositories" .PP Imagine having 10 similar machines with the same base-installation. .PP Then you install one machine, commit that into the repository as \fCbase/trunk\fP, and make a copy as \fCbase/released\fP. .PP The other machines get \fCbase/released\fP as checkout source, and another (overlaid) from eg. \fCmachine1/trunk\fP. .br Per-machine changes are always committed into the \fCmachineX/trunk\fP of the per-machine repository; this would be the host name, IP address, and similar things. .PP On the development machine all changes are stored into \fCbase/trunk\fP; if you're satisfied with your changes, you merge them (see \fBBranching, tagging, merging\fP) into \fCbase/released\fP, whereupon all other machines can update to this latest version. .PP So by looking at \fCmachine1/trunk\fP you can see the history of the machine-specific changes; and in \fCbase/released\fP you can check out every old version to verify problems and bugs. .PP \fBNote:\fP .RS 4 You can take this system a bit further: optional software packages could be stored in other subtrees. They should be of lower priority than the base tree, so that in case of conflicts the base should always be preferred (but see \fB1\fP). .RE .PP Here is a small example; \fCmachine1\fP is the development machine, \fCmachine2\fP is a \fIclient\fP. 
.PP .nf machine1$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine1/trunk machine1$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk # Determine differences, and commit them machine1$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log machine1$ fsvs ci -o commit_to=base / .fi .PP .PP Now you've got a base-install in your repository, and can use that on the other machine: .PP .nf machine2$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine2/trunk machine2$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk machine2$ fsvs sync-repos # Now you see differences of this machines' installation against the other: machine2$ fsvs st # You can see what is different: machine2$ fsvs diff /etc/X11/xorg.conf # You can take the base installations files: machine2$ fsvs revert /bin/ls # And put the files specific to this machine into its repository: machine2$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log .fi .PP .PP Now, if this machine has a harddisk failure or needs setup for any other reason, you boot it (eg. via PXE, Knoppix or whatever), and do (\fB3\fP) .PP .nf # Re-partition and create filesystems (if necessary) machine2-knoppix$ fdisk ... machine2-knoppix$ mkfs ... # Mount everything below /mnt machine2-knoppix$ mount /mnt/[...] machine2-knoppix$ cd /mnt # Do a checkout below /mnt machine2-knoppix$ fsvs co -o softroot=/mnt .fi .PP .SH "Branching, tagging, merging" .PP Other names for your branches (instead of \fCtrunk\fP, \fCtags\fP and \fCbranches\fP) could be \fCunstable\fP, \fCtesting\fP, and \fCstable\fP; your production machines would use \fCstable\fP, your testing environment \fCtesting\fP, and in \fCunstable\fP you'd commit all your daily changes. .PP \fBNote:\fP .RS 4 Please note that there's no merging mechanism in FSVS; and as far as I'm concerned, there won't be. Subversion just gets automated merging mechanisms, and these should be fine for this usage too. 
(\fB4\fP) .RE .PP .SS "Thoughts about tagging" Tagging works just like normally; although you need to remember to tag more than a single branch. Maybe FSVS should get some knowledge about the subversion repository layout, so a \fCfsvs tag\fP would tag all repositories at once? It would have to check for duplicate tag-names (eg. on the \fCbase\fP -branch), and just keep it if it had the same copyfrom-source. .PP But how would tags be used? Define them as source URL, and checkout? Would be a possible case. .PP Or should \fCfsvs tag\fP do a \fImerge\fP into the repository, so that a single URL contains all files currently checked out, with copyfrom-pointers to the original locations? Would require using a single repository, as such pointers cannot be across different repositories. If the committed data includes the \fC$FSVS_CONF/\fP.../Urls file, the original layout would be known, too - although to use it a \fBsync-repos\fP would be necessary. .SH "Using a single repository" .PP A single repository would have to be partitioned in the various branches that are needed for bookkeeping; see these examples. .PP Depending on the number of machines it might make sense to put them in a 1- or 2 level deep hierarchy; named by the first character, like .PP .PP .nf machines/ A/ Axel/ Andreas/ B/ Berta/ G/ Gandalf/ .fi .PP .SS "Simple layout" Here only the base system gets branched and tagged; the machines simply backup their specific/localized data into the repository. 
.PP .PP .nf # For the base-system: trunk/ bin/ usr/ sbin/ tags/ tag-1/ branches/ branch-1/ # For the machines: machines/ machine1/ etc/ passwd HOSTNAME machine2/ etc/ passwd HOSTNAME .fi .PP .SS "Per-area" Here every part gets its \fCtrunk\fP, \fCbranches\fP and \fCtags:\fP .PP .PP .nf base/ trunk/ bin/ sbin/ usr/ tags/ tag-1/ branches/ branch-1/ machine1/ trunk/ etc/ passwd HOSTNAME tags/ tag-1/ branches/ machine2/ trunk/ etc/ passwd HOSTNAME tags/ branches/ .fi .PP .SS "Common trunk, tags, and branches" Here the base-paths \fCtrunk\fP, \fCtags\fP and \fCbranches\fP are shared: .PP .PP .nf trunk/ base/ bin/ sbin/ usr/ machine2/ etc/ passwd HOSTNAME machine1/ etc/ passwd HOSTNAME tags/ tag-1/ branches/ branch-1/ .fi .PP .SH "Other notes" .PP .SS "1" Conflicts should not be automatically merged. If two or more trees bring the same file, the file from the \fIhighest\fP tree wins - this way you always know the file data on your machines. It's better if a single software doesn't work, compared to a machine that no longer boots or is no longer accessible (eg. by SSH)). .PP So keep your base installation at highest priority, and you've got good chances that you won't loose control in case of conflicting files. .SS "2" If you don't know which files are different in your installs, .IP "\(bu" 2 install two machines, .IP "\(bu" 2 commit the first into fsvs, .IP "\(bu" 2 do a \fBsync-repos\fP on the second, .IP "\(bu" 2 and look at the \fBstatus\fP output. .PP .SS "3" As debian includes FSVS in the near future, it could be included on the next KNOPPIX, too! .PP Until then you'd need a custom boot CD, or copy the absolute minimum of files to the harddisk before recovery. .PP There's a utility \fCsvntar\fP available; it allows you to take a snapshot of a subversion repository directly into a \fC\fP.tar -file, which you can easily export to destination machine. (Yes, it knows about the meta-data properties FSVS uses, and stores them into the archive.) 
.SS "4" Why no file merging? Because all real differences are in the per-machine files -- the files that are in the \fCbase\fP repository are changed only on a single machine, and so there's an unidirectional flow. .PP BTW, how would you merge your binaries, eg. \fC/bin/ls\fP? .SH "Feedback" .PP If you've got any questions, ideas, wishes or other feedback, please tell us in the mailing list \fCusers [at] fsvs.tigris.org\fP. .PP Thank you! .SH "Author" .PP Generated automatically by Doxygen for fsvs from the source code. fsvs-1.2.6/doc/fsvs-howto-backup.50000644000202400020240000001544411346140057015675 0ustar marekmarek.TH "FSVS - Backup HOWTO" 5 "11 Mar 2010" "Version trunk:2424" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME HOWTO: Backup \- .PP This document is a step-by-step explanation how to do backups using FSVS. This document is a step-by-step explanation how to do backups using FSVS. .SH "Preparation" .PP If you're going to back up your system, you have to decide what you want to have stored in your backup, and what should be left out. .PP Depending on your system usage and environment you first have to decide: .PD 0 .IP "\(bu" 2 Do you only want to backup your data in \fC/home\fP? .PD 0 .IP " \(bu" 4 Less storage requirements .IP " \(bu" 4 In case of hardware crash the OS must be set up again .PP .IP "\(bu" 2 Do you want to keep track of your configuration in \fC/etc\fP? .PD 0 .IP " \(bu" 4 Very small storage overhead .IP " \(bu" 4 Not much use for backup/restore, but shows what has been changed .PP .IP "\(bu" 2 Or do you want to backup your whole installation, from \fC/\fP on? .PD 0 .IP " \(bu" 4 Whole system versioned, restore is only a few commands .IP " \(bu" 4 Much more storage space needed - typically you'd need at least a few GB free space. .PP .PP .PP The next few moments should be spent thinking about the storage space for the repository - will it be on the system harddisk, a secondary or an external harddisk, or even off-site? 
.PP \fBNote:\fP .RS 4 If you just created a fresh repository, you probably should create the 'default' directory structure for subversion - \fCtrunk\fP, \fCbranches\fP, \fCtags\fP; this layout might be useful for your backups. .br The URL you'd use in fsvs would go to \fCtrunk\fP. .RE .PP Possibly you'll have to take the available bandwidth into your considerations; a single home directory may be backed up on a 56k modem, but a complete system installation would likely need at least some kind of DSL or LAN. .PP \fBNote:\fP .RS 4 If this is a production box with sparse, small changes, you could take the initial backup on a local harddisk, transfer the directory with some media to the target machine, and switch the URLs. .RE .PP A fair bit of time should go to a small investigation which file patterns and paths you \fBnot\fP want to back-up. .PD 0 .IP "\(bu" 2 Backup files like \fC*\fP.bak, \fC*~\fP, \fC*\fP.tmp, and similar .IP "\(bu" 2 History files: \fC.sh-history\fP and similar in the home-directories .IP "\(bu" 2 Cache directories: your favourite browser might store many MB of cached data in you home-directories .IP "\(bu" 2 Virtual system directories, like \fC/proc\fP and \fC/sys\fP, \fC/dev/shmfs\fP. .PP .SH "Telling FSVS what to do" .PP Given \fC$WC\fP as the \fIworking directory\fP - the base of the data you'd like backed up (\fC/\fP, \fC/home\fP), and \fC$URL\fP as a valid subversion URL to your (already created) repository path. .PP Independent of all these details the first steps look like these: .PP .nf cd $WC fsvs urls $URL .fi .PP Now you have to say what should be ignored - that'll differ depending on your needs/wishes. 
.PP .nf fsvs ignore './**~' './**.tmp' './**.bak' fsvs ignore ./proc/ ./sys/ ./tmp/ fsvs ignore ./var/tmp/ ./var/spool/lpd/ fsvs ignore './var/log/*.gz' fsvs ignore ./var/run/ /dev/pts/ fsvs ignore './etc/*.dpkg-dist' './etc/*.dpkg-new' fsvs ignore './etc/*.dpkg-old' './etc/*.dpkg-bak' .fi .PP .PP \fBNote:\fP .RS 4 \fC/var/run\fP is for transient files; I've heard reports that \fBreverting\fP files there can cause problems with running programs. .br Similar for \fC/dev/pts\fP - if that's a \fCdevpts\fP filesystem, you'll run into problems on \fBupdate\fP or \fBrevert\fP - as FSVS won't be allowed to create entries in this directory. .RE .PP Now you may find that you'd like to have some files encrypted in your backup - like \fC/etc/shadow\fP, or your \fC\fP.ssh/id_* files. So you tell fsvs to en/decrypt these files: .PP .nf fsvs propset fsvs:commit-pipe 'gpg -er {your backup key}' /etc/shadow /etc/gshadow fsvs propset fsvs:update-pipe 'gpg -d' /etc/shadow /etc/gshadow .fi .PP .PP \fBNote:\fP .RS 4 This are just examples. You'll probably have to exclude some other paths and patterns from your backup, and mark some others as to-be-filtered. .RE .PP .SH "The first backup" .PP .PP .nf fsvs commit -m 'First commit.' .fi .PP That's all there is to it! .SH "Further use and maintenance" .PP The further usage is more or less the \fCcommit\fP command from the last section. .br When do you have to do some manual work? .PD 0 .IP "\(bu" 2 When ignore patterns change. .PD 0 .IP " \(bu" 4 New filesystems that should be ignored, or would be ignored but shouldn't .IP " \(bu" 4 You find that your favorite word-processor leaves many *.segv files behind, and similar things .PP .IP "\(bu" 2 If you get an error message from fsvs, check the arguments and retry. In desperate cases (or just because it's quicker than debugging yourself) ask on \fCdev [at] fsvs.tigris.org\fP. 
.PP .SH "Restoration in a working system" .PP Depending on the circumstances you can take different ways to restore data from your repository. .PD 0 .IP "\(bu" 2 \fC 'fsvs export'\fP allows you to just dump some repository data into your filesystem - eg. into a temporary directory to sort things out. .IP "\(bu" 2 Using \fC'fsvs revert'\fP you can get older revisions of a given file, directory or directory tree inplace. .br .IP "\(bu" 2 Or you can do a fresh checkout - set an URL in an (empty) directory, and update to the needed revision. .IP "\(bu" 2 If everything else fails (no backup media with fsvs on it), you can use subversion commands (eg. \fCexport\fP) to restore needed parts, and update the rest with fsvs. .PP .SH "Recovery for a non-booting system" .PP In case of a real emergency, when your harddisks crashed or your filesystem was eaten and you have to re-partition or re-format, you should get your system working again by .PD 0 .IP "\(bu" 2 booting from a knoppix or some other Live-CD (with FSVS on it), .IP "\(bu" 2 partition/format as needed, .IP "\(bu" 2 mount your harddisk partitions below eg. \fC/mnt\fP, .IP "\(bu" 2 and then recovering by .PP .PP .nf $ cd /mnt $ export FSVS_CONF=/etc/fsvs # if non-standard $ export FSVS_WAA=/var/spool/fsvs # if non-standard $ fsvs checkout -o softroot=/mnt .fi .PP .PP If somebody asks really nice I'd possibly even create a \fCrecovery\fP command that deduces the \fCsoftroot\fP parameter from the current working directory. .PP For more information please take a look at \fBUsing an alternate root directory\fP. .SH "Feedback" .PP If you've got any questions, ideas, wishes or other feedback, please tell us in the mailing list \fCusers [at] fsvs.tigris.org\fP. .PP Thank you! .SH "Author" .PP Generated automatically by Doxygen for fsvs from the source code. 
fsvs-1.2.6/doc/USAGE0000644000202400020240000012174211346140057013013 0ustar marekmarekCommands and command line parameters fsvs is a client for subversion repositories; it is designed for fast versioning of big directory trees. More... fsvs is a client for subversion repositories; it is designed for fast versioning of big directory trees. SYNOPSIS fsvs command [options] [args] The following commands are understood by FSVS: Local configuration and information: urls Define working copy base directories by their URL(s) status Get a list of changed entries info Display detailed information about single entries log Fetch the log messages from the repository diff Get differences between files (local and remote) copyfrom-detect Ask FSVS about probably copied/moved/renamed entries; see cp Defining which entries to take: ignore and rign Define ignore patterns unversion Remove entries from versioning add Add entries that would be ignored cp, mv Tell FSVS that entries were copied Commands working with the repository: commit Send changed data to the repository update Get updates from the repository checkout Fetch some part of the repository, and register it as working copy cat Get a file from the directory revert and uncp Undo local changes and entry markings remote-status Ask what an update would bring Property handling: prop-set Set user-defined properties prop-get Ask value of user-defined properties prop-list Get a list of user-defined properties Additional commands used for recovery and debugging: export Fetch some part of the repository sync-repos Drop local information about the entries, and fetch the current list from the repository. Note: Multi-url-operations are relatively new; there might be rough edges. The return code is 0 for success, or 2 for an error. 1 is returned if the option Checking for changes in a script is used, and changes are found; see also Filtering entries. 
Universal options -V -- show version -V makes FSVS print the version and a copyright notice, and exit. -d and -D -- debugging If FSVS was compiled using --enable-debug you can enable printing of debug messages (to STDOUT) with -d. Per default all messages are printed; if you're only interested in a subset, you can use -D start-of-function-name. fsvs -d -D waa_ status would call the status action, printing all debug messages of all WAA functions - waa__init, waa__open, etc. For more details on the other debugging options debug_output and debug_buffer please see the options list. -N, -R -- recursion The -N and -R switches in effect just decrement/increment a counter; the behaviour is chosen depending on that. So a command line of -N -N -N -R -R is equivalent to -3 +2 = -1, this results in -N. -q, -v -- verbose/quiet -v/-q set/clear verbosity flags, and so give more/less output. Please see the verbose option for more details. -C -- checksum -C chooses to use more change detection checks; please see the change_check option for more details. -f -- filter entries This parameter allows to do a bit of filtering of entries, or, for some operations, modification of the work done on given entries. It requires a specification at the end, which can be any combination of any, text, new, deleted (or removed), meta, mtime, group, mode, changed or owner; default or def use the default value. By giving eg. the value text, with a status action only entries that are new or changed are shown; with mtime,group only entries whose group or modification time has changed are printed. Note: Please see Change detection for some more information. If an entry gets replaced with an entry of a different type (eg. a directory gets replaced by a file), that counts as deleted and new. If you use -v, it's used as a any internally. If you use the string none, it resets the bitmask to no entries shown; then you can built a new mask. So owner,none,any,none,delete would show deleted entries. 
If the value after all commandline parsing is none, it is reset to the default. -W warning=action -- set warnings Here you can define the behaviour for certain situations that should not normally happen, but which you might encounter. The general format here is specification = action, where specification is a string matching the start of at least one of the defined situations, and action is one of these: * once to print only a single warning, * always to print a warning message every time, * stop to abort the program, * ignore to simply ignore this situation, or * count to just count the number of occurrences. If specification matches more than one situation, all of them are set; eg. for meta=ignore all of meta-mtime, meta-user etc. are ignored. If at least a single warning that is not ignored is encountered during the program run, a list of warnings along with the number of messages it would have printed with the setting always is displayed, to inform the user of possible problems. The following situations can be handled with this: meta-mtime, meta-user, meta-group, meta-umask These warnings are issued if a meta-data property that was fetched from the repository couldn't be parsed. This can only happen if some other program or a user changes properties on entries. In this case you can use -Wmeta=always or -Wmeta=count, until the repository is clean again. no-urllist This warning is issued if a info action is executed, but no URLs have been defined yet. charset-invalid If the function nl_langinfo(3) couldn't return the name of the current character encoding, a default of UTF-8 is used. You might need that for a minimal system installation, eg. on recovery. chmod-eperm, chown-eperm If you update a working copy as normal user, and get to update a file which has another owner but which you may modify, you'll get errors because neither the user, group, nor mode can be set. This way you can make the errors non-fatal. 
chmod-other, chown-other If you get another error than EPERM in the situation above, you might find these useful. mixed-rev-wc If you specify some revision number on a revert, it will complain that mixed-revision working copies are not allowed. While you cannot enable mixed-revision working copies (I'm working on that) you can avoid being told every time. propname-reserved It is normally not allowed to set a property with the prop-set action with a name matching some reserved prefixes. ignpat-wcbase This warning is issued if an absolute ignore pattern" does not match the working copy base directory. \n See \ref ignpat_shell_abs "absolute shell patterns" for more details. diff-status GNU diff has defined that it returns an exit code 2 in case of an error; sadly it returns that also for binary files, so that a simply fsvs diff some-binary-file text-file would abort without printing the diff for the second file. Because of this FSVS currently ignores the exit status of diff per default, but this can be changed by setting this option to eg. stop. Also an environment variable FSVS_WARNINGS is used and parsed; it is simply a whitespace-separated list of option specifications. -u URLname[@revision[:revision]] -- select URLs Some commands can be reduced to a subset of defined URLs; the update command is a example. If you have more than a single URL in use for your working copy, update normally updates all entries from all URLs. By using this parameter you can tell FSVS to update only the specified URLs. The parameter can be used repeatedly; the value can have multiple URLs, separated by whitespace or one of ",;". fsvs up -u base_install,boot@32 -u gcc This would get HEAD of base_install and gcc, and set the target revision of the boot URL for this command at 32. -o [name[=value]] -- other options This is used for setting some seldom used option, for which default can be set in a configuration file (to be implemented, currently only command-line). 
For a list of these please see Further options for FSVS.. Signals If you have a running FSVS, and you want to change its verbosity, you can send the process either SIGUSR1 (to make it more verbose) or SIGUSR2 (more quiet). add fsvs add [-u URLNAME] PATH [PATH...] With this command you can explicitly define entries to be versioned, even if they have a matching ignore pattern. They will be sent to the repository on the next commit, just like other new entries, and will therefore be reported as New . The -u option can be used if you're have more than one URL defined for this working copy and want to have the entries pinned to the this URL. Example Say, you're versioning your home directory, and gave an ignore pattern of ./.* to ignore all .* entries in your home-directory. Now you want .bashrc, .ssh/config, and your complete .kde3-tree saved, just like other data. So you tell fsvs to not ignore these entries: fsvs add .bashrc .ssh/config .kde3 Now the entries below .kde3 would match your earlier ./.* pattern (as a match at the beginning is sufficient), so you have to insert a negative ignore pattern (a take pattern): fsvs ignore prepend t./.kde3 Now a fsvs st would show your entries as New , and the next commit will send them to the repository. unversion fsvs unversion PATH [PATH...] This command flags the given paths locally as removed. On the next commit they will be deleted in the repository, and the local information of them will be removed, but not the entries themselves. So they will show up as New again, and you get another chance at ignoring them. Example Say, you're versioning your home directory, and found that you no longer want .bash_history and .sh_history versioned. So you do fsvs unversion .bash_history .sh_history and these files will be reported as d (will be deleted, but only in the repository). 
Then you do a fsvs commit Now fsvs would report these files as New , as it does no longer know anything about them; but that can be cured by fsvs ignore "./.*sh_history" Now these two files won't be shown as New , either. The example also shows why the given paths are not just entered as separate ignore patterns - they are just single cases of a (probably) much broader pattern. Note: If you didn't use some kind of escaping for the pattern, the shell would expand it to the actual filenames, which is (normally) not what you want. _build_new_list This is used mainly for debugging. It traverses the filesystem and builds a new entries file. In production it should not be used; as neither URLs nor the revision of the entries is known, information is lost by calling this function! Look at sync-repos. delay This command delays execution until time has passed at least to the next second after writing the data files used by FSVS (dir and urls). This command is for use in scripts; where previously the delay option was used, this can be substituted by the given command followed by the delay command. The advantage against the delay option is that read-only commands can be used in the meantime. An example: fsvs commit /etc/X11 -m "Backup of X11" ... read-only commands, like "status" fsvs delay /etc/X11 ... read-write commands, like "commit" The optional path can point to any path in the WC. In the testing framework it is used to save a bit of time; in normal operation, where FSVS commands are not so tightly packed, it is normally preferable to use the delay option. cat fsvs cat [-r rev] path Fetches a file repository, and outputs it to STDOUT. If no revision is specified, it defaults to BASE, ie. the current local revision number of the entry. checkout fsvs checkout [path] URL [URLs...] Sets one or more URLs for the current working directory (or the directory path), and does an checkout of these URLs. Example: fsvs checkout . 
http://svn/repos/installation/machine-1/trunk The distinction whether a directory is given or not is done based on the result of URL-parsing -- if it looks like an URL, it is used as an URL. Please mind that at most a single path is allowed; as soon as two non-URLs are found an error message is printed. If no directory is given, "." is used; this differs from the usual subversion usage, but might be better suited for usage as a recovery tool (where versioning / is common). Opinions welcome. The given path must exist, and should be empty -- FSVS will abort on conflicts, ie. if files that should be created already exist. If there's a need to create that directory, please say so; patches for some parameter like -p are welcome. For a format definition of the URLs please see the chapter Format of URLs and the urls and update commands. Furthermore you might be interested in Using an alternate root directory and Recovery for a non-booting system. commit fsvs commit [-m "message"|-F filename] [-v] [-C [-C]] [PATH [PATH ...]] Commits (parts of) the current state of the working copy into the repository. Example The working copy is /etc , and it is set up and committed already. Then /etc/hosts and /etc/inittab got modified. Since these are non-related changes, you'd like them to be in separate commits. So you simply run these commands: fsvs commit -m "Added some host" /etc/hosts fsvs commit -m "Tweaked default runlevel" /etc/inittab If the current directory is /etc you could even drop the /etc/ in front, and use just the filenames. Please see status for explanations on -v and -C . For advanced backup usage see also the commit-pipe property". cp fsvs cp [-r rev] SRC DEST fsvs cp dump fsvs cp load The copy command marks DEST as a copy of SRC at revision rev, so that on the next commit of DEST the corresponding source path is sent as copy source. The default value for rev is BASE, ie. the revision the SRC (locally) is at. 
Please note that this command works always on a directory structure - if you say to copy a directory, the whole structure is marked as copy. That means that if some entries below the copy are missing, they are reported as removed from the copy on the next commit. (Of course it is possible to mark files as copied, too; non-recursive copies are not possible, but can be emulated by having parts of the destination tree removed.) Note: TODO: There will be differences in the exact usage - copy will try to run the cp command, whereas copied will just remember the relation. If this command are used without parameters, the currently defined relations are printed; please keep in mind that the key is the destination name, ie. the 2nd line of each pair! The input format for load is newline-separated - first a SRC line, followed by a DEST line, then an line with just a dot (".") as delimiter. If you've got filenames with newlines or other special characters, you have to give the paths as arguments. Internally the paths are stored relative to the working copy base directory, and they're printed that way, too. Later definitions are appended to the internal database; to undo mistakes, use the uncopy action. Note: Important: User-defined properties like fsvs:commit-pipe are not copied to the destinations, because of space/time issues (traversing through entire subtrees, copying a lot of property-files) and because it's not sure that this is really wanted. TODO: option for copying properties? Todo: -0 like for xargs? Todo: Are different revision numbers for load necessary? Should dump print the source revision number? Todo: Copying from URLs means update from there Note: As subversion currently treats a rename as copy+delete, the mv command is an alias to cp. If you have a need to give the filenames dump or load as first parameter for copyfrom relations, give some path, too, as in "./dump". 
Note: The source is internally stored as URL with revision number, so that operations like these $ fsvs cp a b $ rm a/1 $ fsvs ci a $ fsvs ci b work - FSVS sends the old (too recent!) revision number as source, and so the local filelist stays consistent with the repository. But it is not implemented (yet) to give an URL as copyfrom source directly - we'd have to fetch a list of entries (and possibly the data!) from the repository. Todo: Filter for dump (patterns?). copyfrom-detect fsvs copyfrom-detect [paths...] This command tells FSVS to look through the new entries, and see whether it can find some that seem to be copied from others already known. It will output a list with source and destination path and why it could match. This is just for information purposes and doesn't change any FSVS state, (TODO: unless some option/parameter is set). The list format is on purpose incompatible with the load syntax, as the best match normally has to be taken manually. Todo: some parameter that just prints the "best" match, and outputs the correct format. If verbose is used, an additional value giving the percentage of matching blocks, and the count of possibly copied entries is printed. Example: $ fsvs copyfrom-list -v newfile1 md5:oldfileA newfile2 md5:oldfileB md5:oldfileC md5:oldfileD newfile3 inode:oldfileI manber=82.6:oldfileF manber=74.2:oldfileG manber=53.3:oldfileH ... 3 copyfrom relations found. The abbreviations are: md5 The MD5 of the new file is identical to that of one or more already committed files; there is no percentage. inode The device/inode number is identical to the given known entry; this could mean that the old entry has been renamed or hardlinked. Note: Not all filesystems have persistent inode numbers (eg. NFS) - so depending on your filesystems this might not be a good indicator! name The entry has the same name as another entry. 
manber Analysing files of similar size shows some percentage of (variable-sized) common blocks (ignoring the order of the blocks). dirlist The new directory has similar files to the old directory. The percentage is (number_of_common_entries)/(files_in_dir1 + files_in_dir2 - number_of_common_entries). Note: manber matching is not implemented yet. If too many possible matches for an entry are found, not all are printed; only an indicator ... is shown at the end. uncp fsvs uncopy DEST [DEST ...] The uncopy command removes a copyfrom mark from the destination entry. This will make the entry unknown again, and reported as New on the next invocations. Only the base of a copy can be un-copied; if a directory structure was copied, and the given entry is just implicitly copied, this command will return an error. This is not folded in revert, because it's not clear whether revert on copied, changed entries should restore the original copyfrom data or remove the copy attribute; by using another command this is no longer ambiguous. Example: $ fsvs copy SourceFile DestFile # Whoops, was wrong! $ fsvs uncopy DestFile diff fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...] This command gives you diffs between local and repository files. With -v the meta-data is additionally printed, and changes shown. If you don't give the revision arguments, you get a diff of the base revision in the repository (the last commit) against your current local file. With one revision, you diff this repository version against your local file. With both revisions given, the difference between these repository versions is calculated. You'll need the diff program, as the files are simply passed as parameters to it. The default is to do non-recursive diffs; so fsvs diff . will output the changes in all files in the current directory and below. 
The output for special files is the diff of the internal subversion storage, which includes the type of the special file, but no newline at the end of the line (which diff complains about). For entries marked as copy the diff against the (clean) source entry is printed. Please see also Options relating to the "diff" action and Using colordiff. Todo: Two revisions diff is buggy in that it (currently) always fetches the full trees from the repository; this is not only a performance degradation, but you'll see more changed entries than you want (like changes A to B to A). This will be fixed. export fsvs export REPOS_URL [-r rev] If you want to export a directory from your repository without storing any FSVS-related data you can use this command. This restores all meta-data - owner, group, access mask and modification time; its primary use is for data recovery. The data gets written (in the correct directory structure) below the current working directory; if entries already exist, the export will stop, so this should be an empty directory. help help [command] This command shows general or specific help (for the given command). A similar function is available by using -h or -? after a command. groups fsvs groups dump|load fsvs groups [prepend|append|at=n] group-definition [group-def ...] fsvs ignore [prepend|append|at=n] pattern [pattern ...] fsvs groups test [-v|-q] [pattern ...] This command adds patterns to the end of the pattern list, or, with prepend, puts them at the beginning of the list. With at=x the patterns are inserted at the position x , counting from 0. The difference between groups and ignore is that groups requires a group name, whereas the latter just assumes the default group ignore. For the specification please see the related documentation . fsvs dump prints the patterns to STDOUT . If there are special characters like CR or LF embedded in the pattern without encoding (like \r or \n), the output will be garbled. The patterns may include * and ? 
as wildcards in one directory level, or ** for arbitrary strings. These patterns are only matched against new (not yet known) files; entries that are already versioned are not invalidated. If the given path matches a new directory, entries below aren't found, either; but if this directory or entries below are already versioned, the pattern doesn't work, as the match is restricted to the directory. So: fsvs ignore ./tmp ignores the directory tmp; but if it has already been committed, existing entries would have to be unmarked with fsvs unversion. Normally it's better to use fsvs ignore ./tmp/** as that takes the directory itself (which might be needed after restore as a mount point anyway), but ignore all entries below. Currently this has the drawback that mtime changes will be reported and committed; this is not the case if the whole directory is ignored. Examples: fsvs group group:unreadable,mode:4:0 fsvs group 'group:secrets,/etc/*shadow' fsvs ignore /proc fsvs ignore /dev/pts fsvs ignore './var/log/*-*' fsvs ignore './**~' fsvs ignore './**/*.bak' fsvs ignore prepend 'take,./**.txt' fsvs ignore append 'take,./**.svg' fsvs ignore at=1 './**.tmp' fsvs group dump fsvs group dump -v echo "./**.doc" | fsvs ignore load # Replaces the whole list Note: Please take care that your wildcard patterns are not expanded by the shell! Testing patterns To see more easily what different patterns do you can use the test subcommand. The following combinations are available: * fsvs groups test pattern Tests only the given pattern against all new entries in your working copy, and prints the matching paths. The pattern is not stored in the pattern list. * fsvs groups test Uses the already defined patterns on the new entries, and prints the group name, a tab, and the path. With -v you can see the matching pattern in the middle column, too. By using -q you can avoid getting the whole list; this makes sense if you use the group_stats option at the same time. 
rign fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...] fsvs ri [prepend|append|at=n] path-spec [path-spec ...] If you keep the same repository data at more than one working copy on the same machine, it will be stored in different paths - and that makes absolute ignore patterns infeasible. But relative ignore patterns are anchored at the beginning of the WC root - which is a bit tiring to type if you're deep in your WC hierarchy and want to ignore some files. To make that easier you can use the rel-ignore (abbreviated as ri) command; this converts all given path-specifications (which may include wildcards as per the shell pattern specification above) to WC-relative values before storing them. Example for /etc as working copy root: fsvs rel-ignore '/etc/X11/xorg.conf.*' cd /etc/X11 fsvs rel-ignore 'xorg.conf.*' Both commands would store the pattern "./X11/xorg.conf.*". Note: This works only for shell patterns. For more details about ignoring files please see the ignore command and Specification of groups and patterns. info fsvs info [-R [-R]] [PATH...] Use this command to show information regarding one or more entries in your working copy. You can use -v to obtain slightly more information. This may sometimes be helpful for locating bugs, or to obtain the URL and revision a working copy is currently at. Example: $ fsvs info URL: file: .... 200 . Type: directory Status: 0x0 Flags: 0x100000 Dev: 0 Inode: 24521 Mode: 040755 UID/GID: 1000/1000 MTime: Thu Aug 17 16:34:24 2006 CTime: Thu Aug 17 16:34:24 2006 Revision: 4 Size: 200 The default is to print information about the given entry only. With a single -R you'll get this data about all entries of a given directory; with another -R you'll get the whole (sub-)tree. log fsvs log [-v] [-r rev1[:rev2]] [-u name] [path] This command views the revision log information associated with the given path at its topmost URL, or, if none is given, the highest priority URL. 
The optional rev1 and rev2 can be used to restrict the revisions that are shown; if no values are given, the logs are given starting from HEAD downwards, and then a limit on the number of revisions is applied (but see the limit option). If you use the -v -option, you get the files changed in each revision printed, too. There is an option controlling the output format; see the log_output option. Optionally the name of an URL can be given after -u; then the log of this URL, instead of the topmost one, is shown. TODOs: * --stop-on-copy * Show revision for all URLs associated with a working copy? In which order? prop-get fsvs prop-get PROPERTY-NAME PATH... Prints the data of the given property to STDOUT. Note: Be careful! This command will dump the property as it is, ie. with any special characters! If there are escape sequences or binary data in the property, your terminal might get messed up! If you want a safe way to look at the properties, use prop-list with the -v parameter. prop-set fsvs prop-set [-u URLNAME] PROPERTY-NAME VALUE PATH... This command sets an arbitrary property value for the given path(s). Note: Some property prefixes are reserved; currently everything starting with svn: throws a (fatal) warning, and fsvs: is already used, too. See Special property names. If you're using a multi-URL setup, and the entry you'd like to work on should be pinned to a specific URL, you can use the -u parameter; this is like the add command, see there for more details. prop-del fsvs prop-del PROPERTY-NAME PATH... This command removes a property for the given path(s). See also prop-set. prop-list fsvs prop-list [-v] PATH... Lists the names of all properties for the given entry. With -v, the value is printed as well; special characters will be translated, as arbitrary binary sequences could interfere with your terminal settings. If you need raw output, post a patch for --raw, or write a loop with prop-get. 
remote-status fsvs remote-status PATH [-r rev] This command looks into the repository and tells you which files would get changed on an update - it's a dry-run for update . Per default it compares to HEAD, but you can choose another revision with the -r parameter. Please see the update documentation for details regarding multi-URL usage. resolve fsvs resolve PATH [PATH...] When FSVS tries to update local files which have been changed, a conflict might occur. (For various ways of handling these please see the conflict option.) This command lets you mark such conflicts as resolved. revert fsvs revert [-rRev] [-R] PATH [PATH...] This command undoes local modifications: * An entry that is marked to be unversioned gets this flag removed. * For an already versioned entry (existing in the repository) the local entry is replaced with its repository version, and its status and flags are cleared. * An entry that is a modified copy destination gets reverted to the copy source data. * Manually added entries are changed back to "N"ew. Please note that implicitly copied entries, ie. entries that are marked as copied because some parent directory is the base of a copy, can not be un-copied; they can only be reverted to their original (copied-from) data, or removed. If you want to undo a copy operation, please see the uncopy command. See also HOWTO: Understand the entries' statii. If a directory is given on the command line all versioned entries in this directory are reverted to the old state; this behaviour can be modified with -R/-N, or see below. The reverted entries are printed, along with the status they had before the revert (because the new status is per definition unchanged). If a revision is given, the entries' data is taken from this revision; furthermore, the new status of that entry is shown. Note: Please note that mixed revision working copies are not (yet) possible; the BASE revision is not changed, and a simple revert without a revision argument gives you that. 
By giving a revision parameter you can just choose to get the text from a different revision. Difference to update If something doesn't work as it should in the installation you can revert entries until you are satisfied, and directly commit the new state. In contrast, if you update to an older version, you * cannot choose single entries (no mixed revision working copies yet), * and you cannot commit the old version with changes, as the "skipped" (later) changes will create conflicts in the repository. Currently only known entries are handled. If you need a switch (like --delete in rsync(1) ) to remove unknown (new, not yet versioned) entries, to get the directory in the exact state it is in the repository, please tell the dev@ mailing list. Todo: Another limitation is that just-deleted just-committed entries cannot be fetched via revert, as FSVS no longer knows about them. TODO: If a revision is given, take a look there, and ignore the local data? As a workaround you could use the cat and/or checkout commands to fetch repository-only data. Removed directory structures If a path is specified whose parent is missing, fsvs complains. We plan to provide a switch (probably -p), which would create (a sparse) tree up to this entry. Recursive behaviour When the user specifies a non-directory entry (file, device, symlink), this entry is reverted to the old state. If the user specifies a directory entry, these definitions should apply: command line switch result -N this directory only (meta-data), none this directory, and direct children of the directory, -R this directory, and the complete tree below. Working with copied entries If an entry is marked as copied from another entry (and not committed!), a revert will fetch the original copyfrom source. To undo the copy setting use the uncopy command. status fsvs status [-C [-C]] [-v] [-f filter] [PATHs...] This command shows the entries that have been changed locally since the last commit. 
The most important output formats are: * A status column of four (or, with -v , six) characters. There are either flags or a "." printed, so that it's easily parsed by scripts -- the number of columns is only changed by -q, -v -- verbose/quiet. * The size of the entry, in bytes, or "dir" for a directory, or "dev" for a device. * The path and name of the entry, formatted by the path option. Normally only changed entries are printed; with -v all are printed, but see the filter option for more details. The status column can show the following flags: * 'D' and 'N' are used for deleted and new entries. * 'd' and 'n' are used for entries which are to be unversioned or added on the next commit; the characters were chosen as little delete (only in the repository, not removed locally) and little new (although ignored). See add and unversion. If such an entry does not exist, it is marked with an "!" in the last column -- because it has been manually marked, and so the removal is unexpected. * A changed type (character device to symlink, file to directory etc.) is given as 'R' (replaced), ie. as removed and newly added. * If the entry has been modified, the change is shown as 'C'. If the modification or status change timestamps (mtime, ctime) are changed, but the size is still the same, the entry is marked as possibly changed (a question mark '?' in the last column) - but see change detection for details. * A 'x' signifies a conflict. * The meta-data flag 'm' shows meta-data changes like properties, modification timestamp and/or the rights (owner, group, mode); depending on the -v/-q command line parameters, it may be split into 'P' (properties), 't' (time) and 'p' (permissions). If 'P' is shown for the non-verbose case, it means only property changes, ie. the entry's filesystem meta-data is unchanged. * A '+' is printed for files with a copy-from history; to see the URL of the copyfrom source, see the verbose option. 
Here's a table with the characters and their positions: * Without -v With -v * .... ...... * NmC? NtpPC? * DPx! D x! * R + R + * d d * n n * Furthermore please take a look at the stat_color option, and for more information about displayed data the verbose option. sync-repos fsvs sync-repos [-r rev] [working copy base] This command loads the file list afresh from the repository. A following commit will send all differences and make the repository data identical to the local. This is normally not needed; the only use cases are * debugging and * recovering from data loss in the $FSVS_WAA area. It might be of use if you want to back up two similar machines. Then you could commit one machine into a subdirectory of your repository, make a copy of that directory for another machine, and sync this other directory on the other machine. A commit then will transfer only _changed_ files; so if the two machines share 2GB of binaries (/usr , /bin , /lib , ...) then these 2GB are still shared in the repository, although over time they will deviate (as both committing machines know nothing of the other path with identical files). This kind of backup could be substituted by two or more levels of repository paths, which get overlaid in a defined priority. So the base directory, which all machines derive from, will be committed from one machine, and it's no longer necessary for all machines to send identical files into the repository. The revision argument should only ever be used for debugging; if you fetch a filelist for a revision, and then commit against later revisions, problems are bound to occur. Note: There's issue 2286 in subversion which describes sharing identical files in the repository in unrelated paths. Using this would relax the storage needs, but the network transfers would still be much larger than with the overlaid paths. update fsvs update [-r rev] [working copy base] fsvs update [-u url@rev ...] 
[working copy base] This command does an update on the current working copy; per default for all defined URLs, but you can restrict that via -u. It first reads all filelist changes from the repositories, overlays them (so that only the highest-priority entries are used), and then fetches all necessary changes. Updating to zero If you start an update with a target revision of zero, the entries belonging to that URL will be removed from your working copy, and the URL deleted from your URL list. This is a convenient way to replace an URL with another. Note: As FSVS has no full mixed revision support yet, it doesn't know whether under the removed entry is a lower-priority one with the same path, which should get visible now. Directories get changed to the highest priority URL that has an entry below (which might be hidden!). Because of this you're advised to either use that only for completely distinct working copies, or do a sync-repos (and possibly one or more revert calls) after the update. urls fsvs urls URL [URLs...] fsvs urls dump fsvs urls load Initializes a working copy administrative area and connects the current working directory to REPOS_URL. All commits and updates will be done to this directory and against the given URL. Example: fsvs urls http://svn/repos/installation/machine-1/trunk For a format definition of the URLs please see the chapter Format of URLs. Note: If there are already URLs defined, and you use that command later again, please note that as of 1.0.18 the older URLs are not overwritten as before, but that the new URLs are appended to the given list! If you want to start afresh, use something like true | fsvs urls load Loading URLs You can load a list of URLs from STDIN; use the load subcommand for that. Example: ( echo 'N:local,prio:10,http://svn/repos/install/machine-1/trunk' ; echo 'P:50,name:common,http://svn/repos/install/common/trunk' ) | fsvs urls load Empty lines are ignored. 
Dumping the defined URLs To see which URLs are in use for the current WC, you can use dump. As an optional parameter you can give a format statement: p priority n name r current revision t target revision R readonly-flag u URL I internal number for this URL Note: That's not a real printf()-format; only these and a few \ sequences are recognized. Example: fsvs urls dump " %u %n:%p\\n" http://svn/repos/installation/machine-1/trunk local:10 http://svn/repos/installation/common/trunk common:50 The default format is "name:%n,prio:%p,target:%t,ro:%r,%u\\n"; for a more readable version you can use -v. Loading URLs You can change the various parameters of the defined URLs like this: # Define an URL fsvs urls name:url1,target:77,readonly:1,http://anything/... # Change values fsvs urls name:url1,target:HEAD fsvs urls readonly:0,http://anything/... fsvs urls name:url1,prio:88,target:32 Note: FSVS as yet doesn't store the whole tree structures of all URLs. So if you change the priority of an URL, and re-mix the directory trees that way, you'll need a sync-repos and some revert commands. I'd suggest to avoid this, until FSVS does handle that case better. fsvs-1.2.6/doc/fsvs-groups.50000644000202400020240000003030011346140057014575 0ustar marekmarek.TH "FSVS - Group definitions" 5 "11 Mar 2010" "Version trunk:2424" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME Using grouping patterns \- .PP Patterns are used to define groups for new entries; a group can be used to ignore the given entries, or to automatically set properties when the entry is taken on the entry list. Patterns are used to define groups for new entries; a group can be used to ignore the given entries, or to automatically set properties when the entry is taken on the entry list. So the auto-props are assigned when the entry gets put on the internal list; that happens for the \fBadd\fP, \fBprop-set\fP or \fBprop-del\fP, and of course \fBcommit\fP commands. 
.br To override the auto-props of some new entry just use the property commands. .SH "Overview" .PP When \fCFSVS\fP walks through your working copy it tries to find \fBnew\fP (ie. not yet versioned) entries. Every \fBnew\fP entry gets tested against the defined grouping patterns (in the given order!); if a pattern matches, the corresponding group is assigned to the entry, and no further matching is done. .PP See also \fBentry statii\fP. .SS "Predefined group 1: 'ignore'" If an entry gets a group named \fC'ignore'\fP assigned, it will not be considered for versioning. .PP This is the only \fBreally\fP special group name. .SS "Predefined group 2: 'take'" This group mostly specifies that no further matching is to be done, so that later \fBignore\fP patterns are not tested. .PP Basically the \fC'take'\fP group is an ordinary group like all others; it is just predefined, and available with a \fBshort-hand notation\fP. .SH "Why should I ignore files?" .PP Ignore patterns are used to ignore certain directory entries, where versioning makes no sense. If you're versioning the complete installation of a machine, you wouldn't care to store the contents of \fC/proc\fP (see \fCman 5 proc\fP), or possibly because of security reasons you don't want \fC/etc/shadow\fP , \fC/etc/sshd/ssh_host_*key\fP , and/or other password- or key-containing files. .PP Ignore patterns allow you to define which directory entries (files, subdirectories, devices, symlinks etc.) should be taken, respectively ignored. .SH "Why should I assign groups?" .PP The grouping patterns can be compared with the \fCauto-props\fP feature of subversion; it allows automatically defining properties for new entries, or ignoring them, depending on various criteria. 
.PP For example you might want to use encryption for the files in your users' \fC\fP.ssh directory, to secure them against unauthorized access in the repository, and completely ignore the private key files: .PP Grouping patterns: .PP .nf group:ignore,/home/*/.ssh/id* group:encrypt,/home/*/.ssh/** .fi .PP And the \fC$FSVS_CONF/groups/encrypt\fP file would have a definition for the \fCfsvs:commit-pipe\fP (see the \fBspecial properties\fP). .SH "Syntax of group files" .PP A group definition file looks like this: .PD 0 .IP "\(bu" 2 Whitespace on the beginning and the end of the line is ignored. .IP "\(bu" 2 Empty lines, and lines with the first non-whitespace character being \fC'#'\fP (comments) are ignored. .IP "\(bu" 2 It can have \fBeither\fP the keywords \fCignore\fP or \fCtake\fP; if neither is specified, the group \fCignore\fP has \fCignore\fP as default (surprise, surprise!), and all others use \fCtake\fP. .IP "\(bu" 2 An arbitrary (small) number of lines with the syntax .br \fCauto-prop \fIproperty-name\fP \fIproperty-value\fP\fP can be given; \fIproperty-name\fP may not include whitespace, as there's no parsing of any quote characters yet. .PP .PP An example: .PP .nf # This is a comment # This is another auto-props fsvs:commit-pipe gpg -er admin@my.net # End of definition .fi .PP .SH "Specification of groups and patterns" .PP While an ignore pattern just needs the pattern itself (in one of the formats below), there are some modifiers that can be additionally specified: .PP .nf [group:{name},][dir-only,][insens|nocase,][take,][mode:A:C,]pattern .fi .PP These are listed in the section \fBModifiers\fP below. .PP These kinds of ignore patterns are available: .SH "Shell-like patterns" .PP These must start with \fC./\fP, just like a base-directory-relative path. \fC\fP? , \fC*\fP as well as character classes \fC\fP[a-z] have their usual meaning, and \fC**\fP is a wildcard for directory levels. 
.PP You can use a backslash \fC\\\fP outside of character classes to match some common special characters literally, eg. \fC\\*\fP within a pattern will match a literal asterisk character within a file or directory name. Within character classes all characters except \fC\fP] are treated literally. If a literal \fC\fP] should be included in a character class, it can be placed as the first character or also be escaped using a backslash. .PP Example for \fC/\fP as the base-directory .PP .nf ./[oa]pt ./sys ./proc/* ./home/**~ .fi .PP .PP This would ignore files and directories called \fCapt\fP or \fCopt\fP in the root directory (and files below, in the case of a directory), the directory \fC/sys\fP and everything below, the contents of \fC/proc\fP (but take the directory itself, so that upon restore it gets created as a mountpoint), and all entries matching \fC*~\fP in and below \fC/home\fP . .PP \fBNote:\fP .RS 4 The patterns are anchored at the beginning and the end. So a pattern \fC./sys\fP will match \fBonly\fP a file or directory named \fCsys\fP. If you want to exclude a directories' files, but not the directory itself, use something like \fC./dir/*\fP or \fC./dir/**\fP .RE .PP If you're deep within your working copy and you'd like to ignore some files with a WC-relative ignore pattern, you might like to use the \fBrel-ignore\fP command. .SS "Absolute shell patterns" There is another way to specify shell patterns - using absolute paths. .br The syntax is similar to normal shell patterns; but instead of the \fC./\fP prefix the full path, starting with \fC/\fP, is used. .PP .PP .nf /etc/**.dpkg-old /etc/**.dpkg-bak /**.bak /**~ .fi .PP .PP The advantage of using full paths is that a later \fCdump\fP and \fCload\fP in another working copy (eg. when moving from versioning \fC/etc\fP to \fC/\fP) does simply work; the patterns don't have to be modified. 
.PP Internally this simply tries to remove the working copy base directory at the start of the patterns (on loading); then they are processed as usual. .PP If a pattern does \fBnot\fP match the wc base, and neither has the wild-wildcard prefix \fC/**\fP, a \fBwarning\fP is issued. .SH "PCRE-patterns" .PP PCRE stands for Perl Compatible Regular Expressions; you can read about them with \fCman pcre\fP (if the manpages are installed), and/or \fCperldoc perlre\fP (if perldoc is installed). .br If both fail for you, just google it. .PP These patterns have the form \fCPCRE:{pattern}\fP, with \fCPCRE\fP in uppercase. .PP An example: .PP .nf PCRE:./home/.*~ .fi .PP This one achieves exactly the same as \fC./home/**~\fP . .PP Another example: .PP .nf PCRE:./home/[a-s] .fi .PP .PP This would match \fC/home/anthony\fP , \fC/home/guest\fP , \fC/home/somebody\fP and so on, but would not match \fC/home/theodore\fP . .PP One more: .PP .nf PCRE:./.*(\.(tmp|bak|sik|old|dpkg-\w+)|~)$ .fi .PP .PP Note that the pathnames start with \fC\fP./ , just like above, and that the patterns are anchored at the beginning. To additionally anchor at the end you could use a \fC$\fP at the end. .SH "Ignoring all files on a device" .PP Another form to discern what is needed and what not is possible with \fCDEVICE:[<|<=|>|>=]major[:minor]\fP. .PP This takes advantage of the major and minor device numbers of inodes (see \fCman 1 stat\fP and \fCman 2 stat\fP). .PP The rule is as follows: .IP "\(bu" 2 Directories have their parent matched against the given string .IP "\(bu" 2 All other entries have their own device matched. .PP .PP This is because mount-points (ie. directories where other filesystems get attached) show the device of the mounted device, but should be versioned (as they are needed after restore); all entries (and all binding mounts) below should not. 
.PP The possible options \fC<=\fP or \fC>=\fP define a less-or-equal-than respective bigger-or-equal-than relationship, to ignore a set of device classes. .PP Examples: .PP .nf tDEVICE:3 ./* .fi .PP This patterns would define that all filesystems on IDE-devices (with major number 3) are \fItaken\fP , and all other files are ignored. .PP .PP .nf DEVICE:0 .fi .PP This would ignore all filesystems with major number 0 - in linux these are the \fIvirtual\fP filesystems ( \fCproc\fP , \fCsysfs\fP , \fCdevpts\fP , etc.; see \fC/proc/filesystems\fP , the lines with \fCnodev\fP ). .PP Mind NFS and smb-mounts, check if you're using \fImd\fP , \fIlvm\fP and/or \fIdevice-mapper\fP ! .PP Note: The values are parsed with \fCstrtoul()\fP , so you can use decimal, hexadecimal (by prepending \fC'0x'\fP, like \fC'0x102'\fP) and octal (\fC'0'\fP, like \fC'0777'\fP) notation. .SH "Ignoring a single file, by inode" .PP At last, another form to ignore entries is to specify them via the device they are on and their inode: .PP .nf INODE:major:minor:inode .fi .PP This can be used if a file can be hardlinked to many places, but only one copy should be stored. Then one path can be marked as to \fItake\fP , and other instances can get ignored. .PP \fBNote:\fP .RS 4 That's probably a bad example. There should be a better mechanism for handling hardlinks, but that needs some help from subversion. .RE .PP .SH "Modifiers" .PP All of these patterns can have one or more of these modifiers \fBbefore\fP them, with (currently) optional \fC','\fP as separators; not all combinations make sense. .PP For patterns with the \fCm\fP (mode match) or \fCd\fP (dironly) modifiers the filename pattern gets optional; so you don't have to give an all-match wildcard pattern (\fC./**\fP) for these cases. .SS "'take': Take pattern" This modifier is just a short-hand for assigning the group \fBtake\fP. .SS "'ignore': Ignore pattern" This modifier is just a short-hand for assigning the group \fBignore\fP. 
.SS "'insens' or 'nocase': Case insensitive" With this modifier you can force the match to be case-insensitive; this can be useful if other machines use eg. \fCsamba\fP to access files, and you cannot be sure about them leaving \fC'.BAK'\fP or \fC'.bak'\fP behind. .SS "'dironly': Match only directories" This is useful if you have a directory tree in which only certain files should be taken; see below. .SS "'mode': Match entries' mode" This expects a specification of two octal values in the form \fCm:\fIand_value\fP:\fIcompare_value\fP\fP, like \fCm:04:00\fP; the bits set in \fCand_value\fP get isolated from the entries' mode, and compared against \fCcompare_value\fP. .PP As an example: the file has mode \fC0750\fP; a specification of .PD 0 .IP "\(bu" 2 \fCm:0700:0700\fP matches, .IP "\(bu" 2 \fCm:0700:0500\fP doesn't; and .IP "\(bu" 2 \fCm:0007:0000\fP matches, but .IP "\(bu" 2 \fCm:0007:0007\fP doesn't. .PP .PP A real-world example: \fCm:0007:0000\fP would match all entries that have \fBno\fP right bits set for \fI'others'\fP, and could be used to exclude private files (like \fC/etc/shadow\fP). (Alternatively, the \fIothers-read\fP bit could be used: \fCm:0004:0000\fP. .PP FSVS will reject invalid specifications, ie. when bits in \fCcompare_value\fP are set that are cleared in \fCand_value:\fP these patterns can never match. .br An example would be \fCm:0700:0007\fP. .SS "Examples" .PP .nf take,dironly,./var/vmail/** take,./var/vmail/**/.*.sieve ./var/vmail/** .fi .PP This would take all \fC'.*.sieve'\fP files (or directories) below \fC/var/vmail\fP, in all depths, and all directories there; but no other files. 
.PP If your files are at a certain depth, and you don't want all other directories taken, too, you can specify that exactly: .PP .nf take,dironly,./var/vmail/* take,dironly,./var/vmail/*/* take,./var/vmail/*/*/.*.sieve ./var/vmail/** .fi .PP .PP .PP .nf mode:04:0 take,./etc/ ./** .fi .PP This would take all files from \fC/etc\fP, but ignoring the files that are not world-readable (\fCother-read\fP bit cleared); this way only 'public' files would get taken. .SH "Author" .PP Generated automatically by Doxygen for fsvs from the source code. fsvs-1.2.6/doc/PERFORMANCE0000644000202400020240000000525010644624417013712 0ustar marekmarek- Program size is 280kB with debug information, 96kB without. Needed libraries not counted. Some debug code could be eliminated by "configure --enable-release". - Initial checkin can take a while - there's a lot of data to transfer. - memory usage: my test machine with 150000 files never grew over 34MB in memory usage. (That is, with apr_pool_destroy(); with apr_pool_clean() I had to kill the process at 170MB) - The fsfs backend makes two files out of one date file - one for meta-data (properties) and one for the real file-data. So 300000 files are created for a commit of 130000 files. ** On ext3 enable dir_index ** (see "tune2fs", "fsck.ext3 -D") or use bdb. - "fsvs status" is (on cold caches) faster than "find"! See here: Script started on Mon 09 Jul 2007 16:48:34 CEST # How many entries are here? dolly:/example# find . | wc -l 22147 # Initialize fsvs, so that it knows its basepath dolly:/example# fsvs urls file://// # Warm up caches dolly:/example# find . > /dev/null # find with hot cache: dolly:/example# time find . 
> /dev/null real 0m0.096s user 0m0.052s sys 0m0.044s # Warm up cache (should already be done by find) dolly:/example# fsvs st > /dev/null # fsvs with hot cache: dolly:/example# time fsvs st > /dev/null real 0m0.175s user 0m0.088s sys 0m0.088s # Clear cache dolly:/example# echo 3 > /proc/sys/vm/drop_caches # find with cold cache - harddisk must seek a fair bit. dolly:/example# time find . > /dev/null real 0m8.279s user 0m0.084s sys 0m0.212s # Clear cache dolly:/example# echo 3 > /proc/sys/vm/drop_caches # fsvs with cold cache - harddisk must seek again dolly:/example# time fsvs st > /dev/null real 0m7.333s user 0m0.148s sys 0m0.372s # Now build a list of entries, like the one that exists after commit dolly:/example# fsvs _build > /dev/null # Clear cache dolly:/example# echo 3 > /proc/sys/vm/drop_caches # fsvs with cold cache, but using a sorted list of existing entries - # harddisk doesn't need to seek as much dolly:/example# time fsvs st > /dev/null real 0m6.000s user 0m0.240s sys 0m0.372s # Result: dolly:/example# bc -l bc 1.06 Copyright 1991-1994, 1997, 1998, 2000 Free Software Foundation, Inc. This is free software with ABSOLUTELY NO WARRANTY. For details type `warranty'. 8.279/6.00 1.37983333333333333333 6.00/8.279 .72472520835849740306 # 28% (or 38%) time saved! dolly:/example# exit Script done on Mon 09 Jul 2007 16:50:13 CEST - testing goes much faster if you create a /tmp/ram directory and mount a tmpfs there. *** DO NOT USE ramfs !!! *** ramfs doesn't update the directory modification time on file creations, so fsvs won't work. fsvs-1.2.6/doc/fsvs.10000644000202400020240000014430111346140057013263 0ustar marekmarek.TH "FSVS - fast versioning tool" 1 "11 Mar 2010" "Version trunk:2424" "fsvs" \" -*- nroff -*- .ad l .nh .SH NAME Commands and command line parameters \- .PP fsvs is a client for subversion repositories; it is designed for fast versioning of big directory trees. 
fsvs is a client for subversion repositories; it is designed for fast versioning of big directory trees. .SH "SYNOPSIS" .PP \fCfsvs command [options] [args]\fP .PP The following commands are understood by FSVS: .SH "Local configuration and information:" .PP .IP "\fB\fBurls\fP\fP" 1c \fCDefine working copy base directories by their URL(s)\fP .IP "\fB\fBstatus\fP\fP" 1c \fCGet a list of changed entries\fP .IP "\fB\fBinfo\fP\fP" 1c \fCDisplay detailed information about single entries\fP .IP "\fB\fBlog\fP\fP" 1c \fCFetch the log messages from the repository\fP .IP "\fB\fBdiff\fP\fP" 1c \fCGet differences between files (local and remote)\fP .IP "\fB\fBcopyfrom-detect\fP\fP" 1c \fCAsk FSVS about probably copied/moved/renamed entries; see \fBcp\fP\fP .PP .SH "Defining which entries to take:" .PP .IP "\fB\fBignore\fP and \fBrign\fP\fP" 1c \fCDefine ignore patterns\fP .IP "\fB\fBunversion\fP\fP" 1c \fCRemove entries from versioning\fP .IP "\fB\fBadd\fP\fP" 1c \fCAdd entries that would be ignored\fP .IP "\fB\fBcp\fP, \fBmv\fP\fP" 1c \fCTell FSVS that entries were copied\fP .PP .SH "Commands working with the repository:" .PP .IP "\fB\fBcommit\fP\fP" 1c \fCSend changed data to the repository\fP .IP "\fB\fBupdate\fP\fP" 1c \fCGet updates from the repository\fP .IP "\fB\fBcheckout\fP\fP" 1c \fCFetch some part of the repository, and register it as working copy\fP .IP "\fB\fBcat\fP\fP" 1c \fCGet a file from the directory \fP .IP "\fB\fB\fCrevert\fP\fP and \fB\fCuncp\fP\fP\fP" 1c \fC\fCUndo local changes and entry markings\fP \fP .IP "\fB\fB\fCremote-status\fP\fP\fP" 1c \fC\fCAsk what an \fBupdate\fP would bring\fP \fP .PP .PP .SH "Property handling:" .PP \fC .IP "\fB\fBprop-set\fP\fP" 1c \fCSet user-defined properties\fP .IP "\fB\fBprop-get\fP\fP" 1c \fCAsk value of user-defined properties\fP .IP "\fB\fBprop-list\fP\fP" 1c \fCGet a list of user-defined properties\fP .PP \fP .PP .SH "Additional commands used for recovery and debugging:" .PP \fC .IP "\fB\fBexport\fP\fP" 1c \fCFetch 
some part of the repository\fP .IP "\fB\fBsync-repos\fP\fP" 1c \fCDrop local information about the entries, and fetch the current list from the repository.\fP .PP \fP .PP \fC .PP \fBNote:\fP .RS 4 Multi-url-operations are relatively new; there might be rough edges. .RE .PP The \fBreturn code\fP is \fC0\fP for success, or \fC2\fP for an error. \fC1\fP is returned if the option \fBChecking for changes in a script\fP is used, and changes are found; see also \fBFiltering entries\fP.\fP .PP .SH "Universal options" .PP .SS "-V -- show version" \fC \fC-V\fP makes FSVS print the version and a copyright notice, and exit.\fP .PP .SS "-d and -D -- debugging" \fC If FSVS was compiled using \fC--enable-debug\fP you can enable printing of debug messages (to \fCSTDOUT\fP) with \fC-d\fP. Per default all messages are printed; if you're only interested in a subset, you can use \fC-D\fP \fIstart-of-function-name\fP. .PP .nf fsvs -d -D waa_ status .fi .PP would call the \fIstatus\fP action, printing all debug messages of all WAA functions - \fCwaa__init\fP, \fCwaa__open\fP, etc.\fP .PP \fC For more details on the other debugging options \fBdebug_output\fP and \fBdebug_buffer\fP please see the options list.\fP .PP .SS "-N, -R -- recursion" \fC The \fC-N\fP and \fC-R\fP switches in effect just decrement/increment a counter; the behaviour is chosen depending on that. 
So a command line of \fC-N -N -N -R -R\fP is equivalent to \fC-3 +2 = -1\fP, this results in \fC-N\fP.\fP .PP .SS "-q, -v -- verbose/quiet" \fC \fC-v\fC/\fC-q\fC set/clear verbosity flags, and so give more/less output.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Please see \fBthe verbose option\fP for more details.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "-C -- checksum" \fC\fC\fC\fC\fC \fC-C\fP chooses to use more change detection checks; please see \fBthe change_check option\fP for more details.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "-f -- filter entries" \fC\fC\fC\fC\fC This parameter allows to do a bit of filtering of entries, or, for some operations, modification of the work done on given entries.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC It requires a specification at the end, which can be any combination of \fCany\fP, \fCtext\fP, \fCnew\fP, \fCdeleted\fP (or \fCremoved\fP), \fCmeta\fP, \fCmtime\fP, \fCgroup\fP, \fCmode\fP, \fCchanged\fP or \fCowner\fP; \fCdefault\fP or \fCdef\fP use the default value.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC By giving eg. the value \fCtext\fP, with a \fBstatus\fP action only entries that are new or changed are shown; with \fCmtime\fP,group only entries whose group or modification time has changed are printed.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 Please see \fBChange detection\fP for some more information. .PP If an entry gets replaced with an entry of a different type (eg. a directory gets replaced by a file), that counts as \fCdeleted\fP \fBand\fP \fCnew\fP. .RE .PP If you use \fC-v\fP, it's used as a \fCany\fP internally.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you use the string \fCnone\fP, it resets the bitmask to \fBno\fP entries shown; then you can built a new mask. So \fCowner\fP,none,any,none,delete would show deleted entries. 
If the value after all commandline parsing is \fCnone\fP, it is reset to the default.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "-W warning=action -- set warnings" \fC\fC\fC\fC\fC Here you can define the behaviour for certain situations that should not normally happen, but which you might encounter.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The general format here is \fIspecification\fP = \fIaction\fP, where \fIspecification\fP is a string matching the start of at least one of the defined situations, and \fIaction\fP is one of these: .IP "\(bu" 2 \fIonce\fP to print only a single warning, .IP "\(bu" 2 \fIalways\fP to print a warning message \fBevery\fP time, .IP "\(bu" 2 \fIstop\fP to abort the program, .IP "\(bu" 2 \fIignore\fP to simply ignore this situation, or .IP "\(bu" 2 \fIcount\fP to just count the number of occurrences. .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If \fIspecification\fP matches more than one situation, all of them are set; eg. for \fImeta=ignore\fP all of \fImeta-mtime\fP, \fImeta-user\fP etc. are ignored.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If at least a single warning that is \fBnot\fP ignored is encountered during the program run, a list of warnings along with the number of messages it would have printed with the setting \fIalways\fP is displayed, to inform the user of possible problems.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The following situations can be handled with this: \fImeta-mtime\fP, \fImeta-user\fP, \fImeta-group\fP, \fImeta-umask\fP These warnings are issued if a meta-data property that was fetched from the repository couldn't be parsed. This can only happen if some other program or a user changes properties on entries. .br In this case you can use \fC-Wmeta=always\fP or \fC-Wmeta=count\fP, until the repository is clean again. .PP \fIno-urllist\fP This warning is issued if a \fBinfo\fP action is executed, but no URLs have been defined yet. 
.PP \fIcharset-invalid\fP If the function \fCnl_langinfo(3)\fP couldn't return the name of the current character encoding, a default of UTF-8 is used. You might need that for a minimal system installation, eg. on recovery. .PP \fIchmod-eperm\fP, \fIchown-eperm\fP If you update a working copy as normal user, and get to update a file which has another owner but which you may modify, you'll get errors because neither the user, group, nor mode can be set. .br This way you can make the errors non-fatal. .PP \fIchmod-other\fP, \fIchown-other\fP If you get another error than \fCEPERM\fP in the situation above, you might find these useful. .PP \fImixed-rev-wc\fP If you specify some revision number on a \fBrevert\fP, it will complain that mixed-revision working copies are not allowed. .br While you cannot enable mixed-revision working copies (I'm working on that) you can avoid being told every time. .PP \fIpropname-reserved\fP It is normally not allowed to set a property with the \fBprop-set\fP action with a name matching some reserved prefixes. .PP \fIignpat-wcbase\fP This warning is issued if an \fBabsolute ignore pattern\fP does not match the working copy base directory. See 'absolute shell patterns' for more details. .PP \fIdiff-status\fP GNU diff has defined that it returns an exit code 2 in case of an error; sadly it returns that also for binary files, so that a simple \fCfsvs diff some-binary-file text-file\fP would abort without printing the diff for the second file. .br Because of this FSVS currently ignores the exit status of diff per default, but this can be changed by setting this option to eg. \fIstop\fP. 
.PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Also an environment variable FSVS_WARNINGS is used and parsed; it is simply a whitespace-separated list of option specifications.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "-u URLname[@revision[:revision]] -- select URLs" \fC\fC\fC\fC\fC Some commands can be reduced to a subset of defined URLs; the \fBupdate\fP command is an example.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you have more than a single URL in use for your working copy, \fCupdate\fP normally updates \fBall\fP entries from \fBall\fP URLs. By using this parameter you can tell FSVS to update only the specified URLs.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The parameter can be used repeatedly; the value can have multiple URLs, separated by whitespace or one of \fC',;'\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP .nf fsvs up -u base_install,boot@32 -u gcc .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This would get \fCHEAD\fP of \fCbase_install\fP and \fCgcc\fP, and set the target revision of the \fCboot\fP URL \fBfor this command\fP at 32.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "-o [name[=value]] -- other options" \fC\fC\fC\fC\fC This is used for setting some seldom-used options, for which a default can be set in a configuration file (to be implemented, currently only command-line).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC For a list of these please see \fBFurther options for FSVS.\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SH "Signals" .PP \fC\fC\fC\fC\fC If you have a running FSVS, and you want to change its verbosity, you can send the process either \fCSIGUSR1\fP (to make it more verbose) or \fCSIGUSR2\fP (more quiet).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "add" .PP \fC\fC\fC\fC\fC .PP .nf fsvs add [-u URLNAME] PATH [PATH...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC With this command you can explicitly define entries to be versioned, even if they have a matching ignore pattern. 
They will be sent to the repository on the next commit, just like other new entries, and will therefore be reported as \fINew\fP .\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The \fC-u\fP option can be used if you have more than one URL defined for this working copy and want to have the entries pinned to this URL.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Example" \fC\fC\fC\fC\fC Say, you're versioning your home directory, and gave an ignore pattern of \fC./.*\fP to ignore all \fC.*\fP entries in your home-directory. Now you want \fC.bashrc\fP, \fC.ssh/config\fP, and your complete \fC.kde3-tree\fP saved, just like other data.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC So you tell fsvs to not ignore these entries: .PP .nf fsvs add .bashrc .ssh/config .kde3 .fi .PP Now the entries below \fC.kde3\fP would match your earlier \fC./.*\fP pattern (as a match at the beginning is sufficient), so you have to insert a negative ignore pattern (a \fItake\fP pattern): .PP .nf fsvs ignore prepend t./.kde3 .fi .PP Now a \fCfsvs st\fP would show your entries as \fINew\fP , and the next commit will send them to the repository.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "unversion" .PP \fC\fC\fC\fC\fC .PP .nf fsvs unversion PATH [PATH...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command flags the given paths locally as removed. On the next commit they will be deleted in the repository, and the local information of them will be removed, but not the entries themselves. So they will show up as \fINew\fP again, and you get another chance at ignoring them.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Example" \fC\fC\fC\fC\fC Say, you're versioning your home directory, and found that you no longer want \fC.bash_history\fP and \fC.sh_history\fP versioned. 
So you do .PP .nf fsvs unversion .bash_history .sh_history .fi .PP and these files will be reported as \fCd\fP (will be deleted, but only in the repository).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Then you do a .PP .nf fsvs commit .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Now fsvs would report these files as \fCNew\fP , as it does no longer know anything about them; but that can be cured by .PP .nf fsvs ignore './.*sh_history' .fi .PP Now these two files won't be shown as \fINew\fP , either.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The example also shows why the given paths are not just entered as separate ignore patterns - they are just single cases of a (probably) much broader pattern.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 If you didn't use some kind of escaping for the pattern, the shell would expand it to the actual filenames, which is (normally) not what you want. .RE .PP \fP\fP\fP\fP\fP .SH "_build_new_list" .PP \fC\fC\fC\fC\fC This is used mainly for debugging. It traverses the filesystem and builds a new entries file. In production it should not be used; as neither URLs nor the revision of the entries is known, information is lost by calling this function!\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Look at \fBsync-repos\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "delay" .PP \fC\fC\fC\fC\fC This command delays execution until time has passed at least to the next second after writing the data files used by FSVS (\fBdir\fP and \fBurls\fP).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command is for use in scripts; where previously the \fBdelay\fP option was used, this can be substituted by the given command followed by the \fCdelay\fP command.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The advantage against the \fBdelay\fP option is that read-only commands can be used in the meantime.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC An example: .PP .nf fsvs commit /etc/X11 -m 'Backup of X11' ... read-only commands, like 'status' fsvs delay /etc/X11 ... 
read-write commands, like 'commit' .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The optional path can point to any path in the WC.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC In the testing framework it is used to save a bit of time; in normal operation, where FSVS commands are not so tightly packed, it is normally preferable to use the \fBdelay\fP option.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "cat" .PP \fC\fC\fC\fC\fC .PP .nf fsvs cat [-r rev] path .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Fetches a file repository, and outputs it to \fCSTDOUT\fP. If no revision is specified, it defaults to BASE, ie. the current local revision number of the entry.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "checkout" .PP \fC\fC\fC\fC\fC .PP .nf fsvs checkout [path] URL [URLs...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Sets one or more URLs for the current working directory (or the directory \fCpath\fP), and does an \fBcheckout\fP of these URLs.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example: .PP .nf fsvs checkout . http://svn/repos/installation/machine-1/trunk .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The distinction whether a directory is given or not is done based on the result of URL-parsing -- if it looks like an URL, it is used as an URL. .br Please mind that at most a single path is allowed; as soon as two non-URLs are found an error message is printed.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If no directory is given, \fC'.'\fP is used; this differs from the usual subversion usage, but might be better suited for usage as a recovery tool (where versioning \fC/\fP is common). Opinions welcome.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The given \fCpath\fP must exist, and \fBshould\fP be empty -- FSVS will abort on conflicts, ie. if files that should be created already exist. 
.br If there's a need to create that directory, please say so; patches for some parameter like \fC-p\fP are welcome.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC For a format definition of the URLs please see the chapter \fBFormat of URLs\fP and the \fBurls\fP and \fBupdate\fP commands.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Furthermore you might be interested in \fBUsing an alternate root directory\fP and \fBRecovery for a non-booting system\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "commit" .PP \fC\fC\fC\fC\fC .PP .nf fsvs commit [-m 'message'|-F filename] [-v] [-C [-C]] [PATH [PATH ...]] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Commits (parts of) the current state of the working copy into the repository.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Example" \fC\fC\fC\fC\fC The working copy is \fC/etc\fP , and it is set up and committed already. .br Then \fC/etc/hosts\fP and \fC/etc/inittab\fP got modified. Since these are non-related changes, you'd like them to be in separate commits.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC So you simply run these commands: .PP .nf fsvs commit -m 'Added some host' /etc/hosts fsvs commit -m 'Tweaked default runlevel' /etc/inittab .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If the current directory is \fC/etc\fP you could even drop the \fC/etc/\fP in front, and use just the filenames.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Please see \fBstatus\fP for explanations on \fC-v\fP and \fC-C\fP . .br For advanced backup usage see also \fBthe \fP commit-pipe property".\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SH "cp" .PP \fC\fC\fC\fC\fC .PP .nf fsvs cp [-r rev] SRC DEST fsvs cp dump fsvs cp load .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The \fCcopy\fP command marks \fCDEST\fP as a copy of \fCSRC\fP at revision \fCrev\fP, so that on the next commit of \fCDEST\fP the corresponding source path is sent as copy source.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The default value for \fCrev\fP is \fCBASE\fP, ie. 
the revision the \fCSRC\fP (locally) is at.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Please note that this command works \fBalways\fP on a directory \fBstructure\fP - if you say to copy a directory, the \fBwhole\fP structure is marked as copy. That means that if some entries below the copy are missing, they are reported as removed from the copy on the next commit. .br (Of course it is possible to mark files as copied, too; non-recursive copies are not possible, but can be emulated by having parts of the destination tree removed.)\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 TODO: There will be differences in the exact usage - \fCcopy\fP will try to run the \fCcp\fP command, whereas \fCcopied\fP will just remember the relation. .RE .PP If this command is used without parameters, the currently defined relations are printed; please keep in mind that the \fBkey\fP is the destination name, ie. the 2nd line of each pair!\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The input format for \fCload\fP is newline-separated - first a \fCSRC\fP line, followed by a \fCDEST\fP line, then a line with just a dot (\fC'.'\fP) as delimiter. If you've got filenames with newlines or other special characters, you have to give the paths as arguments.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Internally the paths are stored relative to the working copy base directory, and they're printed that way, too.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Later definitions are \fBappended\fP to the internal database; to undo mistakes, use the \fBuncopy\fP action.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 \fBImportant:\fP User-defined properties like \fBfsvs:commit-pipe\fP are \fBnot\fP copied to the destinations, because of space/time issues (traversing through entire subtrees, copying a lot of property-files) and because it's not sure that this is really wanted. \fBTODO:\fP option for copying properties? 
.PP As subversion currently treats a rename as copy+delete, the \fBmv\fP command is an alias to \fBcp\fP. .RE .PP If you have a need to give the filenames \fCdump\fP or \fCload\fP as first parameter for copyfrom relations, give some path, too, as in \fC'./dump'\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 The source is internally stored as URL with revision number, so that operations like these .PP .nf $ fsvs cp a b $ rm a/1 $ fsvs ci a $ fsvs ci b .fi .PP work - FSVS sends the old (too recent!) revision number as source, and so the local filelist stays consistent with the repository. .br But it is not implemented (yet) to give an URL as copyfrom source directly - we'd have to fetch a list of entries (and possibly the data!) from the repository. .RE .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "copyfrom-detect" .PP \fC\fC\fC\fC\fC .PP .nf fsvs copyfrom-detect [paths...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command tells FSVS to look through the new entries, and see whether it can find some that seem to be copied from others already known. .br It will output a list with source and destination path and why it could match.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This is just for information purposes and doesn't change any FSVS state, (TODO: unless some option/parameter is set).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The list format is \fBon purpose\fP incompatible with the \fCload\fP syntax, as the best match normally has to be taken manually.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If \fBverbose\fP is used, an additional value giving the percentage of matching blocks, and the count of possibly copied entries is printed.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example: .PP .nf $ fsvs copyfrom-list -v newfile1 md5:oldfileA newfile2 md5:oldfileB md5:oldfileC md5:oldfileD newfile3 inode:oldfileI manber=82.6:oldfileF manber=74.2:oldfileG manber=53.3:oldfileH ... 3 copyfrom relations found. 
.fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The abbreviations are: \fImd5\fP The \fBMD5\fP of the new file is identical to that of one or more already committed files; there is no percentage. .PP \fIinode\fP The \fBdevice/inode\fP number is identical to the given known entry; this could mean that the old entry has been renamed or hardlinked. \fBNote:\fP Not all filesystems have persistent inode numbers (eg. NFS) - so depending on your filesystems this might not be a good indicator! .PP \fIname\fP The entry has the same name as another entry. .PP \fImanber\fP Analysing files of similar size shows some percentage of (variable-sized) \fBcommon blocks\fP (ignoring the order of the blocks). .PP \fIdirlist\fP The new directory has similar files to the old directory. .br The percentage is (number_of_common_entries)/(files_in_dir1 + files_in_dir2 - number_of_common_entries). .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 \fBmanber\fP matching is not implemented yet. .PP If too many possible matches for an entry are found, not all are printed; only an indicator \fC...\fP is shown at the end. .RE .PP \fP\fP\fP\fP\fP .SH "uncp" .PP \fC\fC\fC\fC\fC .PP .nf fsvs uncopy DEST [DEST ...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The \fCuncopy\fP command removes a \fCcopyfrom\fP mark from the destination entry. This will make the entry unknown again, and reported as \fCNew\fP on the next invocations.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Only the base of a copy can be un-copied; if a directory structure was copied, and the given entry is just implicitly copied, this command will return an error.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This is not folded in \fBrevert\fP, because it's not clear whether \fCrevert\fP on copied, changed entries should restore the original copyfrom data or remove the copy attribute; by using another command this is no longer ambiguous.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example: .PP .nf $ fsvs copy SourceFile DestFile # Whoops, was wrong! 
$ fsvs uncopy DestFile .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "diff" .PP \fC\fC\fC\fC\fC .PP .nf fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command gives you diffs between local and repository files.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC With \fC-v\fP the meta-data is additionally printed, and changes shown.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you don't give the revision arguments, you get a diff of the base revision in the repository (the last commit) against your current local file. With one revision, you diff this repository version against your local file. With both revisions given, the difference between these repository versions is calculated.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC You'll need the \fCdiff\fP program, as the files are simply passed as parameters to it.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The default is to do non-recursive diffs; so \fCfsvs diff .\fP will output the changes in all files \fBin the current directory\fP and below.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The output for special files is the diff of the internal subversion storage, which includes the type of the special file, but no newline at the end of the line (which \fCdiff\fP complains about).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC For entries marked as copy the diff against the (clean) source entry is printed.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Please see also \fBOptions relating to the 'diff' action\fP and \fBUsing colordiff\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "export" .PP \fC\fC\fC\fC\fC .PP .nf fsvs export REPOS_URL [-r rev] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you want to export a directory from your repository \fBwithout\fP storing any FSVS-related data you can use this command.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This restores all meta-data - owner, group, access mask and modification time; its primary use is for data 
recovery.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The data gets written (in the correct directory structure) below the current working directory; if entries already exist, the export will stop, so this should be an empty directory.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "help" .PP \fC\fC\fC\fC\fC .PP .nf help [command] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command shows general or specific \fBhelp\fP (for the given command). A similar function is available by using \fC-h\fP or \fC-\fP? after a command.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "groups" .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP .nf fsvs groups dump|load fsvs groups [prepend|append|at=n] group-definition [group-def ...] fsvs ignore [prepend|append|at=n] pattern [pattern ...] fsvs groups test [-v|-q] [pattern ...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command adds patterns to the end of the pattern list, or, with \fCprepend\fP, puts them at the beginning of the list. With \fCat=x\fP the patterns are inserted at the position \fCx\fP , counting from 0.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The difference between \fCgroups\fP and \fCignore\fP is that \fCgroups\fP \fBrequires\fP a group name, whereas the latter just assumes the default group \fCignore\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC For the specification please see the related \fBdocumentation\fP .\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fCfsvs dump\fP prints the patterns to \fCSTDOUT\fP . If there are special characters like \fCCR\fP or \fCLF\fP embedded in the pattern \fBwithout encoding\fP (like \fC\\r\fP or \fC\\n\fP), the output will be garbled.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The patterns may include \fC*\fP and \fC\fP? as wildcards in one directory level, or \fC**\fP for arbitrary strings.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC These patterns are only matched against new (not yet known) files; entries that are already versioned are not invalidated. 
.br If the given path matches a new directory, entries below aren't found, either; but if this directory or entries below are already versioned, the pattern doesn't work, as the match is restricted to the directory.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC So: .PP .nf fsvs ignore ./tmp .fi .PP ignores the directory \fCtmp\fP; but if it has already been committed, existing entries would have to be unmarked with \fBfsvs unversion\fP. Normally it's better to use .PP .nf fsvs ignore ./tmp/** .fi .PP as that takes the directory itself (which might be needed after restore as a mount point anyway), but ignore \fBall\fP entries below. .br Currently this has the drawback that mtime changes will be reported and committed; this is not the case if the whole directory is ignored.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Examples: .PP .nf fsvs group group:unreadable,mode:4:0 fsvs group 'group:secrets,/etc/*shadow' fsvs ignore /proc fsvs ignore /dev/pts fsvs ignore './var/log/*-*' fsvs ignore './**~' fsvs ignore './**/*.bak' fsvs ignore prepend 'take,./**.txt' fsvs ignore append 'take,./**.svg' fsvs ignore at=1 './**.tmp' fsvs group dump fsvs group dump -v echo './**.doc' | fsvs ignore load # Replaces the whole list .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 Please take care that your wildcard patterns are not expanded by the shell! .RE .PP \fP\fP\fP\fP\fP .SS "Testing patterns" \fC\fC\fC\fC\fC To see more easily what different patterns do you can use the \fCtest\fP subcommand. The following combinations are available: .PD 0 .IP "\(bu" 2 \fCfsvs groups test \fIpattern\fP\fP Tests \fBonly\fP the given pattern against all new entries in your working copy, and prints the matching paths. The pattern is not stored in the pattern list. .IP "\(bu" 2 \fCfsvs groups test\fP .br Uses the already defined patterns on the new entries, and prints the group name, a tab, and the path. .br With \fC-v\fP you can see the matching pattern in the middle column, too. 
.PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC By using \fC-q\fP you can avoid getting the whole list; this makes sense if you use the \fBgroup_stats\fP option at the same time.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "rign" .PP \fC\fC\fC\fC\fC .PP .nf fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...] fsvs ri [prepend|append|at=n] path-spec [path-spec ...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you keep the same repository data at more than one working copy on the same machine, it will be stored in different paths - and that makes absolute ignore patterns infeasible. But relative ignore patterns are anchored at the beginning of the WC root - which is a bit tiring to type if you're deep in your WC hierarchy and want to ignore some files.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC To make that easier you can use the \fCrel-ignore\fP (abbreviated as \fCri\fP) command; this converts all given path-specifications (which may include wildcards as per the shell pattern specification above) to WC-relative values before storing them.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example for \fC/etc\fP as working copy root: .PP .nf fsvs rel-ignore '/etc/X11/xorg.conf.*' cd /etc/X11 fsvs rel-ignore 'xorg.conf.*' .fi .PP Both commands would store the pattern './X11/xorg.conf.*'.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 This works only for \fBshell patterns\fP. .RE .PP For more details about ignoring files please see the \fBignore\fP command and \fBSpecification of groups and patterns\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "info" .PP \fC\fC\fC\fC\fC .PP .nf fsvs info [-R [-R]] [PATH...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Use this command to show information regarding one or more entries in your working copy. 
.br You can use \fC-v\fP to obtain slightly more information.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This may sometimes be helpful for locating bugs, or to obtain the URL and revision a working copy is currently at.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example: .PP .nf $ fsvs info URL: file: .... 200 . Type: directory Status: 0x0 Flags: 0x100000 Dev: 0 Inode: 24521 Mode: 040755 UID/GID: 1000/1000 MTime: Thu Aug 17 16:34:24 2006 CTime: Thu Aug 17 16:34:24 2006 Revision: 4 Size: 200 .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The default is to print information about the given entry only. With a single \fC-R\fP you'll get this data about \fBall\fP entries of a given directory; with another \fC-R\fP you'll get the whole (sub-)tree.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "log" .PP \fC\fC\fC\fC\fC .PP .nf fsvs log [-v] [-r rev1[:rev2]] [-u name] [path] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command views the revision log information associated with the given \fIpath\fP at its topmost URL, or, if none is given, the highest priority URL.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The optional \fIrev1\fP and \fIrev2\fP can be used to restrict the revisions that are shown; if no values are given, the logs are given starting from \fCHEAD\fP downwards, and then a limit on the number of revisions is applied (but see the \fBlimit\fP option).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you use the \fB-v\fP -option, you get the files changed in each revision printed, too.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC There is an option controlling the output format; see the \fBlog_output option\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Optionally the name of an URL can be given after \fC-u\fP; then the log of this URL, instead of the topmost one, is shown.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC TODOs: .IP "\(bu" 2 \fC--stop-on-copy\fP .IP "\(bu" 2 Show revision for \fBall\fP URLs associated with a working copy? In which order? 
.PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "prop-get" .PP \fC\fC\fC\fC\fC .PP .nf fsvs prop-get PROPERTY-NAME PATH... .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Prints the data of the given property to \fCSTDOUT\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 Be careful! This command will dump the property \fBas it is\fP, ie. with any special characters! If there are escape sequences or binary data in the property, your terminal might get messed up! .br If you want a safe way to look at the properties, use prop-list with the \fC-v\fP parameter. .RE .PP \fP\fP\fP\fP\fP .SH "prop-set" .PP \fC\fC\fC\fC\fC .PP .nf fsvs prop-set [-u URLNAME] PROPERTY-NAME VALUE PATH... .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command sets an arbitrary property value for the given path(s).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 Some property prefixes are reserved; currently everything starting with \fCsvn:\fP throws a (fatal) warning, and \fCfsvs:\fP is already used, too. See \fBSpecial property names\fP. .RE .PP If you're using a multi-URL setup, and the entry you'd like to work on should be pinned to a specific URL, you can use the \fC-u\fP parameter; this is like the \fBadd\fP command, see there for more details.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "prop-del" .PP \fC\fC\fC\fC\fC .PP .nf fsvs prop-del PROPERTY-NAME PATH... .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command removes a property for the given path(s).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC See also \fBprop-set\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "prop-list" .PP \fC\fC\fC\fC\fC .PP .nf fsvs prop-list [-v] PATH... .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Lists the names of all properties for the given entry. 
.br With \fC-v\fP, the value is printed as well; special characters will be translated, as arbitrary binary sequences could interfere with your terminal settings.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you need raw output, post a patch for \fC--raw\fP, or write a loop with \fBprop-get\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "remote-status" .PP \fC\fC\fC\fC\fC .PP .nf fsvs remote-status PATH [-r rev] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command looks into the repository and tells you which files would get changed on an \fBupdate\fP - it's a dry-run for \fBupdate\fP .\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Per default it compares to \fCHEAD\fP, but you can choose another revision with the \fC-r\fP parameter.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Please see the \fBupdate\fP documentation for details regarding multi-URL usage.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "resolve" .PP \fC\fC\fC\fC\fC .PP .nf fsvs resolve PATH [PATH...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC When FSVS tries to update local files which have been changed, a conflict might occur. (For various ways of handling these please see the \fBconflict\fP option.)\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command lets you mark such conflicts as resolved.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "revert" .PP \fC\fC\fC\fC\fC .PP .nf fsvs revert [-rRev] [-R] PATH [PATH...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command undoes local modifications: .IP "\(bu" 2 An entry that is marked to be unversioned gets this flag removed. .IP "\(bu" 2 For a already versioned entry (existing in the repository) the local entry is replaced with its repository version, and its status and flags are cleared. .IP "\(bu" 2 An entry that is a \fBmodified\fP copy destination gets reverted to the copy source data. 
.IP "\(bu" 2 Manually added entries are changed back to \fI'N'\fPew.\fB\fP .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Please note that implicitly copied entries, ie. entries that are marked as copied because some parent directory is the base of a copy, \fBcan not\fP be un-copied; they can only be reverted to their original (copied-from) data, or removed.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If you want to undo a \fCcopy\fP operation, please see the \fBuncopy\fP command.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC See also \fBHOWTO: Understand the entries' statii\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If a directory is given on the command line \fBall versioned entries in this directory\fP are reverted to the old state; this behaviour can be modified with \fB-R/-N\fP, or see below.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The reverted entries are printed, along with the status they had \fBbefore\fP the revert (because the new status is per definition \fIunchanged\fP).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If a revision is given, the entries' data is taken from this revision; furthermore, the \fBnew\fP status of that entry is shown.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 Please note that mixed revision working copies are not (yet) possible; the \fIBASE\fP revision is not changed, and a simple \fCrevert\fP without a revision arguments gives you that. .br By giving a revision parameter you can just choose to get the text from a different revision. .RE .PP \fP\fP\fP\fP\fP .SS "Difference to update" \fC\fC\fC\fC\fC If something doesn't work as it should in the installation you can revert entries until you are satisfied, and directly \fBcommit\fP the new state.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC In contrast, if you \fBupdate\fP to an older version, you .IP "\(bu" 2 cannot choose single entries (no mixed revision working copies yet), .IP "\(bu" 2 and you cannot commit the old version with changes, as the 'skipped' (later) changes will create conflicts in the repository. 
.PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Currently only known entries are handled." \fC\fC\fC\fC\fC If you need a switch (like \fC--delete\fP in \fCrsync(1)\fP ) to remove unknown (new, not yet versioned) entries, to get the directory in the exact state it is in the repository, please tell the \fCdev@\fP mailing list.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Removed directory structures" \fC\fC\fC\fC\fC If a path is specified whose parent is missing, \fCfsvs\fP complains. .br We plan to provide a switch (probably \fC-p\fP), which would create (a sparse) tree up to this entry.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Recursive behaviour" \fC\fC\fC\fC\fC When the user specifies a non-directory entry (file, device, symlink), this entry is reverted to the old state.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC If the user specifies a directory entry, these definitions should apply: command line switchresult \fC-N\fP this directory only (meta-data), none this directory, and direct children of the directory, \fC-R\fP this directory, and the complete tree below. \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Working with copied entries" \fC\fC\fC\fC\fC If an entry is marked as copied from another entry (and not committed!), a \fCrevert\fP will fetch the original copyfrom source. To undo the copy setting use the \fBuncopy\fP command.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "status" .PP \fC\fC\fC\fC\fC .PP .nf fsvs status [-C [-C]] [-v] [-f filter] [PATHs...] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command shows the entries that have been changed locally since the last commit.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The most important output formats are: .IP "\(bu" 2 A status columns of four (or, with \fC-v\fP , six) characters. There are either flags or a '.' printed, so that it's easily parsed by scripts -- the number of columns is only changed by \fB-q, -v -- verbose/quiet\fP. 
.IP "\(bu" 2 The size of the entry, in bytes, or \fC'dir'\fP for a directory, or \fC'dev'\fP for a device. .IP "\(bu" 2 The path and name of the entry, formatted by the \fBpath\fP option. .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Normally only changed entries are printed; with \fC-v\fP all are printed, but see the \fBfilter\fP option for more details.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The status column can show the following flags: .IP "\(bu" 2 \fC 'D'\fP and \fC'N'\fP are used for \fIdeleted\fP and \fInew\fP entries. .IP "\(bu" 2 \fC 'd'\fP and \fC'n'\fP are used for entries which are to be unversioned or added on the next commit; the characters were chosen as \fIlittle delete\fP (only in the repository, not removed locally) and \fIlittle new\fP (although \fBignored\fP). See \fBadd\fP and \fBunversion\fP. .br If such an entry does not exist, it is marked with an \fC'!'\fP in the last column -- because it has been manually marked, and so the removal is unexpected. .IP "\(bu" 2 A changed type (character device to symlink, file to directory etc.) is given as \fC'R'\fP (replaced), ie. as removed and newly added. .IP "\(bu" 2 If the entry has been modified, the change is shown as \fC'C'\fP. .br If the modification or status change timestamps (mtime, ctime) are changed, but the size is still the same, the entry is marked as possibly changed (a question mark \fC'\fP?' in the last column) - but see \fBchange detection\fP for details. .IP "\(bu" 2 A \fC'x'\fP signifies a conflict. .IP "\(bu" 2 The meta-data flag \fC'm'\fP shows meta-data changes like properties, modification timestamp and/or the rights (owner, group, mode); depending on the \fB-v/-q\fP command line parameters, it may be splitted into \fC'P'\fP (properties), \fC't'\fP (time) and \fC'p'\fP (permissions). .br If \fC'P'\fP is shown for the non-verbose case, it means \fBonly\fP property changes, ie. the entries filesystem meta-data is unchanged. 
.IP "\(bu" 2 A \fC'+'\fP is printed for files with a copy-from history; to see the URL of the copyfrom source, see the \fBverbose\fP option. .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Here's a table with the characters and their positions: .PP .nf * Without -v With -v * .... ...... * NmC? NtpPC? * DPx! D x! * R + R + * d d * n n * .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Furthermore please take a look at the \fBstat_color\fP option, and for more information about displayed data the \fBverbose\fP option.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "sync-repos" .PP \fC\fC\fC\fC\fC .PP .nf fsvs sync-repos [-r rev] [working copy base] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command loads the file list afresh from the repository. .br A following commit will send all differences and make the repository data identical to the local.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This is normally not needed; the only use cases are .IP "\(bu" 2 debugging and .IP "\(bu" 2 recovering from data loss in the \fB$FSVS_WAA\fP area. .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC It might be of use if you want to backup two similar machines. Then you could commit one machine into a subdirectory of your repository, make a copy of that directory for another machine, and \fCsync\fP this other directory on the other machine.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC A commit then will transfer only _changed_ files; so if the two machines share 2GB of binaries (\fC/usr\fP , \fC/bin\fP , \fC/lib\fP , ...) then these 2GB are still shared in the repository, although over time they will deviate (as both committing machines know nothing of the other path with identical files).\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This kind of backup could be substituted by two or more levels of repository paths, which get \fIoverlaid\fP in a defined priority. 
So the base directory, which all machines derive from, will be committed from one machine, and it's no longer necessary for all machines to send identical files into the repository.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The revision argument should only ever be used for debugging; if you fetch a filelist for a revision, and then commit against later revisions, problems are bound to occur.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 There's issue 2286 in subversion which describes sharing identical files in the repository in unrelated paths. By using this relaxes the storage needs; but the network transfers would still be much larger than with the overlaid paths. .RE .PP \fP\fP\fP\fP\fP .SH "update" .PP \fC\fC\fC\fC\fC .PP .nf fsvs update [-r rev] [working copy base] fsvs update [-u url@rev ...] [working copy base] .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC This command does an update on the current working copy; per default for all defined URLs, but you can restrict that via \fB-u\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC It first reads all filelist changes from the repositories, overlays them (so that only the highest-priority entries are used), and then fetches all necessary changes.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Updating to zero" \fC\fC\fC\fC\fC If you start an update with a target revision of zero, the entries belonging to that URL will be removed from your working copy, and the URL deleted from your URL list. .br This is a convenient way to replace an URL with another. .br \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 As FSVS has no full mixed revision support yet, it doesn't know whether under the removed entry is a lower-priority one with the same path, which should get visible now. .br Directories get changed to the highest priority URL that has an entry below (which might be hidden!). 
.RE .PP Because of this you're advised to either use that only for completely distinct working copies, or do a \fBsync-repos\fP (and possibly one or more \fBrevert\fP calls) after the update.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC\fP\fP\fP\fP\fP .SH "urls" .PP \fC\fC\fC\fC\fC .PP .nf fsvs urls URL [URLs...] fsvs urls dump fsvs urls load .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Initializes a working copy administrative area and connects \fCthe\fP current working directory to \fCREPOS_URL\fP. All commits and updates will be done to this directory and against the given URL.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example: .PP .nf fsvs urls http://svn/repos/installation/machine-1/trunk .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC For a format definition of the URLs please see the chapter \fBFormat of URLs\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 If there are already URLs defined, and you use that command later again, please note that as of 1.0.18 \fBthe older URLs are not overwritten\fP as before, but that the new URLs are \fBappended\fP to the given list! 
If you want to start afresh, use something like .PP .nf true | fsvs urls load .fi .PP .RE .PP \fP\fP\fP\fP\fP .SS "Loading URLs" \fC\fC\fC\fC\fC You can load a list of URLs from \fCSTDIN\fP; use the \fCload\fP subcommand for that.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Example: .PP .nf ( echo 'N:local,prio:10,http://svn/repos/install/machine-1/trunk' ; echo 'P:50,name:common,http://svn/repos/install/common/trunk' ) | fsvs urls load .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC Empty lines are ignored.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Dumping the defined URLs" \fC\fC\fC\fC\fC To see which URLs are in use for the current WC, you can use \fCdump\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC As an optional parameter you can give a format statement: \fCp\fP priority \fCn\fP name \fCr\fP current revision \fCt\fP target revision \fCR\fP readonly-flag \fCu\fP URL \fCI\fP internal number for this URL \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 That's not a real \fCprintf()-format\fP; only these and a few \fC\\\fP sequences are recognized. .RE .PP Example: .PP .nf fsvs urls dump ' %u %n:%p\\n' http://svn/repos/installation/machine-1/trunk local:10 http://svn/repos/installation/common/trunk common:50 .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC The default format is \fC'name:%n,prio:%p,target:%t,ro:%r,%u\\\\n'\fP; for a more readable version you can use \fB-v\fP.\fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC \fP\fP\fP\fP\fP .SS "Loading URLs" \fC\fC\fC\fC\fC You can change the various parameters of the defined URLs like this: .PP .nf # Define an URL fsvs urls name:url1,target:77,readonly:1,http://anything/... # Change values fsvs urls name:url1,target:HEAD fsvs urls readonly:0,http://anything/... fsvs urls name:url1,prio:88,target:32 .fi .PP \fP\fP\fP\fP\fP .PP \fC\fC\fC\fC\fC .PP \fBNote:\fP .RS 4 FSVS as yet doesn't store the whole tree structures of all URLs. 
So if you change the priority of an URL, and re-mix the directory trees that way, you'll need a \fBsync-repos\fP and some \fBrevert\fP commands. I'd suggest to avoid this, until FSVS does handle that case better. .RE .PP \fP\fP\fP\fP\fP .SH "Author" .PP Generated automatically by Doxygen for fsvs from the source code. fsvs-1.2.6/doc/notice.txt0000644000202400020240000000044411100577715014244 0ustar marekmarekMany of the files in this directory are autogenerated from the comments in the source files. It might be better to change them; but I'll accept documentation patches, too. (I just have to put the changes back into the source files). If you want to help, just ask on the dev@ mailing list. fsvs-1.2.6/tests/0000755000202400020240000000000012554717231012620 5ustar marekmarekfsvs-1.2.6/tests/060_components0000755000202400020240000000131511320474076015315 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/060.comp-tests if [[ "$opt_DEBUG" != 1 ]] then $INFO "No debugging compiled in, cannot do componenttests." exit 0 fi # test for matching if $COMPONENT_SCRIPT $logfile $TEST_PROG_DIR/comp-test/fail-test.ct > /dev/null 2>&1 then $ERROR "Component-test doesn't fail when it should!" else $SUCCESS "Component-tests running" fi ( cd / $BINq urls file:/// ) base=$TEST_PROG_DIR/comp-test for scr in `cd $base && ls ???_*.ct` do echo "testing $scr" if $COMPONENT_SCRIPT $logfile $base/$scr then $INFO "$scr ok" else echo "$base/$scr:1: unexpected answer" $ERROR "Component-test failed" fi done $SUCCESS "ok" fsvs-1.2.6/tests/022_update_details0000755000202400020240000000261611040023007016101 0ustar marekmarek#!/bin/bash # Test whether "fsvs up" hides previously new files. set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC filename=new-file-XYZ dir=new-dir-ABC file2=$dir/EFGH logfile=$LOGDIR/022.logfile # We don't use backticks here - in case there's an error in fsvs, the shell # would not stop. 
$BINq up > $logfile # this next line has two tabulators - in grep and cut rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d"."` echo "now at rev. $rev" touch empty-file $BINq ci -m "new file" # Goto old revision $BINq up -r$rev -o delay=yes # Modify WC mkdir $dir echo A > $filename echo B > $file2 # Save current state $BINdflt st | grep new > $logfile # Goto last revision $BINq up # The status must not have changed! if $BINdflt st | grep new | cmp $logfile - then $SUCCESS "update keeps new files, status shows them" else $ERROR_NB "old was:" cat $logfile $ERROR_NB "new is:" $BINdflt st $ERROR "update hides new files" fi # Now commit, so that the new files are versioned $BINq ci -m "new" # now set a fixed target revision $INFO "Testing fixed target revision $rev" $BINq urls target:$rev,$REPURL $BINq up ( cd $WC2 ; $BINq up -r$rev ) $COMPAREWITH $WC2 # But command-line has still precedence $BINq up -rHEAD ( cd $WC2 ; $BINq up ) $COMPAREWITH $WC2 # Return to old revision $BINq up ( cd $WC2 ; $BINq up -r$rev ) $COMPAREWITH $WC2 $SUCCESS "Update-behaviour passed." fsvs-1.2.6/tests/023_parallel_changes0000755000202400020240000000344410755223237016422 0ustar marekmarek#!/bin/bash # Tests whether parallel changes in multiple working copies are ok. # How many WCs we use NUM_WC=5 # How often we do that (change*n ; update) cycles NUM_LOOPS=15 set -e $PREPARE_CLEAN WC_COUNT=$NUM_WC > /dev/null $INCLUDE_FUNCS # # Thread i # - creates file i+1 # - changes file i # - removes file (i-1) # i <= i+4, modulo (4*N) for loop in `seq 1 $NUM_LOOPS` do $INFO "Parallel changes -- loop #$loop of $NUM_LOOPS." exec 5> $LOGDIR/023.round_$loop.log perl -e ' $wc_cnt=shift; $base=shift; $loop=shift; $mult=5; sub FN { return "$base$i/$n"; } sub P { print(join("\t","°°",$loop,$wc_cnt,$mult,$i,$n,$$,$_[0],FN()),"\n"); } for $i (1 .. $wc_cnt) { $n=1000 + ( $i*$mult + $loop ) % ($wc_cnt * $mult); P("-"); ( unlink(FN()) && P("remove") ) || die $! 
if -f FN() && !(($i + $loop) % 7); $n++; ( open(F,">> ".FN()) && print(F $$," ",$i," ",$n,"\n") && close(F) && P("append") ) || die $! if -f FN(); $n++; ( open(F,"> ".FN()) && print(F localtime()." ",$$," ",$i," ",$n,"\n") && close(F) && P("write") ) || die $! if !-f FN(); } ' $NUM_WC $WCBASE $loop >&5 2>&5 for i in `seq 1 $NUM_WC` do ( cd $WCBASE$i && echo "=== update $i" && $BINdflt up && echo "=== commit $i" && $BINdflt ci -m "$i-$loop" ) || false done >&5 2>&5 for i in `seq 1 $NUM_WC` do ( cd $WCBASE$i && echo "=== update $i" && $BINdflt up && echo "=== status $i" && $BINdflt st -C -C ) || false echo "=== check $i" if [[ `cd $WCBASE$i && $BINdflt st` != '' ]] then $ERROR "$WCBASE$i has a bad status!" > `tty` 2>&1 fi echo "=== compare $i" $COMPAREWITH $WCBASE$i done >&5 2>&5 done $SUCCESS "Parallel working copy modifications ok." # cat /tmp/ram/fsvs-test-1000/log/023.round_* | grep °° | sort -n -k2 -k5 | grep 1011 fsvs-1.2.6/tests/011_ignore0000755000202400020240000001740111133037565014412 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # To test: # - ignoring a file # - via shell-pattern # - pcre # - not ignoring a file (take-pattern) # - all of these in subdirectories logfile=$LOGDIR/011.ignore logfile_all=$logfile.all export PREFIX=igntest export TAKE=TAKE export IGNORE=IGN # To separate changed directories from wanted/not wanted files, # we grep for this postfix. # Change caused by using tmpfs, which (correctly!) changed the directories' # mtime - using ramfs before didn't do that. export POSTFIX=txt # delete the old ignore file ign_file=`$PATH2SPOOL $(pwd) Ign` test -e $ign_file && rm $ign_file # remove old test files find . 
-depth -iname "*$PREFIX*$POSTFIX" -exec rm -rf {} \; # create some files # Matrix: shell shell-abs pcre # ignored s.i.IGN S.i.IGN p.i.IGN # taken s.t.TAKE S.t.TAKE p.t.TAKE function M() { test -d "$1" || mkdir -p $1 # s for shell # p for perl # e for wild-wildcard # A for absolute # S for absolute, root-based for p in s p e S A do for t in $TAKE $IGNORE do for a in `seq 1 1` do echo $p.$t.$a > $1/$PREFIX-$p.$t.$a.$counter.$POSTFIX counter=`expr $counter + 1` done done done } DIRLIST=". ign-dir1 $IGNORE-s-dir2 ign-dir2/sub ign-dir2/$IGNORE-p-dir3 deep/dir/for/wild/wild/cards" for d in $DIRLIST do mkdir -p $d done $BINq ci -m "create dirs" -o delay=yes $WC2_UP_ST_COMPARE # So we have a constant amount of digits (sorted ls) counter=1000 for d in $DIRLIST do M $d done $BINdflt st | grep $POSTFIX > $logfile_all all_new=`wc -l < $logfile_all` take_new=`grep $TAKE < $logfile_all | wc -l` echo $all_new new files, $take_new to take. PATTERN_COUNT=15 # We need the patterns in the order take, ignore. # Test the --prepend here, too. $BINq ignore "./**$PREFIX-s**" "PCRE:.*$PREFIX-p\." 
$BINq ignore prepend "take,./**$PREFIX-s.$TAKE**" "take,PCRE:.*$PREFIX-p\.$TAKE" $BINq ignore at=1 "take,nocase,./*/dir/*/W?LD/**/$PREFIX-s.$TAKE**" $BINq ignore at=1 "take,insens,./**/$PREFIX-[ef].$TAKE**" $BINq ignore "take,/**$PREFIX-S.$TAKE**" "/**$PREFIX-S**" $BINq ignore "take,$WC/**$PREFIX-A.$TAKE**" "$WC/**$PREFIX-A**" $BINq ignore "insens,./**/$PREFIX-[ef]**" $BINq ignore prepend "DEVICE:<0" "DEVICE:>=0xff:0xff" ignored_file=$PREFIX-perinode-$POSTFIX touch $ignored_file $BINq ignore prepend `perl -e '@f=stat(shift); $f[1] || die $!; printf "INODE:%d:%d:%d", $f[0] >> 8, $f[0] & 0xff, $f[1];' $ignored_file` # this should never match $BINq ignore prepend "DEVICE:0xff:0xff" if [[ `$BINdflt ignore dump | wc -l` -eq $PATTERN_COUNT ]] then $SUCCESS '"ignore dump" returns the correct number of lines' else $ERROR '"ignore dump" gives bad number of lines' fi cd ign-dir1 if [[ `$BINdflt ignore dump | wc -l` -eq $PATTERN_COUNT ]] then $SUCCESS '"ignore dump" works in subdirectories' else $ERROR '"ignore dump" wrong in subdirs?' fi cd .. # Comparing strings with embedded newlines doesn't seem to work, # so we take the MD5 of the returned lists before=`$BINdflt ignore dump | md5sum` transfer=`$BINdflt ignore dump | $BINdflt ignore load` after=`$BINdflt ignore dump | md5sum` if [[ $transfer = "$PATTERN_COUNT patterns loaded." && $after = $before ]] then $SUCCESS "'ignore dump | ignore load' gives identity" else echo "**** Got: $after" echo "**** expected: $before" echo "**** Transfer said: $transfer" $ERROR "ignore dump/load error" fi $BINdflt st | grep $POSTFIX > $logfile filt_new=`wc -l < $logfile` echo $filt_new after filtering. if [[ $filt_new -ne $take_new ]] then cat $logfile $ERROR " mismatch - $filt_new got, $take_new expected!" fi echo "Testing for files which should not be found ..." if $BINdflt st | grep $POSTFIX | grep $IGNORE | grep -v $TAKE > /dev/null 2>&1 then $ERROR "other files found??" fi echo "Testing for files which should be found ..." 
take_filt=`$BINdflt st | grep $TAKE | wc -l` if [[ $take_filt -ne $filt_new ]] then $ERROR " wrong files found??\n" fi committed=`$BINdflt ci -m "ci" | grep "^N" | wc -l` echo " committed $committed files, expect $filt_new" if [[ "$committed" -ne "$filt_new" ]] then $ERROR "change in which files are committed" fi # update other wc pushd $WC2 > /dev/null echo " up-x" $BINq up # If we find differences in ignore files, it is ok - they should not # be committed, after all. # Files that should be taken should have no difference. $COMPARE -x "IGN|igntest-perinode" $WC/ $WC2/ > $logfile after_up=`grep $TAKE < $logfile || true` if [[ "$after_up" = "" ]] then $SUCCESS "update gets no ignored files" else $ERROR "update gets ignored files!" fi popd > /dev/null # Restore wcs echo "Restoring working copies to an equal state." find . -depth -iname "*$PREFIX*" -exec rm -rf {} \; $BINq st $BINq ci -m asf pushd $WC2 > /dev/null $BINq up popd > /dev/null echo "Testing pattern list edits." echo "" | $BINq ignore load #if [[ `cat $ign_file` == 0 ]] if [[ -e $ign_file ]] then cat $ign_file ls -la $ign_file $ERROR "not emptied" else $SUCCESS "empty state." fi $BINq ignore prepend group:ignore,./1 $BINq ignore append group:take,./3 $BINq ignore at=1 ./2 $BINq ignore prepend group:take,./0 file=dump.txt $BINq ignore dump > $file if ( echo group:take,./0 ; echo group:ignore,./1 ; echo group:ignore,./2 ; echo group:take,./3 ) | cmp -s - $file then $SUCCESS "pattern edit operations work." else cat $file $ERROR "dump gives wrong results" fi rm $file $WC2_UP_ST_COMPARE # Create a directory tree, where something deep within is ignored. # Get a defined state $BINq ci -m1 -odelay=yes mkdir -p a/b/c/d/e/f/g/h touch a/b/c/d/e/f/g/h/$STG_UTF8 $BINdflt ignore '/**/a/**/c/*/e/f' if $BINdflt st | grep "$STG_UTF8" then $ERROR "Deep ignores in build_tree doesn't work." else $SUCCESS "Deep ignores in build_tree work." 
fi $BINq ci -mO # Now we put the WAA in there and test again, to see whether it gets # correctly ignored. new=$WC/waa cp -a $FSVS_WAA $new FSVS_WAA=$new # Only "." and "waa" may be shown - nothing below. if [[ `$BINdflt st -C | wc -l` -le 2 ]] then $SUCCESS "WAA gets ignored" else $BINdflt st -C $ERROR "WAA would get versioned" fi # Test whether the absolute patterns warn bad=/$RANDOM-$RANDOM/does-not-exist if $BINdflt ignore "$bad" -o ignpat-wcbase=stop 2>/dev/null then $ERROR "Bad absolute ignore pattern taken" fi if ! $BINdflt ignore "$bad" -W ignpat-wcbase=ignore then $ERROR "Bad absolute ignore pattern not taken" fi # Pattern already saved, a simple dump should do the trick if [[ `( $BINdflt ignore dump -W ignpat-wcbase=always 2>&1 ; true )` != *"WARNING: The absolute shell pattern"*"$bad"*"does neither have the working copy base path"*"nor a wildcard path"* ]] then $BINdflt ignore dump -W ignpat-wcbase=always $ERROR "Bad absolute ignore pattern warning not given?" fi $SUCCESS "Absolute ignore pattern warnings tests" # Test the "dir-only" specification true | $BINdflt ignore load $BINq ci -m1 -o delay=yes # now we're clean. mkdir -p deep/a/b/c/dir touch deep/fileA touch deep/a/fileB touch deep/a/b/fileC touch deep/a/b/c/fileD touch deep/a/b/c/dir/file-ok $BINq ignore 'take,./deep/**ok*' 'dir,take,./deep/**' './deep/**' # Store the statistic for later. $BINq ignore test -o group_stats=yes > $logfile $BINq ci -m1 if [[ `$BINdflt log -v -r HEAD | grep file | wc -l` -eq 1 ]] then $SUCCESS "dir-only pattern looks ok." else $ERROR "wrong commit for dir-only pattern" fi # Test group statistics. 
l1="Grouping statistics" l2=`echo -e "9\t1\ttake\ttake,./deep/**ok*"` l3=`echo -e "8\t4\ttake\tdir,take,./deep/**"` l4=`echo -e "4\t4\tignore\tgroup:ignore,./deep/**"` if [[ `cat $logfile` == *"$l1"*"$l2"*"$l3"*"$l4" ]] then $SUCCESS "Group statistics" else echo *"$l1"*"$l2"*"$l3"*"$l4" $ERROR "Group statistics output" fi fsvs-1.2.6/tests/055_rel-ignore0000755000202400020240000000257511110760254015201 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # General ignore patterns are tested somewhere else. # Here just the special rel-ignore function is tested. logfile=$LOGDIR/055.log mkdir -p a/b $BINq ci -m1 -o delay=yes touch a/b/f1 a/b/fI touch g1 gI function exp { dumped="group:ignore,$1" cnt=$2 cnt_I="$3" shift 3 # Rest are patterns to be loaded while [[ "$1" != "" ]] do pat="$1" shift true | $BINdflt ignore load $INFO "Testing $pat" $BINdflt rel-ignore "$pat" $BINdflt st > $logfile if [[ `grep I $logfile | wc -l` -ne $cnt_I ]] then cat $logfile $ERROR "$cnt_I *I can be ignored, but found "`grep -c I < $logfile` fi if [[ `wc -l < $logfile` -ne $cnt ]] then cat $logfile $ERROR "$cnt lines expected, "`wc -l < $logfile`" got" fi $BINdflt ignore dump > $logfile if [[ `cat $logfile` == "$dumped" ]] then $SUCCESS "$dumped matched" else cat $logfile $ERROR "$dumped not matched" fi done } exp "./**/*I" 4 0 "**/*I" "**/../**/*I" exp "./**I" 4 0 "**I" "./**I" "$WC/**I" # Ignore only on top level exp "./*I" 5 1 "**/../*I" # From here on only level 2 below is ignored, so the single ./gI entry # gets found. exp "./*/*/*I" 5 1 "*/*/*I" "*/../*/X/../*/*I" "$WC/*/*/227/../*I" exp "./a/**I" 5 1 "./a/**I" # All can be found exp "./a/**p" 6 2 "a/**p" exp "./*/*I" 6 2 "*/*I" "./*/*I" "$WC/*/*I" fsvs-1.2.6/tests/path2spool0000755000202400020240000000221011216125755014632 0ustar marekmarek#!/usr/bin/perl use Digest::MD5 qw(md5_hex); $_=shift; # Filename. If empty, just the path to the WAA is returned; if '^', the # path to the conf area is returned. 
$file=shift; # The softroot - see the option "softroot". ( $softroot=shift ) =~ s,/+$,,; # The WC base; defaults to the current directory. $wc_base=shift() || $ENV{"PWD"} || die $!; unless (m#^/#) { $p=$ENV{"PWD"}; $_ = $p . "/" . $_; } $conf = ($file =~ /^[A-Z]/); $conf=1, $file="" if ($file eq '^'); 1 while # remove /./ and /. s#/\./#/#g || s#/\.$#/#g || # remove / at end; at least a single character (/) must be kept s#(.)/+$#\1#g || # change // to / s#//+#/#g; # remove the softroot die if $softroot && !s#^$softroot##; die if $softroot && $wc_base !~ s#^$softroot##; $wc_base =~ s# /+ $ # #gx; #print "wc=$wc_base; entry=$_;\n"; $m=md5_hex($_); if ($conf) { print +($ENV{"FSVS_CONF"} || "/etc/fsvs"), "/" . $m . "/" . $file . "\n"; } else { $wc=substr(md5_hex($wc_base), 0, $ENV{"WAA_CHARS"}+0); print +($ENV{"FSVS_WAA"} || "/var/spool/fsvs"), "/" . $wc, "/" . substr($m,0,2), "/" . substr($m,2,2), "/" . substr($m,4), "/" . $file . "\n"; } fsvs-1.2.6/tests/037_mkdir_base0000755000202400020240000000256711150447254015245 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/037.create_base subdir1=ä-$RANDOM/ö-$$ subdir2=$subdir1/§-$RANDOM data_file=X1 echo $REPURL/$subdir2 | $BINq urls load $BINdflt delay seq 1 $RANDOM > $data_file mkdir abc date > abc/file if $BINq ci -m1 > $logfile 2>&1 then $ERROR "invalid base directory silently accepted" else $SUCCESS "invalid base directory rejected" fi if $BINq ci -m1 -o mkdir_base=yes > $logfile 2>&1 then $SUCCESS "mkdir_base didn't die." else $ERROR "mkdir_base gives an error." fi echo $REPURL | $BINq urls load $BINq up $BINq diff $BINq sync $BINq diff $BINq ci -m1 if [[ -d $subdir2 ]] then $SUCCESS "Subdirectory created" else $ERROR "Subdirectory doesn't exist" fi if [[ -f $subdir2/$data_file ]] then $SUCCESS "Data file exists." else $ERROR "No data file" fi if diff -u $data_file $subdir2/$data_file then $SUCCESS "Data file ok." 
else $ERROR "Data file with wrong data" fi if [[ `svn pl -v $REPURL/$subdir1` == "" ]] then $SUCCESS "No properties in the intermediate levels." else $ERROR "Properties in the intermediate levels?" fi # As the middle directories have no properties recorded, the updated # entries will have different mtimes. # To avoid getting errors, we'll touch them; the $BINq up $WC2 find $WC1 $WC2 -type d -iname "?*-*" | xargs touch -- $COMPARE_1_2 $SUCCESS "Correctly updated." fsvs-1.2.6/tests/up_st_cmp0000755000202400020240000000050310606620402014522 0ustar marekmarek#!/bin/bash set -e $INCLUDE_FUNCS echo " ** up" cd $WC2 $BINq up echo " ** st" if [[ `$BINdflt st | wc -l` -eq 0 ]] then # null statement true else $ERROR_NB "expected _no_ output, but got one:" $BINdflt st exit 1 fi echo " ** compare" $COMPARE_1_2 $SUCCESS "2nd working copy correctly updated." fsvs-1.2.6/tests/component-test.pl0000755000202400020240000000374210757274771016157 0ustar marekmarek#!/usr/bin/perl use IPC::Open2; $logfile=shift; $|=1; # Don't try $BINdflt - that might have valgrind or similar in front. $pid = open2(RDR, WTR, 'gdb ' . $ENV{"BIN_FULLPATH"} . ' 2>&1'); $gdb_prompt_delimiter = "GDB-delim-$$-" . time; $ign=Exch("set pagination off\nset prompt $gdb_prompt_delimiter\\n"); $match=""; $err=0; $running=0; $line=0; @output=(); while (<>) { $line++; chomp; next if m/^\s*##/; next if m/^\s*$/; $match="\\\$\\d+ = $1",next if m{^\s*#=\s+(.*)}; $match=$1,next if m{^\s*#[/~]\s+(.*)}; if (!$running) { if (/^\s*(print|set|call)\s*/) { $err ||= Exch("b _do_component_tests"); # We have to use -D to avoid getting debug messages ... they'd show # up in the output, and potentially mess our matching up. $err ||= Exch("r -d -D invalid"); $running=1; } else { $running=1 if m#^\s*(r|R)#; } } if (s#^\+##) { $_=eval($_); die $@ if $@; } else { # substitute $#$ENV{"WAA"}# and similar. 
# We don't use ${} as that's needed for hash lookup (%ENV) while (s/\$\#(.*?)\#/eval($1)/e) { die $@ if $@; } } $err ||= Exch($_, $match); $match=""; $running=0 if m#^\s*kill#; } Exch("kill"); Exch("q"); open(LOG, "> $logfile") || die "$logfile: $!\n"; print LOG @output; close LOG; print @output if $err || length($ENV{"VERBOSE"}); exit $err; sub Exch { my($out, $exp)=@_; my($input, $ok, $err); local(%SIG); local($/); $/=$gdb_prompt_delimiter; $SIG{"ALRM"}=sub { die "Timeout waiting for $exp\n"; }; print WTR $out,"\n"; push @output,"send>> ", $out,"\n"; alarm(4); $input=; alarm(0); substr($input, -length($/), length($/))=""; # find non-empty lines @in=(); map { push @in, $_; } grep(/\S/, split(/\n/, $input)); @in_str=map { "recv<< " . $_ . "\n"; } @in; push @output, @in_str; return 0 if (!$exp); $found = grep(m/$exp/m, @in); $err=!$found; push @output, "expect '@in' to match /$exp/: err=$err\n"; warn("$ARGV:$line: /$exp/ not matched:\n", @in_str) if ($err); return $err; } fsvs-1.2.6/tests/010_non-existing_uids0000755000202400020240000000131111021704125016552 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC if [[ $UID -ne 0 ]] then echo "Warning: cannot test changing userids as normal user." exit fi filename=uid-gid-test # Find an unused id function unused_id { perl -e '$i=shift || 100; $grp=shift; $i++ while ($grp ? getgrgid($i) : getpwuid($i)); print $i' $1 $2 } uid=`unused_id 100 0` gid=`unused_id 100 1` echo "Using UID $uid and GID $gid for unknown-user-tests." 
if [[ -e $filename ]] then rm $filename $BINq ci -m "delete the test-file" fi date > $filename chown $uid.$gid $filename echo " ci1" $BINq ci -m "uid-gid-test" echo "Another line" >> $filename echo " ci2" $BINq ci -m "uid-gid-expand" $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/004_delete0000755000202400020240000000032411021704125014354 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC rm empty-file blabla rm -r tree/c echo " ci" $BINq ci -m "deleted file, dir and symlink" echo " st1" $BINq st $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/044_copyfrom0000755000202400020240000001333011036577670015000 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC fn1=file1 fn2=file2 fn3=file3 logfile=$LOGDIR/044.copyfrom copydb=`$PATH2SPOOL . Copy` ###################################################### # Check copyfrom relation setting logic. # Overwrite is not allowed - the user would have to revert (TODO). # We could allow it for non-directories, however. touch 1 4 5 s $BINq ci -mx # If these file had a different timestamp, the revert below would only # change this; but we want the copy relations to be removed. touch -r 1 2 3 6 $BINdflt cp 1 2 $BINdflt cp 5 6 $BINdflt cp ./4 $WC/3 if [[ ! -s $copydb ]] then $ERROR "Expected a copyfrom database at $copydb" fi if [[ $(echo `$BINdflt cp dump | sort`) == ". . 2 3 6 $REPURL/1 $REPURL/4 $REPURL/5" ]] then $SUCCESS "Setting and overwriting copyfrom information works." else $BINdflt cp dump $ERROR "Setting or overwriting makes mistakes?" fi # Remove copyfrom markers $BINq sync-repos ( echo 5 echo 6 echo . echo 4 echo 3 echo . echo 1 echo 2 ) | $BINdflt cp load # we still have the values of the last copy operations if [[ $(echo `$BINdflt cp dump | sort`) == ". . 2 3 6 $REPURL/1 $REPURL/4 $REPURL/5" ]] then $SUCCESS "Loading copyfrom information works." else $BINdflt cp dump $ERROR "Loading copyfrom fails." 
fi # A "true | fsvs cp load" doesn't work, as the copyfrom information is # added, not replaced. So we undo the copy. $BINdflt uncopy 3 6 2 if [[ `$BINdflt cp dump -v` == "No copyfrom information was written." ]] then $SUCCESS "Purging copyfrom works" else $ERROR "Purging copyfrom fails" fi if [[ `true | $BINdflt cp load -v` == "0 copyfrom relations loaded." ]] then $SUCCESS "Nothing loaded." else $ERROR "Loading nothing on empty copy database fails" fi ###################################################### # Test copyfrom on commits. seq 1 100 > $fn1 $BINq ci -m "file1" -o delay=yes # Now the copyfrom relation file should be cleaned up. if [[ -s $copydb ]] then $ERROR "Expected copyfrom database at $copydb to be deleted" fi function Status { $BINdflt st > $logfile while [[ "$1" != "" ]] do if ! grep -F "$1" < $logfile | grep -F "$2" then $ERROR "Expected status '$1' for '$2'" fi shift shift done } ###################################################### # Test simple copying. # The other file is moved before the commit, so we can check that a commit # removed *exactly* the used records, and no others. cat $fn1 > $fn2 mv $fn1 $fn3 $BINq cp $fn1 $fn2 Status ".m.+" $fn2 "N..." $fn3 "D..." $fn1 $BINq mv $fn1 $fn3 Status ".m.+" $fn2 "...+" $fn3 "D..." $fn1 $BINq ci -m "file2" $fn2 # Status ".m.+" $fn2 "...+" $fn3 "D..." $fn1 if [[ $(echo `$BINdflt cp dump | sort`) == "$fn3 $REPURL/$fn1" ]] then $SUCCESS "On commit correct entries purged." else $BINdflt cp dump $ERROR "Commit removed wrong copyfrom entries." 
fi function CopyLog { src="$1" dest="$2" msg="$3" LC_ALL=C svn log -v -r HEAD $REPURL > $logfile if grep " A /$REPSUBDIR/$dest .from /$REPSUBDIR/$src:" $logfile then # ------------------------------------------------------------------------ # r5 | flip | 2007-11-22 18:39:20 +0100 (Thu, 22 Nov 2007) | 1 line # Changed paths: # A /trunk/file2 (from /trunk/file1:4) # # file2 # ------------------------------------------------------------------------ $SUCCESS "$msg" else cat $logfile $ERROR "$msg" fi } # Doing a $WC2_UP_ST_COMPARE wouldn't work here, as the moved file is not # yet committed. CopyLog $fn1 $fn2 "File was copied" ###################################################### # Test file renaming (moving) # The operations have been done above. $BINdflt ci -m "move" # -d > $logfile $WC2_UP_ST_COMPARE if [[ -s $copydb ]] then $ERROR "Expected copyfrom database at $copydb to be deleted" fi CopyLog $fn1 $fn3 "File was moved - 1" if grep " D /$REPSUBDIR/$fn1" $logfile then $SUCCESS "File was moved - 2" else cat $logfile $ERROR "File not moved? - 2" fi ###################################################### # Test directory copying mkdir dir1 date > dir1/date-file seq 1 600 > dir1/large $BINq ci -m "dir1" src=dir1 dest=dir2 cp -a $src $dest $BINq cp $src $dest $BINq ci -m "$dest" $WC2_UP_ST_COMPARE CopyLog $src $dest "Dir was copied" if [[ -s $copydb ]] then $ERROR "Expected copyfrom database at $copydb to be deleted" fi # 2nd try: new directory without files; the entries are implicitly copied, # and have to be removed from therein. 
dest=dir3 mkdir $dest $BINdflt cp $src $dest $BINq ci -m "$dest" $WC2_UP_ST_COMPARE CopyLog $src $dest "Empty dir was copied" # Now try the same, but not starting in the wc root: # Copy definition wc/a/b/c => wc/a/e/f dirname=aergheanrgerkgergwergwergwa pre=a/b fulldir=$dirname/$dirname/$dirname/$dirname/$dirname full=$fulldir/gggg mkdir -p $pre/d/$fulldir echo $$ $RANDOM $RANDOM > $pre/c echo $RANDOM > $pre/d/$full $BINq ci -m "prep" mkdir -p a/e a/g cp $pre/c a/e/f cp -r $pre/d a/g/d # copy the file ( cd a ; $BINq cp b/c ../a/e/f ) ( cd a ; $BINq cp b/d ../a/g/d ) # Test diff for *long* filenames. echo $$-$RANDOM > a/g/d/$full $BINdflt diff a/g/d/$full > $logfile if [[ `egrep "^[+-]" $logfile | wc -l` -eq 4 ]] then $SUCCESS "Diff for long copied entries with long names works." else $ERROR "Diff for long copied entries with long names" fi $BINq ci -m "copy" -o delay=yes CopyLog a/b/c a/e/f "Copy definition below wc root" $WC2_UP_ST_COMPARE cp -r a with_subs $BINq cp a with_subs # Currently copying a copied file that's not committed is not possible. mkdir will_error if $BINdflt cp with_subs will_error then $ERROR "Unexpected exit status of copying copied, uncommitted entry." else $SUCCESS "Expected exit status of copying copied, uncommitted entry." fi $BINq ci -m "subs" $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/059_commit_meta_data0000755000202400020240000000303711104031575016423 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # Test whether non-chosen directories don't get their meta-data changed, # unless they're new (and need some meta-data in the repository). # The format as it should be in the repository. DATE=2008-07-06T05:04:03.000000Z # touch or date don't understand the ISO format. 
touchDATE="2008-07-06 05:04:03UTC" # Test 1: new directory mkdir -p a/new/dir date > a/new/dir/with_a_file touch -d "$touchDATE" a a/new a/new/dir echo "Set $DATE" $BINq ci -m1 a/new/dir/with_a_file # Check a timestamp ts=`svn pg -r HEAD svn:text-time "$REPURL/a/new"` echo "Got $ts" if [[ "$ts" == "$DATE" ]] then $SUCCESS "Correct timestamp for new directories." else $ERROR_NB "Wrong timestamp for new directories:" $ERROR "Got '$ts', expected '$DATE'." fi # Now the meta-data should be in the repository, so that WC2 gets # identical. $WC2_UP_ST_COMPARE # Test 2: existing directory # We change the mtime (so that it would be committed), but look whether # it's correct afterwards. date >> a/new/dir/with_a_file echo now > a/new/dir/with_another_file touch a a/new a/new/dir $BINq ci -m1 a/new/dir/with_* # The directories shouldn't be changed, so we can't compare the WCs. # We check the timestamps instead. for entry in a a/new a/new/dir do ts=`svn pg -r HEAD svn:text-time "$REPURL/$entry"` echo "Got $ts ($entry)" if [[ "$ts" != "$DATE" ]] then $ERROR_NB "Wrong timestamp for existing directory $entry:" $ERROR "Got '$ts', expected '$DATE'." fi done $SUCCESS "Existing directories unchanged." fsvs-1.2.6/tests/017_locale_iconv0000755000202400020240000000573411256332235015576 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC log=$LOGDIR/017.log function testfunc { filename=$1 touch file-$filename ln -s file-$filename link-$filename ln -s bad-$filename badlink-$filename # These must be the bytes c3 b6, which is an ö mkdir -p dir-$filename/dir-ö/f-$filename echo $filename | od -Ax -tc -tx1 > $log $BINdflt ci -m "locale ci $filename" >> $log $WC2_UP_ST_COMPARE > /dev/null # "svn ls" gives characters > \x80 as eg. "?\228". # With --xml we get the raw string, but *always* in UTF-8 ... so try both ways. 
svn ls -R --xml $REPURL/ > $log.1 svn ls -R $REPURL/ > $log.2 if [[ `grep -F "$filename" < $log.1 | wc -l` -eq 6 || `grep -F "$filename" < $log.2 | wc -l` -eq 6 ]] then echo "Ok, found all entries." else $ERROR_NB "Expected filename:" echo $filename | od -Ax -tx1 -tc $ERROR "En/Decode problem - entries not found." fi # TODO: test whether the entries are correct in the other locale. if [[ "$other" != "" ]] then # Remove all entries $BINq up -r1 $WC2 > /dev/null # Checkout LC_ALL=$other $BINq up $WC2 > /dev/null if $BINdflt st $WC2 | grep . then $ERROR "Didn't expect any output after update" fi LC_ALL=$other $BINq sync $WC2 > /dev/null if $BINdflt st $WC2 | grep . then $ERROR "Didn't expect any output after sync" fi fi echo "Cleaning up" rm -rf * $BINdflt ci -m "locale ci $filename cleanup" > $log LC_ALL=$other $WC2_UP_ST_COMPARE > $log } # look for UTF8 utf8_locale=`locale -a | grep .utf8 | head -1` if [[ "$utf8_locale" != "" ]] then echo "Found UTF8-locale '$utf8_locale', using that for testing." else echo "Found no utf8-locale, cannot test" fi # look for non-utf8 loc_locale=`locale -a | egrep -v "(POSIX|C|utf8$)" | head -1` if [[ "$loc_locale" != "" ]] then echo "Found non-UTF8-locale '$loc_locale', using that for testing, too." else echo "Found no non-utf8-locale, cannot test" fi # Trivial test with current settings # We must use only ASCII as we don't know in which locale # this script is parsed. $INFO "testing default locale" testfunc test12 $SUCCESS "default locale" # Clear environment unset LC_ALL LC_CTYPE # Test UTF8 if [[ "$utf8_locale" != "" ]] then $INFO "testing utf8 locale $utf8_locale" export LC_ALL=$utf8_locale export other=$loc_locale # The bytes here must be c3 a4; in utf8 that's an ä # Use a hex editor. testfunc ä testfunc $STG_UTF8 # This sequence of bytes in $STG_LOC is in latin1 or something similar, which # is not a valid UTF-8 sequence, so we can't use that. 
$SUCCESS "utf8 locale" fi # Test non-UTF8 if [[ "$loc_locale" != "" ]] then $INFO "testing non-utf8 locale $loc_locale" export LC_ALL=$loc_locale export other=$utf8_locale # The bytes here must be \xc2\x61, that is an invalid UTF8-sequence. # Use a hex editor. testfunc Âa # In non-UTF8 locales all byte sequences are valid - they have no # interdependencies. testfunc $STG_UTF8 testfunc $STG_LOC $SUCCESS "non-utf8 locale" fi # vi: binary fsvs-1.2.6/tests/006_move_entries0000755000202400020240000000031711021704125015615 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC mv tree/a tree/Aa mv tree/b tree_b echo " ci" $BINq ci -m "renamed two directories" echo " st1" $BINq st $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/005_device_ops__uid00000755000202400020240000000117111120137277016324 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC if [[ $UID -eq 0 ]] then mv device device-2 mkdir device mv device-2 device/device-node chown 1.1 empty-file # We don't know whether there's sudo, su or something else possible. # lchown() isn't available on the cli, so use perl. mkdir -m 777 all_open perl -e '$(=$)="5 5"; $<=$>=1; symlink(shift(), shift()) || die $!' device all_open/KK rm reclink touch reclink echo " ci" $BINq ci -m "changed dev to dir/dev and link to file" echo " st1" $BINq st $WC2_UP_ST_COMPARE else $WARN "cannot test device creation as normal user." 
fi fsvs-1.2.6/tests/002_co_other0000755000202400020240000000017311256332235014725 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC $BINq up cd $WC2 $WC2_UP_ST_COMPARE $BINq sync fsvs-1.2.6/tests/007_update_changed0000755000202400020240000000205011133037606016055 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC filename=update-file echo "A testline #1" > $filename echo "A testline #2" >> $filename echo " ci1" $BINq ci -m "new file" orig=`md5sum $filename` $WC2_UP_ST_COMPARE echo "A further line" >> $filename echo " ci2" $BINq ci -m "new file" pushd $WC2 > /dev/null echo "A newly changed line" >> $filename echo " up1" if FSVS_CONFLICT=STOP $BINq up 2> /dev/null then $ERROR "The modified file was overwritten!" else $SUCCESS "Modified files are not overwritten." fi #$BINdflt -d revert $filename if $BINq revert $filename then $SUCCESS "The modified file was reverted." else $ERROR "Not reverted?" fi now=`md5sum $filename` echo " Before: $orig" echo " After : $now" if [[ $now == $orig ]] then $SUCCESS "The modified file was really reverted." else $ERROR "Revert did not work!" fi if [[ `$BINdflt st`x == "x" ]] then $SUCCESS "No status output after revert." else $BINdflt st $ERROR "Some status change??" fi rm $filename popd > /dev/null $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/015_sync_repos0000755000202400020240000001074511026162660015317 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC2 # The temporary filenames for the lists after update, sync, and from wc TMP_UP=$LOGDIR/015.sync-repos-test.up.tmp TMP_UP0=$LOGDIR/015.sync-repos-test.up0.tmp TMP_SYNC=$LOGDIR/015.sync-repos-test.sync.tmp TMP_WC=$LOGDIR/015.sync-repos-test.wc.tmp logfile=$LOGDIR/015.log dir_path=`$PATH2SPOOL $WC2 dir` echo "Directory file is $dir_path" REV=HEAD # Get the dir-files. 
################################ # If we do the sync-repos later it should take the inodes from the working # copy, so that both entry lists are identically sorted. function copy_dir { # Quick fix: the files $sync and $up won't be identical, as the RF_CHECK # flag will be set for some entries. perl -e 'print scalar(<>); while (<>) { @a=split(/(\s+)/); $a[6] &= ~4; } ' } echo "step 1: update from empty set." rm -rf $WC2/* rm $dir_path # re-set the URL, so that we start empty. echo $REPURL | $BINq urls load $BINq up -r $REV ls -lad tree/b cp -a $dir_path $TMP_UP-ORIG copy_dir < $dir_path > $TMP_UP up_md5=`md5sum < $TMP_UP` echo "step 2: update from already-up-to-date." $BINq up -r $REV cp -a $dir_path $TMP_UP-ORIG2 copy_dir < $dir_path > $TMP_UP0 up0_md5=`md5sum < $TMP_UP0` echo "step 3: build new list" rm $dir_path $BINq _build-new-list -C cp $dir_path $TMP_WC wc_md5=`md5sum < $TMP_WC` echo "step 4: sync" # Do the sync at last, so that a correct list is left # for other tests. # Test whether a copy database gets removed. # We need an entries file with correct revision numbers, so we do an # initial sync-repos before the copy. $BINq sync-repos -r $REV # We copy on an non-existing target; if we'd create that directory, the # status check below would fail. $BINq cp tree/a ggg $BINq sync-repos -r $REV $BINdflt st > $logfile if [[ `wc -l < $logfile ` -eq 0 ]] then $SUCCESS "no status output - 1" else cat $logfile $ERROR "status prints something - 1" fi if [[ `$BINdflt cp dump | wc -l` -eq 1 ]] then $SUCCESS "copy db gets removed on sync-repos" else $ERROR "copy db not removed on sync-repos?" fi # The "cp dump" iterates through the hash itself; look for the copy # markings in the status report, too. if $BINdflt st -v | grep -F ".....+" > $logfile then cat $logfile $ERROR "Still copy flags set" else # If grep returns an error, no lines were found. 
$SUCCESS "No copy flags set" fi # As the RF_CHECK flag is set, we need to normalize: # - Header is taken unchanged # - Keep the whitespace separator, to get the line as identical as possible. # - The 4th field (counted beginning with 1 :-) has index 6 (separators). cp -a $dir_path $TMP_SYNC-ORIG copy_dir < $dir_path > $TMP_SYNC sync_md5=`md5sum < $TMP_SYNC` echo "found MD5s: update=$up_md5" echo " noop update=$up0_md5" echo " sync=$sync_md5" echo " from wc=$wc_md5" if [[ $up_md5 == $up0_md5 ]] then $SUCCESS "update ident with noop update" # remove temporary files on successful testing. # we keep them if any tests fails, though. rm $TMP_SYNC $TMP_UP0 $TMP_WC else $ERROR_NB "$TMP_UP and $TMP_UP0 are different" diff -au $TMP_UP $TMP_UP0 $ERROR "'fsvs update error" fi # test if status and update work # root modified is allowed. $BINdflt st > $logfile if [[ `grep -v ' \.$' $logfile | wc -l` -eq 0 ]] then $SUCCESS "no status output - 2" else cat $logfile $ERROR "status prints something - 2" fi # Update to HEAD $BINq up REV=3 # Now go down, sync with older revision, and go up again $BINq up -r $REV $BINq sync-repos -r $REV $BINq up -r HEAD $INFO "Updating entries without meta-data" # Now do some other entries *without* the meta-data properties, and look. # We have to get rid of special devices ... svn would bail out. find . -not -type f -and -not -type d -exec rm {} \; $BINq ci -m "no more special nodes" svn import --no-ignore --no-auto-props -m no-meta . $REPURL/no-meta > /dev/null # We'd like to fake some device entry; but that's not easily possible, as # there's no "svn propset URL" currently. $BINq up > $logfile $BINdflt st > $logfile if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "No status output after meta-data less update" else cat $logfile $ERROR "Status output for meta-data-less entries unexpected" fi # Symlinks have lrwxrwxrwx, all other entries should be not writeable for # group/others. 
find no-meta -not -type l -printf "%U %m\n" | grep -v "^$UID [67]00\$" > $logfile || true if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "Owner and mode correctly set." else $ERROR "Wrong rights set - expected go-rwx." fi fsvs-1.2.6/tests/090_special0000755000202400020240000000642211176260072014555 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/090.spec mv tree gt mkdir 4rg for a in `seq 1 200` do echo $a > 4rg/$a done # Commit a file that's actively changing $BINdflt ci -m1 > 4rg/file $BINdflt ci -m1 > 4rg/file2 $BINdflt ci -m1 > 4rg/file3 $BINq ci -m1 $WC2_UP_ST_COMPARE ############################################################### # # Test commit delay. # From common sense this should be done as soon as possible, # as most other tests need that to work; but in 001 it would # slow down even single case testing, and as 002 it might be # run after all other tests (with RANDOM_ORDER=1). # So we hope that it just works - and try to get a test # summary. $INFO "Testing commit delay" function CT { delay_start=`date +%s` # Make sure the files have different lengths msecs=0 for a in 1 22 333 4444 do echo $a > file $BINq ci -m$a $1 > /dev/null nsec=`date +%_9N` c_msec=${nsec:0:3} msecs=$(($msecs + ${c_msec:=0})) done # We get an error here if the result is 0 ... so make it at least 1. delta=$(expr `date +%s` - $delay_start + 1) } CT -odelay=yes with_delay=$delta wd_msec=$msecs echo "With delay that took $delta seconds (dm=$wd_msec)" CT -odelay=no normal=$delta n_msec=$msecs echo "Without delay that took $delta seconds (dm=$n_msec)" diff=$(expr $with_delay - $normal) factor=$(expr $n_msec / $wd_msec) $INFO "Difference $diff, factor $factor" # At least two seconds difference should be here. if [[ $diff -ge 2 ]] then $SUCCESS "The delay option seems to work (runtime)." else if [[ $factor -ge 4 ]] then $SUCCESS "The delay option seems to work (msec-diff)." 
else $ERROR "The delay option is too fast" fi fi $INFO "Testing delay command." dirfile=`$PATH2SPOOL . dir` touch -d "now + 2 seconds" $dirfile a=`date +%s` $BINdflt delay b=`date +%s` diff=`echo $b - $a | bc` if [ $diff -ge 1 ] then $SUCCESS "Delay command works." else $ERROR "Delay command took only diff=$diff" fi # Test having *many* subdirectories in parallel in waa__output_dir(), # triggering reallocating the directory array. # On tmpfs the inode numbers are given in ascending order; so creating many # directories, and then putting files there might do the trick. # We do that as 50 * 50 directories, with each having a file. $INFO "Creating many directories" list=`seq 1 50` declare -a entries for a in $list do mkdir $a for b in $list do entries+=($a/$b/file) mkdir $a/$b done done # create files touch "${entries[@]}" opt="" if [[ "$opt_DEBUG" == "1" ]] then opt="-d -D waa__output_tree" fi $BINq ci -m1 $opt > $logfile $WC2_UP_ST_COMPARE if [[ "$opt_DEBUG" == "1" ]] then if grep "reallocated directory pointers to " $logfile then $SUCCESS "Many parallel directories in output_tree" else # Should that only be a warning? Might not work on other filesystems. $ERROR "No realloc message found for many parallel directories" fi else $WARN "no debug info compiled in" fi # If writing the tree didn't work, we will find out when we do a status or # another commit. G=40/32/file touch $G if [[ `$BINdflt st` == ".m.."*" 0 "*"$G" ]] then $SUCCESS "Entry list seems to be correct" else $ERROR "Status wrong - entry list destroyed?" 
fi if $BINq ci -m1 then $SUCCESS "Commit ok" else $ERROR "Commit barks" fi $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/032_commit-pipe0000755000202400020240000000735711176010503015354 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN WC_COUNT=4 > /dev/null $INCLUDE_FUNCS cd $WC function cmpdiff { [[ `cat $logfile` == *"--- $filename"*"$1"*"+++ $filename"*"$2"*"@@ -1 +1 @@"*"-$text"*"+$text$text" ]] } logfile=$LOGDIR/032.commit-pipe logfile2=$logfile.2 filename=abcdefg.ijk text=abcde.123 encoder="openssl enc -e -a" decoder="openssl enc -d -a" export encoder # try with a few bytes first echo $text > $filename $BINq ps fsvs:commit-pipe "$encoder" $filename $BINq ps fsvs:update-pipe "$decoder" $filename $BINdflt ci -m1 > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` rev_base64=$rev base64=`$encoder < $filename` repos_val=`svn cat $REPURL/$filename -r $rev_base64` echo "Expect $base64, found $repos_val" if [[ $repos_val == $base64 ]] then $SUCCESS "Commit-pipe works" else $ERROR "Encoded commit failed!" fi # Try update $WC2_UP_ST_COMPARE # Make sure both arrived there if [[ X`$BINdflt pl -v $WC2/$filename | sort` == X"fsvs:commit-pipe=$encoder"*"fsvs:update-pipe=$decoder" ]] then $SUCCESS "En- and decoder arrived in $WC2." else $ERROR "En- or decoder didn't arrive in $WC2." fi # Try diff echo $text$text > $filename $BINdflt diff $filename > $logfile if cmpdiff "Rev. $rev_base64" "Local version" then $SUCCESS "Diff works." else $ERROR "Error while diffing!" fi # use a different encoder $BINq ps fsvs:commit-pipe "gzip" $filename $BINq ps fsvs:update-pipe "gzip -d" $filename # compare $BINdflt diff $filename > $logfile if cmpdiff "Rev. $rev_base64" "Local version" then $SUCCESS "Diff after changing the decoder works." else $ERROR "Error while diffing after setting another decoder!" 
fi # commit other encoder, and do repos-repos diff $BINdflt ci -m2 > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` rev_gzip=$rev # diff -rx:y currently prints the full path - TODO # If we'd just pipe to perl we wouldn't stop on error. $BINdflt diff -r $rev_base64:$rev_gzip $filename > $logfile2 perl -pe 's('"$WC"'/*)()g' < $logfile2 > $logfile if cmpdiff "Rev. $rev_base64" "Rev. $rev_gzip" then $SUCCESS "Repos-repos-diff works." else $ERROR "Error while doing repos-repos diff!" fi # The data should still be the same - we just committed, after all. if [[ `$BINdflt st -C -C $filename | wc -l` != 0 ]] then $BINdflt info $filename md5sum $filename $ERROR "File seen as changed?" fi # Change data, using a temporary file to store the date, # and see if it's found as changed. tmp=x$filename cp -a $filename $tmp perl -e 'open(F, "+< " . shift) || die $!; print F $$;' $filename touch -r $tmp $filename # Should be seen when using checksums, and not without. # With a single -C it would be checksummed if it's likely to be changed - # which it is, because the ctime changed. if [[ `$BINdflt st $filename -o change_check=none | wc -l` != 0 || `$BINdflt st $filename -o change_check=allfiles | wc -l` == 0 ]] then $BINdflt st $filename -o change_check=none $BINdflt st $filename -o change_check=allfiles $ERROR "File status wrong?" fi # now try with more data - to see if blocking works cd $WC seq -f%9.0f 1000000 1100000 > $filename $BINdflt ci -m2 $WC2_UP_ST_COMPARE # Try export cd ${WCBASE}3 cd $WC $INFO "Try with no data" > $filename $BINdflt ci -m4 $WC2_UP_ST_COMPARE # Check for error checking cd $WC echo $text$text > $filename $BINq ps fsvs:commit-pipe false $filename if $BINq ci -m2 2> /dev/null then $ERROR "Error return of commit pipe not seen?" 
else $SUCCESS "Commit pipe gets checked for return level" fi # Delayed error $BINq ps fsvs:commit-pipe "cat && false" $filename if $BINq ci -m2 2> /dev/null then $ERROR "Delayed error return of commit pipe not seen?" else $SUCCESS "Commit pipe gets checked for return level, even for delayed" fi # vim: formatoptions=q fsvs-1.2.6/tests/045_copy_details0000755000202400020240000001274711145217104015615 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # A file gets copied; but before it gets marked as copied, it is changed: # - The MD5 of the copy must be the original MD5. # - If the copy gets changed, it must have the correct MD5 after commit. file1=src-f file2=cpy-f logfile=$LOGDIR/045.log orig=text-$RANDOM-$RANDOM-$RANDOM-text new=NEW function CheckStoredMD5 { file=$1 expect=$2 msg=$3 $BINdflt info $file > $logfile have=`perl -ne 'print $1 if m#^\s+Repos-MD5:\s+(\S+)#' < $logfile` if [[ "$expect" == "$have" ]] then $SUCCESS "MD5($file): $msg" else $ERROR_NB "MD5($file): $msg:" $ERROR_NB " expected $expect" $ERROR " have $have" fi } function Test { # Generate source file echo "$orig" > $file1 $BINq ci -m "$file1" "$file1" orig_md5=`md5sum < $file1 | cut -f1 -d" "` # Copy, change, and record as copied CheckStoredMD5 "$file1" $orig_md5 "$file1 after commit" cat $file1 > $file2 echo "$new" > $file1 new_md5=`md5sum < $file1 | cut -f1 -d" "` # The stored value must not change until commit. 
CheckStoredMD5 "$file1" $orig_md5 "changed $file1" $BINq cp $file1 $file2 CheckStoredMD5 "$file1" $orig_md5 "copy recorded - $file1" CheckStoredMD5 "$file2" $orig_md5 "copy recorded - $file2" # Change other file echo "$RANDOM-$file2" > $file2 CheckStoredMD5 "$file2" $orig_md5 "changed" # Commit other file $BINq ci -m "$file2" "$file2" new2_md5=`md5sum < $file2 | cut -f1 -d" "` CheckStoredMD5 "$file2" $new2_md5 "$file2 after commit" CheckStoredMD5 "$file1" $orig_md5 "$file1 after committing $file2" $BINq ci -m "$file1" "$file1" CheckStoredMD5 "$file1" $new_md5 "committed $file1" } $INFO "Run 1" Test ls -i | sort -n # There might be differences in the behaviour, depending on whether file1 # or file2 is first in the inode order. So we try both ways. $INFO "Run 2 with exchanged inodes" mv "$file1" xx mv "$file2" "$file1" mv xx "$file2" # Put the second file out of the filelist, so that it can be added again $BINq unversion "$file2" $BINq ci -m "inodes" Test ls -i | sort -n # Test uncopy. mkdir d1 date > d1/date $BINq ci -m "T" -o delay=yes targ=target-$$ echo $targ $$ > "$targ" mkdir d2 echo $$ > d2/date echo $$ > d2/new $BINdflt cp "$file1" "$targ" $BINdflt cp d1 d2 if [[ `$BINdflt st "$targ"` != ".mC+ "*" $targ" ]] then $BINdflt st "$targ" $ERROR "Unexpected status output after cp." fi $BINdflt st d2 > $logfile if [[ `sort < $logfile` != ".mC+ "*" d2/date"*".mC+ "*" dir d2"*"N... "*" d2/new" ]] then cat $logfile $ERROR "Unexpected status output after cp." fi $BINdflt uncopy "$targ" if [[ `$BINdflt st "$targ"` == "N... "*" $targ" ]] then $SUCCESS "'uncopy file' works" else $BINdflt st "$targ" $ERROR "Unexpected status output after 'uncopy file'." fi $BINdflt uncopy d2 $BINdflt st > $logfile if [[ `grep "^N\.\.\." < $logfile | wc -l` -eq 4 ]] then $SUCCESS "'uncopy dir' works" else $BINdflt st $ERROR "Unexpected status output after 'uncopy dir'." 
fi # Now test uncopy of added and property-set entries $BINq cp d1 d2 echo 12345 > d2/added echo 54321 > d2/prop $BINq ps a b d2/prop $BINq add d2/added $BINdflt uncopy d2 function C { $BINdflt st "$1" -N -N > $logfile if [[ `cat $logfile` != $2* ]] then cat $logfile $ERROR "Wrong status for $1, exp. $2" fi } $BINdflt st d2 > $logfile C d2 'n...' C d2/added 'n...' C d2/prop 'nP..' $SUCCESS "uncopy sets ADDED". # Now we build a big file (so that manber hashes are done), commit it, and # define a copy. # - for the simple copy with the same mtime *no* MD5 should be calculated # - for a change (different mtime) the MD5 has to be done. # - On commit the manber hashes should be done (but locally only, the file # must not be sent to the repository) # We need the debug output to see whether that works as wanted. if [[ "$opt_DEBUG" != "1" ]] then $WARN "Can't test manber hashing without debug log." # Or can we, simply looking whether the md5s file exist? exit fi file1=tt-$RANDOM file2=$file1.cp function CountManber { exp="$1" shift if $BINdflt st -v "$@" | fgrep "^$exp" then $BINdflt st -v "$@" $ERROR "Expected output '$exp'" fi $BINdflt st -D cs___end_of_block -d "$@" | grep "manber found a border" | wc -l } $INFO "Preparing big file ..." # We need a file with some minimum number of manber blocks. # I had "seq 10000000 10111111" but that gave only 3. exp_manber=15 perl -e '$zero="\0" x 65536; for(0 .. 15) { print $_, $zero; }' > $file1 $BINdflt delay touch . $BINq ci -m1 $INFO "Doing copy." cp -a $file1 $file2 $BINq cp $file1 $file2 if [[ `CountManber "....." $file2` == 0 ]] then $SUCCESS "No hashing on unchanged copy." else $ERROR "Manber-hashing on unchanged copy?" fi # change data at beginning, give another mtime dd conv=notrunc of=$file2 if=$logfile bs=1 count=16 seek=123 touch -d yesterday $file2 if [[ `CountManber ".t.C." $file2` -gt $exp_manber ]] then $SUCCESS "Hashing on changed copy." else $ERROR "Changed copy not hashed?" 
fi # Revert to original, so commit doesn't have to send data. cat $file1 > $file2 touch -d yesterday $file2 # Now commit, and look whether the manber hashes are done *locally*. $BINdflt ci -m1 -d > $logfile if grep 'ci__nondir.* doing local MD5.' $logfile then $SUCCESS "OK, local MD5 done." else $ERROR "no local MD5?" fi # Now test whether we really find changes soon. dd conv=notrunc of=$file2 if=$logfile bs=1 count=16 seek=1 if [[ `CountManber ".t.C." $file2` -gt 1 ]] then $ERROR "Committed copy hashed full, no manber hashes available." else $SUCCESS "Manber hashes exist for unchanged copy." fi fsvs-1.2.6/tests/051_softroot0000755000202400020240000000346711104514466015017 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS # We test the softroot feature, with the additional handicap # that we try to use it on a simulated snapshot. # # Szenario: # / is versioned; a snapshot is taken and mounted in some # directory. From there a commit gets done. # The WAA and CONF should be the 6666* ones (for /), so # that revert/diff etc. for / work immediately after the commit. logfile=$LOGDIR/051.softroot SRDIR=$WC/softroot DIR=/bin FILE=$DIR/ls if [[ ! -e $FILE ]] then $WARN "Cannot test without $FILE" exit fi # First we define / as base directory, and put some entries # in the repository. # We have FSVS_WAA and _CONF defined, so let's go! cd / echo $REPURL | $BINq urls load # We take only this single entry from the root directory. echo './**' | $BINq ignore load # make sure there's no old data rm `$PATH2SPOOL . dir` 2> /dev/null || true $BINq add $FILE # Now something is in / that could be committed. # Populate the softroot - for real snapshots that would require # LVM and a matching filesystem, so we cheat here. mkdir -p $SRDIR/$DIR cp -a $FILE $SRDIR/$DIR/ # We commit via the "snapshot" # To do this, we fake the FSVS_WAA and FSVS_CONF directories to # appear (to a strcmp) to be within the softroot, so they don't # get appended again. 
FSVS_WAA=$SRDIR/../../../../../../$FSVS_WAA \ FSVS_CONF=$SRDIR/../../../../../../$FSVS_CONF \ $BINdflt commit -m "x" -o softroot=$SRDIR $SRDIR > $logfile # Now owner and group will be different ... "cp -a" can't copy # them for normal users. So we look for text changes only. if [[ `$BINdflt st -C -o filter=text | wc -l` == 0 ]] then $SUCCESS "Commit on softroot" else $ERROR_NB "unexpected status output" $BINdflt st -C -o filter=text $ERROR_NB "Full output:" $BINdflt st -C $ERROR "Status output after commit on softroot" fi fsvs-1.2.6/tests/run-tests0000755000202400020240000000141011147210642014475 0ustar marekmarek#!/bin/bash set -e anyfail=0 . ./test_functions if [[ "$LC_ALL" != "" ]] then $ERROR_NB "Using LC_ALL overrides the needed LC_MESSAGES setting." $ERROR "Please use LANG instead." fi for test in $TEST_LIST do echo "" tput setaf 4 || true echo "_______________________________________________________" echo " \"$test\":1: ("`date`")"`tput op` script=`pwd`/$test if ! ( cd $TESTBASE && CURRENT_TEST=$test bash $BASH_VERBOSE $script 2>&1 ) then echo "----++---- $script failed ----++----" if [[ -z "$TEST_FAIL_WRITE_HDL" ]] then exit 1 else if [[ "$anyfail" == 0 && -n "$TEST_TTY_HDL" ]] then $ERROR_NB "First failed test is $test" > $TEST_TTY_HDL fi anyfail=1 echo $test > $TEST_FAIL_WRITE_HDL fi fi done exit $anyfail fsvs-1.2.6/tests/065_test_group-test0000755000202400020240000000304711251251711016301 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/065.group-test grp_dir=`$PATH2SPOOL $WC ^`/groups mkdir $grp_dir for a in 1 2 3 Ab CA void do echo $a > $a $BINq groups "group:$a,./**$a*" echo "take" > $grp_dir/$a done $BINq delay # Make sure that the new entries are seen. touch . function Check { pattern="$1" count="$2" echo "Testing '$pattern'." $BINdflt group test "$pattern" > $logfile if [[ `wc -l < $logfile` -ne "$count" ]] then cat $logfile $ERROR "Wrong number of output lines - expected $count." 
fi } Check './*A*' 2 Check './?' 3 echo "Non-existing groups should be ignored" Check 'group:doesntexist,./*d' 1 # Now check full listings function CheckFull { echo "Testing '$1', expect $2." if [[ `grep "$1" $logfile | wc -l` -ne $2 ]] then $ERROR "Wrong number of lines." fi } $INFO "Testing group lists." mkdir X touch X/1 X/3 $BINdflt group test > $logfile CheckFull . 9 CheckFull ^1 2 CheckFull ^3 2 CheckFull ^2 1 CheckFull ^Ab 1 CheckFull ^X 0 CheckFull X 3 $INFO "With -N" $BINdflt group test -N > $logfile CheckFull X 1 CheckFull . 7 $INFO "In subdir." cd X $BINdflt group test > $logfile CheckFull X 3 CheckFull ^1 1 # The directory itself isn't printed. CheckFull . 3 cd $WC $INFO "Verbose output" $BINdflt group test -v > $logfile CheckFull . 9 CheckFull group: 8 CheckFull group:., 5 CheckFull '^Ab[[:space:]]group:Ab,./..Ab.[[:space:]]./Ab$' 1 CheckFull i 1 $BINq ignore './**' $BINdflt group test -v > $logfile CheckFull group: 9 CheckFull i 2 $SUCCESS "group/ignore test ok." fsvs-1.2.6/tests/021_multi_url_update0000755000202400020240000000465611214371525016513 0ustar marekmarek#!/bin/bash # How many parallel working copies are used DATA_WCs=4 # Which working copy is used for updating UP_WC=`expr $DATA_WCs + 1` # Which working copy gets the data per rsync CMP_WC=`expr $UP_WC + 1` set -e $PREPARE_CLEAN WC_COUNT=$CMP_WC > /dev/null $INCLUDE_FUNCS logfile=$LOGDIR/021.multiurl.log for i in `seq 1 $DATA_WCs` do cd $WCBASE$i tu=$REPURL/$i svn mkdir $tu -m $i echo $tu | $BINq urls load mkdir dir-$i common common/sdir-$i echo $RANDOM | tee bfile-$i dir-$i/dfile-$i echo $RANDOM | tee common/cfile-$i common/sdir-$i/csfile-$i echo "Overlay $i" > overlayed $BINq ci -m "ci$i" done cd $TESTBASE # In the first run we do an update, the others do changes. function CheckURL { $BINdflt st -o verbose=none,url > $logfile while [[ $# -ne 0 ]] do path="$1" exp="$2" shift 2 if ! 
grep "$REPURL/$exp/$path"'$' $logfile then grep "/$path"'$' $logfile || true $ERROR "Expected $path to be in URL $exp" fi done } export _WC=$WCBASE$UP_WC # make -C $TEST_PROG_DIR prepare_wc > /dev/null cd $_WC # A nice side-effect is that URL 4 has the highest priority afterwards. for prio_has in `seq 1 $DATA_WCs` do $INFO "Going with prio_has=$prio_has" rm -rf ./* rm -f `$PATH2SPOOL $_WC dir` # Construct the URL list and build the compare-directory parm=--delete true | $BINq urls load for i in `seq 1 $DATA_WCs` do # rotate the highest-priority URL nr=`perl -e 'print 1+(shift()-1+shift()-1) % shift()' $prio_has $i $DATA_WCs` $BINq urls N:u$nr,P:$i,$REPURL/$nr # We need to give the checksum parameter, so that rsync isn't misled by # the equal mtimes. rsync -a $parm $WCBASE$nr/ $WCBASE$CMP_WC/ -c -c parm=--ignore-existing done $BINdflt up > $logfile $COMPARE -d $WCBASE$UP_WC/ $WCBASE$CMP_WC/ CheckURL dir-1 1 dir-3 3 common $prio_has common/sdir-2 2 common/cfile-3 3 done $SUCCESS "Priorities are taken into account." # Test what happens to entries in common directories, if such a directory # gets removed. without=2 cd $WCBASE$without rm -rf ./* $BINq ci -m remove $WCBASE$without parm=--delete cd $_WC for i in `$BINdflt urls dump "%n\n" | cut -c2-` do echo "Sync $i" # We need to give the checksum parameter, so that rsync isn't misled by # the equal mtimes. if [[ $i -ne $without ]] then rsync -a $parm $WCBASE$i/ $WCBASE$CMP_WC/ -c -c fi parm=--ignore-existing done $BINdflt up $_WC > $logfile $COMPARE -d $_WC/ $WCBASE$CMP_WC/ $SUCCESS "Multi-url update test passed." fsvs-1.2.6/tests/026_diff0000755000202400020240000001160211150447263014041 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC file=diff-file copy=copy-file log=$LOGDIR/026.diff-log echo "line" > $file $BINq ci -m "repos-vers" -o delay=yes echo "something else" > $file if [[ `$BINdflt diff $file | wc -l` -eq 6 ]] then $SUCCESS "We get a diff" else $ERROR "No diff shown." 
fi # Only headerline if [[ `FSVS_DIFF_PRG=true $BINdflt diff $file | wc -l` -eq 1 ]] then $SUCCESS "FSVS_DIFF_PRG is honored" else $ERROR "FSVS_DIFF_PRG doesn't work?" fi FSVS_DIFF_PRG=true $BINdflt diff -v $file > $log # We cannot be absolutely sure that the mtime doesn't wrap into the next # second, so there might be +- lines for meta-data. if [[ `grep -E '^.(Mode|MTime|Owner|Group): ' $log | wc -l` -lt 4 ]] then $ERROR "Meta-data output missing?" else $SUCCESS "Meta-data is printed" fi $BINq revert $file -odelay=yes if [[ `$BINdflt diff $file | wc -l` -eq 0 ]] then $SUCCESS "No diff if not changed" else $ERROR "Too much output for unchanged files!" fi # Tests for diff of copied entries. cp -a $file $copy $BINq cp $file $copy # We don't use -a; because of the sleep above we get a different timestamp, # and so the header is printed. if [[ `$BINdflt diff $copy | wc -l` -le 1 ]] then $SUCCESS "Diff for unchanged, copied, mtime entry" else $ERROR "Diff for unchanged, copied, mtime wrong" fi cp -a $file $copy # Now they should have the same timestamp, so no change. 
if [[ `$BINdflt diff $copy | wc -l` -le 1 ]] then $SUCCESS "Diff for unchanged, copied entry" else $BINdflt diff $copy $ERROR "Diff for unchanged, copied unexpected" fi echo X > $copy $BINdflt diff -r HEAD $copy > $log if [[ `wc -l < $log` -eq 6 ]] then $SUCCESS "Diff for copied" else wc -l < $log $ERROR "Diff for copied wrong - expected 6 lines" fi chmod 700 $copy $BINq ps a b $copy $BINq ci -m 1 -o delay=yes echo Y > $copy chmod 550 $copy $BINq ps a c $copy # TODO: user-defined property diff $BINdflt diff -v $copy > $log if perl -e 'undef $/; $_=<>; exit 1 unless /\n-Mode: 0700\n\+Mode: 0550\n/m' < $log then $SUCCESS "Diff for changed copy" else $BINdflt diff -v $copy $ERROR "Diff for changed copy, expected mode change" fi # Try colordiff auto mode $BINdflt diff -v $copy -o colordiff="" > /dev/null # Try error handling if $BINdflt diff $copy -o colordiff=mustneverexist_invalidbinary.$$.$RANDOM > $log 2>&1 then $ERROR "Doesn't error out for an invalid colordiff name?" else $SUCCESS "Reports bad names for colordiff" fi # No temporary file may be left behind. if ls $copy.* 2> /dev/null then $ERROR "Temporary file left behind." fi # True immediately exits if $BINdflt diff $copy -o colordiff=true > /dev/null 2>&1 then $ERROR "Doesn't error out for a non-reading colordiff?" else $SUCCESS "Reports stopping colordiffs" fi # No temporary file may be left behind. if ls $copy.* 2> /dev/null then $ERROR "Temporary file left behind." fi # EPIPE? if $BINdflt diff $copy -o colordiff=cat | true then $SUCCESS "Ignores EPIPE" else $ERROR "Doesn't handle EPIPE" fi # No temporary file may be left behind. if ls $copy.* 2> /dev/null then $ERROR "Temporary file left behind." fi # Test "diff -rX" against entries in subdirectories, and compare against # "live" diff. # The header lines (current version, timestamp, etc.) are different and # made equal for comparision. 
$BINq ci -m1 -odelay=yes > $log rev=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` fn=tree/b/2/file-x equalizer="perl -pe s#($fn).*#filename_and_so_on#" echo $RANDOM $$ > $fn $BINdflt diff $fn | $equalizer > $log $BINq ci -m1 # echo aaa > $fn # for verification that the test mechanism works if $BINdflt diff -r$rev $fn | $equalizer | diff -u - $log then $SUCCESS "diff -rX" else $ERROR "'diff -rX' gives a different answer" fi # Test diff over special entries ln -s old X $BINq ci -m1 -odelay=yes > $log rev1=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` ln -sf new X EXPECT=9 function testdiff { $BINq diff "$@" > $log # There are additional lines "no linefeed" and "special entry changed". if [[ `wc -l < $log` -ne $EXPECT ]] then cat $log $ERROR "'diff "$@"' line count wrong" fi if grep -F -- '-link old' < $log && grep -F -- '+link new' < $log then $SUCCESS "'diff "$@"' ok" else cat $log $ERROR "'diff "$@"' output wrong" fi } testdiff -r$rev1 # Test whether other, non-wanted, entries are diffed. testdiff X -r$rev1 # "diff x:y" should get rewritten, I think. # We don't get the "special entry" lines. $BINq ci -m1 -odelay=yes > $log rev2=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` testdiff -r$rev1:$rev2 testdiff -r$rev1:$rev2 X # Test how much gets diffed on -rX:Y date > $fn $BINq ci -m1 > $log rev3=`grep "revision " $log | tail -1 | cut -f2 -d" " | cut -f1 -d" "` testdiff -r$rev1:$rev3 X # Test diff on removed entries # Here we get the message again? rm X testdiff -r$rev1:$rev3 X $BINq ci -m1 > $log testdiff -r$rev1:$rev3 X fsvs-1.2.6/tests/057_setenv0000755000202400020240000000177611251246016014447 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC dir=1/a/X fn=kairo22 path=$dir/$fn logenv=$LOGDIR/056.env logrev=$LOGDIR/056.rev mkdir -p $dir echo Data=$$ > $path # We use update, to see whether the revision is set too. # We have to read from STDIN, else FSVS cries. 
# Again we have to use perl, because some shells put single ticks around # the values. $BINdflt ps fsvs:update-pipe "perl -e '; print \"\$_=\$ENV{\$_}\\n\" for grep(/^FSVS_/, keys %ENV);' > $logenv" $path $BINq ci -mx $BINdflt up $WC2 > $logrev rev=`grep "revision " $logrev | tail -1 | cut -f2 -d" " | cut -f1 -d.` function Check { if ! grep --line-regexp "$@" "$logenv" then cat $logenv $ERROR "Didn't see $@" fi } Check FSVS_CONF=$FSVS_CONF Check FSVS_WAA=$FSVS_WAA Check FSVS_CURRENT_ENTRY=$path Check FSVS_WC_ROOT=$WC2 Check FSVS_WC_CONF=$($TEST_PROG_DIR/path2spool $WC2 "^") Check FSVS_TARGET_REVISION=$rev # This is now unsetenv() if empty. # FSVS_SOFTROOT= $SUCCESS "Environment correctly set." fsvs-1.2.6/tests/067_readonly_repo0000755000202400020240000000121712163264335016003 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC TMP=$LOGDIR/067.msg date > blabla chmod 765 tree/c function ER { msg=$1 shift if $@ &> $TMP then $ERROR "Shouldn't succeed." else echo "Got an error:" cat $TMP if grep -F "$msg" $TMP then $SUCCESS "correct message." else $ERROR "wrong message" fi fi } $INFO "ci to inaccessible dir" # chmod goes top-down, which doesn't work with removing rights. 
find $REP -depth | xargs chmod 000 ER "Couldn't open a repository (180001)" $BINq ci -m "RO" $INFO "ci to readonly dir" chmod -R 555 $REP ER "Permission denied (13)" $BINq ci -m "RO" chmod -R 777 $REP fsvs-1.2.6/tests/042_checkout0000755000202400020240000000320511104514555014732 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS logfile=$LOGDIR/042.log # export to new directory CODIR=$WC2/checkout if [[ -e $CODIR ]] then rm -r $CODIR fi dir_norm=`$PATH2SPOOL $CODIR dir "" $CODIR` dir_sr=`$PATH2SPOOL $CODIR dir $WC2 $CODIR` echo Got filenames: echo " normal "$dir_norm echo " softroot "$dir_sr rm $dir_norm $dir_sr 2> /dev/null || true mkdir $CODIR $BINdflt checkout $REPURL $CODIR > $logfile $COMPAREWITH $CODIR rm -rf $CODIR mkdir $CODIR $BINq checkout -r 3 $REPURL $CODIR > $logfile $BINq up -r 3 $WC $COMPAREWITH $CODIR if [[ -e $dir_norm && ! -e $dir_sr ]] then $SUCCESS "Checkout makes the correct dir file (1)" else $ERROR "Checkout doesn't create the dir listings (1)" fi # Test softroot rm -rf $CODIR mkdir -p $CODIR $WC2/$FSVS_CONF $WC2/$FSVS_WAA rm $WC2/$dir_norm $WC2/$dir_sr $dir_norm $dir_sr 2>/dev/null || true # Test normalization too - make extra slashes $BINq checkout -o softroot=//$WC2/././ $REPURL //$CODIR// > $logfile $BINq up $WC $COMPAREWITH $CODIR if [[ ! -e $WC2/$dir_norm && -e $WC2/$dir_sr && ! -e $dir_norm && ! 
-e $dir_sr ]] then $SUCCESS "Checkout makes the correct dir files (2)" else LC_ALL=C ls -la $WC2/$dir_norm $WC2/$dir_sr $dir_norm $dir_sr 2>&1 | perl -pe 'sub BEGIN { $x=shift() } s#$x/##;' $TESTBASE || true $ERROR "Checkout doesn't create the correct dir listings (2)" fi # Lets see if a later update works $BINq update -r 3 -o softroot=/./$WC2/.// /./$CODIR// $BINq update -r HEAD -o softroot=/././$WC2/././ /./$CODIR// $COMPAREWITH $CODIR # Try a commit touch $CODIR/softroot $BINq commit -o softroot=$WC2 $CODIR -m 1 $BINq up $WC $COMPAREWITH $CODIR fsvs-1.2.6/tests/020_partial_ci0000755000202400020240000001337311142755447015250 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC dir1=dA dir2=dB dir3=dC dir31=dC/dA dir32=dC/dB dir4=dD dirN=dNew file11=$dir1/1 file12=$dir1/2 file21=$dir2/1 file22=$dir2/2 file41=$dir4/1 log=$LOGDIR/020.logfile if [[ "$VERBOSE" == 1 ]] then # BINdflt="$BINdflt -d" true fi function ChangeData { for dir in d? d?/d? do [ -e $dir ] || continue for file in 1 2 do echo "A testline " > $dir/$file dd if=/dev/zero bs=$RANDOM count=1 >> $dir/$file 2>/dev/null done done } mkdir $dir1 $BINq ci -m "def dir" # Test empty_commit option. # We cannot use $REPURL, as that includes subdirectories within the # repository, which don't get touched - we see the revision only at the # repository root. msg="EMPTY_COMMIT" if $BINdflt ci -m "$msg" -o empty_commit=no | grep "Avoiding empty commit as requested." && ! ( svn log $REPURLBASE | grep "$msg" ) then $SUCCESS "Empty commit avoided" else $ERROR "Always commits?" fi if $BINdflt ci -m "$msg" -o empty_commit=yes && svn log -r HEAD $REPURLBASE | grep "$msg" then $SUCCESS "Empty commit done" else $ERROR "Doesn't commit when empty?" fi ChangeData $BINdflt ci -m "new file" $file11 > $log # file, directory # The / is necessary to exclude the directory itself, but to include # the other file, if it should show up. 
if [[ `grep $dir1/ < $log | wc -l` -eq 1 ]] then $SUCCESS "Ok, commit of a file beneath another new file works" else cat $log $ERROR "wrong data committed (1); only $file11 expected!" fi $BINdflt ci -m "new file2" $dir1 -o delay=yes > $log if [[ `grep $dir1 < $log | wc -l` -eq 2 ]] then $SUCCESS "commit of a file beneath a committed file works" else cat $log $ERROR "wrong data committed (2); only $dir1 and $file12 expected!" fi mkdir $dir2 ChangeData $BINdflt ci -m "new file" $file21 > $log if [[ `grep $dir2 < $log | wc -l` -eq 2 ]] then $SUCCESS "commit of a changed file beneath another changed file works" else cat $log $ERROR "wrong data committed (3); only $file21 expected!" fi if false then # This test currently doesn't work - that's the same bug as # with an update-before-commit. $BINdflt st # now the directory should still show as changed: if [[ `$BINdflt st | grep $dir2 | wc -l` -eq 1 ]] then $SUCCESS "directory still shows as changed" else $BINdflt st $ERROR "directory isn't seen as changed" fi fi # This time the directory gets committed, too - so it will be stored # with correct values and won't show as changed. $BINdflt ci -m "new dir" $dir2 -o delay=yes > $log if [[ `grep $dir2 $log | wc -l` -eq 2 ]] then $SUCCESS "commit of a single directory works" else cat $log $ERROR "wrong data committed (4); only 2 lines expected!" fi # Now test for an initial commit - where no previous dir file exists. # Remove file list ... rm `$PATH2SPOOL $WC dir` mv $dir1 $dirN $BINdflt ci -m "new dir" $dirN > $log if [[ `grep -F " $dirN" < $log | wc -l` -eq 3 ]] then $SUCCESS "initial commit works" else cat $log $ERROR "wrong data committed (5); only 3 lines expected!" fi # A more complex commit, fails with unpatched fsvs 1.0.15... 
rm `$PATH2SPOOL $WC dir` mkdir $dir3 $dir4 $dir31 $dir32 $BINdflt ci -m "new dir, more complex case" $dir31 > $log if [[ `grep -F " $dir31" < $log | wc -l` -eq 1 ]] then $SUCCESS "complex initial commit works" else cat $log $ERROR "wrong data committed (6); only 2 lines expected!" fi ChangeData $BINdflt sync-repos -q $BINdflt ci -m "changed files after sync-repos" ./$dir31 > $log if [[ `grep -F ./ < $log | wc -l` -eq 3 ]] then $SUCCESS "complex partial commit after sync-repos works" else cat $log $ERROR "wrong data committed (7); expected 'committing to,dir,2files,revX'" fi $BINq ci -m "known state" $WC2_UP_ST_COMPARE # Now change data, and look whether filtering works. function TT { expected=$1 do_changes=$3 other_changes=$4 if [[ "$do_changes" == "" ]] ; then ChangeData ; fi $BINdflt st $2 > $log if [[ `grep -F ./ < $log | wc -l` -ne $expected ]] then cat $log $ERROR "Filtered status for '$2' expected $expected entries." fi $BINdflt ci -m "options are $2, expect $expected" $2 > $log if [[ `grep -F ./ < $log | wc -l` -ne $expected ]] then cat $log $ERROR "Filtered commit for '$2' expected $expected entries." fi # We cannot easily compare with WC2 here, because there are many changed # entries, and only a few get committed. # But we can check if there are still changes here. $BINdflt st $2 > $log if [[ `wc -l < $log` -ne 0 ]] then cat $log $ERROR "Still changes after 'commit $2'." fi if [[ "$other_changes" != "" ]] then # Expect some changes. $BINdflt st > $log if [[ `wc -l < $log` -ne $other_changes ]] then $ERROR "Expected $other_changes changes after 'commit $2'" fi fi $SUCCESS "Filtered commit for '$2' successful." $BINq delay } # Simple: all data files changed, no new or deleted entries - so no # directory modifications. TT 10 "." # The directory is, depending on the time dependency, marked as changed, # too. To get a definitive answer we'll manually touch it. 
echo aaaa > dB/Newwww touch -d"2008-1-1 4:5:6" dB # expect 1 file, afterwards 1 timestamp TT 1 "-f new ." 1 rm dB/Newwww # expect 1 file, afterwards 1 timestamp TT 1 "-f deleted ." 1 # Other entries may not be recorded. $INFO "Testing that other entries are not recorded" echo > $file41 echo aaaa > dB/Newwww2 # We expect a single file to be committed; afterwards all other 9 data # files are changed, and "dB" (mtime). TT 1 "-f new ." "" 11 # Now commit all changed entries; keeps "dB" as mtime. TT 10 "-f text ." 1 # Now sync ... - no changes anymore. $BINq ci -m sync1 $WC2_UP_ST_COMPARE echo a > new1 echo a > $dir31/new2 rm $file41 # We expect ".", "dD" and "dC/dA" to be "mtime" TT 3 "-f new,deleted ." no-change-data 3 # Sync again ... $BINq ci -m sync2 $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/008_update_to_rev0000755000202400020240000000461611040023007015760 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/008.log filename=update-file.upd dir=2313 file2=$dir/garble function VL { if [[ "$VERBOSE" != "" ]] then # If the files don't exist, we'd stop because # ls doesn't find the given name. # This way the output is shorter - only 2 lines compared to 10. ls -la --full-time $WC/* $WC2/* || true fi } if [[ -e $filename ]] then rm $filename $BINq ci -m "delete the test-file" fi # this next line has two tabulators - in grep and cut rev=`$BINdflt up | grep "revision " | tail -1 | cut -f2 -d" " | cut -f1 -d"."` echo "now at rev. $rev" echo "A testline #1" > $filename echo "A testline #2" >> $filename # We do a directory with an entry here, to see # whether directories only available in the repository # are correctly handled. mkdir $dir echo $file2 > $file2 echo " ci1" VL $BINq ci -m "new file" echo "A further line" >> $filename echo $file2 >> $file2 echo " ci2" VL $BINq ci -m "new file" VL $BINq up -r$rev VL if [[ -e $filename ]] then $ERROR 'File should not exist!' 
else $SUCCESS 'Ok, file was deleted' fi if [[ `$BINdflt remote-status | grep $filename` == "N... "* ]] then $SUCCESS "remote-status says to-be-done (1)." else $ERROR " remote-status failed (1)!" fi $BINq up -r`expr $rev + 1` VL if [[ `wc -l < $filename` -ne 2 ]] then $ERROR 'File has wrong data!' else $SUCCESS 'Ok, file was added' fi if [[ `$BINdflt remote-status | grep $filename` == .?C?" "* ]] then $SUCCESS " remote-status says to-be-done (2)." else $ERROR " remote-status failed (2)!" fi $BINq up -r`expr $rev + 2` VL if [[ `wc -l < $filename` -ne 3 ]] then $ERROR 'File has wrong data!' else $SUCCESS 'file was modified' fi if [[ `$BINdflt remote-status -r$rev | grep $filename` == "D..."* ]] then $SUCCESS " remote-status to old revision says to-be-deleted." else $ERROR " remote-status -r $rev failed (3)!" fi $BINdflt remote-status > $logfile if grep $filename < $logfile then cat $logfile $ERROR " remote-status failed (4)!" else $SUCCESS " remote-status says nothing." fi touch -t 200406271837 $filename $BINdflt remote-status > $logfile if grep $filename < $logfile then $ERROR " remote-status failed (5)!" else $SUCCESS " remote-status on touched file says nothing." fi VL # No $WC2_UP_ST_COMPARE here, as the file was touched. # We'd have to update to an earlier version, and return to the present :-) fsvs-1.2.6/tests/013_manber0000755000202400020240000001126211050470207014364 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # fsvs is too good at finding differences. # If we just save a timestamp in another file (via "touch -r"), # it carps about the sub-second part (which isn't done by touch). # If we just set a round time, the ctime is found as changed (after dd). # So we'd have to set the ctime too ... # But how should that be done? utime() only sets atime and mtime, # and I found no way to set a ctime. # Hardlinks don't help, too - as the data must be changed for step 2, # and that changes ctime ... 
# I thought about trying "tee file1 file2" ... but how to change the data? # So it will have to be a perl one-line script or something like that. # # PS: The perl-script works as expected. # But mv (which may, according to POSIX, be implemented as atomic link/unlink), # changes the ctime again ... # So now I use two directories, remove one, and rename the other. filename=big_file subdir1=dir1 subdir2=dir2 file1=$subdir1/$filename file2=$subdir2/$filename timestamp=200602231527.00 # The test uses the "fast" behaviour, ie. where showing "?" for a file is # allowed. echo change_check=none >> $FSVS_CONF/config function L() { if [[ "$VERBOSE" != "" ]] then ls -la --full-time $file1 ls -cla --full-time $file1 fi } function T() { TS=$1 OK=$2 NOK=$3 OPT=$4 # Please note that we cannot easily substitute the grep for the filename; # for the case that the entry is a directory, we get its children, too. # Using "-N" changes the behaviour ("-C" doesn't work anymore - TODO) x=`$BINdflt st $OPT $file1 | grep "$file1"'$' || true` set -- $x if [[ "$1" = "$TS" ]] then $SUCCESS " $OK ('$TS', with '$OPT')" else if [[ "$VERBOSE" != "" ]] then $BINdflt st $OPT fi $ERROR_NB " $NOK (expected '$TS', with '$OPT')" $ERROR " '"$*"'" fi } function TestExtensively { test_ch_wo_mtime=$1 L echo " ci1" $BINq ci -m "big file" -o delay=yes # Data and mtime not changed T "......" 'Ok, not seen as changed (1)' 'Seen as changed? (1)' '-v' T "......" 'Ok, not seen as changed (2)' 'Seen as changed? (2)' '-v -C' T "......" 'Ok, not seen as changed (3)' 'Seen as changed? (3)' '-v -C -C' # Data not changed, mtime changed echo " touch, but don't change" touch $file1 L # Directories do not show a '?' if [[ $test_ch_wo_mtime == 0 ]] ; then poss='.m.?' else poss='.m..' ; fi T $poss 'Ok, possibly changed (4)' 'Not as possibly changed seen! (4)' '' T '.m..' 'Ok, not changed (5)' 'Saw more than meta-data-change! (5)' "-C" T '.m..' 'Ok, not changed (6)' 'Saw more than meta-data-change! 
(6)' "-C -C" # Data changed, mtime not changed echo " change, but with original timestamp" # see comment above. rm -r $subdir1 mv $subdir2 $subdir1 L # These tests fail on directories, as these are normally found # as changed (size changed). if [[ "$test_ch_wo_mtime" != 2 ]] then # this is the test that failed because of the ctime(s): T "......" 'Ok, not seen as changed (7)' 'Seen as changed? (7)' '-v' # without the -v the file is not printed, because the size and # timestamps are the same, so it's not seen as changed. T '......' 'Ok, change not seen (8)' 'Change found! (8)' '-C -v' fi if [[ "$test_ch_wo_mtime" != 1 ]] then # Only with two -C the file is checksummed (because timestamps and size # are as expected). So here it should be changed. T '....C.' 'Ok, change found (9)' 'Change not found! (9)' '-C -C -v' fi touch $file1 echo " ci2" } if [[ -e $file1 ]] then rm $file1 $BINq ci -m "delete the test-file" fi test -d $subdir1 || mkdir $subdir1 test -d $subdir2 || mkdir $subdir2 seq 1 199999 | perl -e ' open(F1,"> " .shift) || die $!; open(F2,"> " .shift) || die $!; while (<>) { print F1; # do change in line 10000 for F2 substr($_, 0, 1) = "%" if $. == 10000; print F2; } # do at "same" time. $t=time(); 1 while (time() == $t); close(F1) || die $!; close(F2) || die $!; ' $file1 $file2 # verify that exactly one line has changed if [[ `diff -U0 $file1 $file2 | egrep "^[^ ]" | wc -l` -ne 5 ]] then $ERROR 'NOK, more than one line changed??' 
fi # ignore the 2nd file $BINq ignore ./$subdir2 # test with big files echo "=== Testing with a big file ===" TestExtensively 0 rm $file1 $BINq ci -m "del big file" # test with empty directory echo "=== Testing with an empty directory ===" mkdir $subdir2 mkdir $file1 $file2 $BINq ci -m "empty dir 1" TestExtensively 1 $BINq ci -m "empty dir 2" # test with non-empty directory echo "=== Testing with an non-empty directory ===" rmdir $file1 mkdir $subdir2 mkdir $file1 $file2 $file2/child # make 2nd timestamp equal rsync $subdir1/ $subdir2/ -a $BINq ci -m "dir 1" TestExtensively 2 $BINq ci -m "dir 2" $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/048_warnings0000755000202400020240000000427310775065312014776 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/048.log # Test warning messages if ! $BINdflt -Wunknown-warning-string-346246262=ignore st > $logfile 2>&1 then $SUCCESS "unknown warnings are rejected" else $ERROR "unknown warnings are NOT rejected!" fi if ! $BINdflt -Wmeta-user=UNKNOWN-action st > $logfile 2>&1 then $SUCCESS "unknown warning actions are rejected" else $ERROR "unknown warning actions are NOT rejected!" fi if FSVS_WARNING="meta-user=ignore unknown-warning=ignore" $BINdflt st > $logfile 2>&1 then $ERROR "FSVS_WARNING not used?" else $SUCCESS "FSVS_WARNING seems to be used" fi if [[ 1$opt_DEBUG == 11 ]] then # We need a sub-shell, as we expect an error returned and have to remove # the error trap. # Simply remembering the error trap doesn't work here; bash doesn't # print the needed " in "trap -p ERR". # There's no easy way to return values from the sub-shell; but # as the complete output of fsvs is written to a file we simply # take STDOUT as the error code. el=$( trap '' ERR ; set +e ; $BINdflt -W_test-warning=stop > $logfile 2>&1 ; echo $? ) if [[ $el -ne 0 && `grep WARNING: $logfile` ]] then $SUCCESS "test-warning can stop fsvs" else $ERROR "Doesn't break for test-warning!" 
fi $BINdflt -W_test-warning=once st > $logfile 2>&1 el=$? if [[ $el -eq 0 && `grep test-warning $logfile` ]] then $SUCCESS "test-warning can be set to non-fatal" else $ERROR "non-fatal test-warning failed" fi FSVS_WARNING=_test-warning=once $BINdflt st > $logfile 2>&1 el=$? if [[ $el -eq 0 && `grep test-warning $logfile` ]] then $SUCCESS "FSVS_WARNING used" else $ERROR "FSVS_WARNING not parsed?" fi # Check whether the config file is respected echo 'warning=_test-warning=stop' > $FSVS_CONF/config if $BINdflt -d st > $logfile 2>&1 then $ERROR "Warning levels NOT read from config file." else $SUCCESS "Warning levels read from config" fi if $BINdflt -W_test-warning=ignore st > $logfile 2>&1 then $SUCCESS "Commandline overrides config file." else $ERROR "Commandline does NOT override config file." fi echo '' > $FSVS_CONF/config else $INFO "Cannot test test-warning for non-debug builds." fi fsvs-1.2.6/tests/052_hardlink0000755000202400020240000000211110757463204014724 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/052.hard mkdir X for a in * do test -d "$a" || ln "$a" X/$RANDOM-$a done $BINq st $BINq ci -m"Just hardlinks" -odelay=yes $WC2_UP_ST_COMPARE if [[ "$UID" == 0 ]] then mkdir G $INFO "Testing bind mounts" mount --bind tree G # We use a sub-shell, so that the mount is surely removed. el=$( $BINq ci -mbind_mount -odelay=yes > $logfile 2>&1 && $WC2_UP_ST_COMPARE > $logfile 2>&1 && echo OK=$? ) umount G if [[ "$el" -ne 0 ]] then cat $logfile $ERROR "Bind mount didn't work" fi # Now the same, but tell it's copied. $INFO "Testing bind mounts as copy" mkdir H $BINdflt cp tree H mount --bind tree H # We use a sub-shell, so that the mount is surely removed. el=$( $BINq ci -mbind_copy -odelay=yes > $logfile 2>&1 && $WC2_UP_ST_COMPARE > $logfile 2>&1 && echo OK=$? ) umount H if [[ "$el" -ne 0 ]] then cat $logfile $ERROR "Bind mount, telling as copy, didn't work" fi $SUCCESS "Bind mounts work." 
else $WARN "Cannot test binding mounts as user" fi fsvs-1.2.6/tests/039_debug0000755000202400020240000000326611256626161014235 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC if [[ "$opt_DEBUG" != "1" ]] then # This test will run in any case via ext-tests. $WARN "Cannot do debug tests without --enable-debug." exit 0 fi logfile=$LOGDIR/039.debug logfile1=$logfile-1 logfile2=$logfile-2 logfile3=$logfile-3 # We have to give some option so that the number of parameters is the same. # We cut the timestamp and the memory addresses away, as they might be # different if address space randomization is enabled. function Clean { cut -f2- -d' ' | perl -pe 's#0x\w+#0x*#g; s#\d+ KB#* KB#;' } $BINdflt -o debug_output=$logfile1 -d -D main Clean < $logfile1 > $logfile2 $BINdflt -o debug_output="cat > $logfile1" -d -D main Clean < $logfile1 > $logfile3 $BINdflt -o diff_extra=1 -d -D main | Clean > $logfile1 if [[ `md5sum $logfile-* | cut -f1 -d" " | sort -u | wc -l` -eq 1 ]] then $SUCCESS "Debug tests successful." else md5sum $logfile-* $ERROR "Debug output is different." fi # Check debug_buffer, if enabled. if ! $BINdflt -v -V | grep ENABLE_DEBUGBUFFER then $WARN "Option debugbuffer not enabled, won't test." else function fsize { ls -1ds "$1" | cut -f1 -d" " } # Check tree and a non-existing file, to an error. $BINdflt -o debug_buffer=4 st tree g > $logfile1 2> /dev/null || true if [[ `fsize $logfile1` -gt 4 ]] then $ERROR "debug_buffer doesn't limit to 4kB" fi > $logfile1 $BINdflt -o debug_output="cat > /dev/zero" -o debug_buffer=4 -d st tree g > $logfile1 2> /dev/null || true if [[ `fsize $logfile1` -lt 4 ]] then $ERROR "debug_buffer should turn debug_buffer and debug_output off." fi $SUCCESS "debug_buffer behaves as designed." 
fi # vi: textwidth=0 formatoptions= fsvs-1.2.6/tests/019_many_files0000755000202400020240000000504411071203523015253 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/019.log # Start counting from here, so that the length of the names is equal # (needed for sorting later). START=11 COUNT=20 # We have to escape ( ) and *, as the shell would try to interpret these. exp_count=$(($COUNT * ( $COUNT * ( $COUNT + 1 ) + 1 ) + 1)) echo "Generating $exp_count entries." # Previously this was done via the shell, and partly parallel - but was # still much slower than perl. perl -e ' ($start, $end)=@ARGV; for $a ($start .. $end) { mkdir($a) || die "$a: $!"; for $b ($start .. $end) { $d="$a/$b"; mkdir($d) || die "$d: $!"; for $c ($start .. $end) { $i++; $f="$d/$c"; open(F, "> $f") || die "$f: $!"; print F "$a-$b-$c\n", ("." x ($i % 269)), "\n"; } } } ' $START $(($START+$COUNT-1)) echo "Looking for them." # Generating them is so fast that the directory might stay in the same # second. found=`$BINdflt st -C | wc -l` if [[ $found -eq $exp_count ]] then $SUCCESS "fsvs found all $exp_count changed entries." else $ERROR "fsvs found $found instead of $exp_count entries!" fi echo "Checkin ..." $BINq ci -m many echo "Checkout ..." $WC2_UP_ST_COMPARE # Do some swapping of entries, so that the names are unchanged, but the # inode numbers are mixed. # That's to see that such changes are detected and correctly handled. function Swap { find $1 | perl -e ' @list=map { chomp; $_; } ; srand(1975007003); $last=$list[rand(@list)]; $lfn="x"; @l=($last); rename($last, $lfn) || die "$last => $lfn: $!\n"; for(2 .. shift()) { $cur=splice(@list, rand(@list), 1); rename($cur, $last) || die "$cur => $last: $!\n"; $last=$cur; push @l, $last; } rename($lfn, $last) || die "$lfn => $last: $!\n"; # Use two spaces, no tab, in picture line! format STDOUT= ^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ~~ $l . 
$l=join(" ",@l); write; ' $2 # Now there must be N+1 swapped entries. # We need the -C as the size might be the same. $BINdflt st -C -C -f text > $logfile if [[ `wc -l < $logfile` -eq $3 ]] then $SUCCESS "Swapping $2 entries with '$1' ok ($3 changed)" else cat $logfile $ERROR_NB "Swapping $2 entries with '$1' wrong" $ERROR "expected $3 changed, got "`wc -l $logfile` fi $BINq ci -m x -o delay=yes $WC2_UP_ST_COMPARE } # Swap files only Swap "$START -type f" 50 50 # If we swap 10 directories with 20 entries each, we get 200 changed # entries Swap ". -maxdepth 2 -mindepth 2 -type d " 10 200 # 20*20*3 == 1200 Swap ". -maxdepth 1 -mindepth 1 -type d " 3 1200 fsvs-1.2.6/tests/041_options0000755000202400020240000000230010765430560014617 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC LOG=$LOGDIR/041.options # step 1: empty settings file. CONF=$FSVS_CONF/config touch $CONF if ! $BINdflt st then $ERROR "Empty config file fails" fi echo '## comment' > $CONF echo '# comment' >> $CONF echo '' >> $CONF if ! $BINdflt st then $ERROR "Comment-only settings file gives an error" fi touch empty-file echo 'path=absolute' > $CONF if ! $BINdflt st > $LOG then $ERROR "Reading config-file fails (1)" fi if grep "$WC1/empty-file" < $LOG > /dev/null then $SUCCESS "Parameter path read and understood." else $ERROR "Parameter path not read" fi # The file is only touched, so filter=text shouldn't find it. echo 'filter=text' > $CONF if ! $BINdflt st > $LOG then $ERROR "Reading config-file fails (2)" fi if [[ `wc -l < $LOG` -eq 0 ]] then $SUCCESS "Parameter filter read and understood." else $ERROR "Parameter filter not read" fi echo 'invalid string' > $CONF if $BINdflt st > $LOG 2>&1 then $ERROR "Invalid string doesn't fail" fi echo 'invalid=option' > $CONF if $BINdflt st > $LOG 2>&1 then $ERROR "Invalid option doesn't fail" fi $SUCCESS "Config file correctly parsed." # Restore default behaviour. 
rm $CONF fsvs-1.2.6/tests/053_conflicts0000755000202400020240000001452011243560465015122 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/053.conflicts #### Define the test files. # A list of numbers seq 1 20 > common_ancestor # Lines *2 gets changed perl -pe 's/2$/2changed-repos/' < common_ancestor > repository # Now change line 7. perl -pe 's/^7$/7local/' < common_ancestor > locally_changed # What the merged file should look like diff3 -m locally_changed common_ancestor repository > merged_ok # Another local data, with conflict perl -pe 's/^2$/2 is changed=conflict/' < common_ancestor > will_conflict # Output of the merge conflict can be done only when we know the revision # numbers; these get encoded in the filenames. touch merged_conflict target=myfile # Put into repository. cat common_ancestor > $target $BINq ci -m 1 $target -o delay=yes > $logfile rev_old=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` cat repository > $target $BINq ci -m 2 $target -o delay=yes > $logfile rev_new=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` # Now create the merge conflict compare file. if diff3 -m -L $target.mine -L $target.r$rev_old -L $target.r$rev_new will_conflict common_ancestor repository > merged_conflict then $ERROR "merge doesn't gives a conflict?" fi if [[ $? == 1 ]] then $ERROR "Expected error code of merge not given" fi if [[ `egrep '<<<<|>>>>|====|\|\|\|\|' < merged_conflict | wc -l` -eq 4 ]] then $INFO "merged_conflict seems ok." else $ERROR "merged_conflict wrong?" fi # We start with the "old" version. $BINq up -r$rev_old # Now we change something .... cat locally_changed > $target # and try to get the newer version. if $BINq up -o conflict=stop > $logfile 2>&1 then $ERROR "Didn't stop on conflict" else $SUCCESS "Stop on conflict=stop" fi if $BINdflt st -v $target | grep "^....x. " then $ERROR "Marked as conflict on conflict=stop?" 
fi if $BINq up -o conflict=local > $logfile 2>&1 then if cmp $target locally_changed then md5sum $target locally_changed $SUCCESS "conflict=local works" else diff -u $target locally_changed $ERROR "conflict=local doesn't work - wrong data" fi else cat $logfile $ERROR "conflict=local doesn't work" fi if $BINdflt st -v $target | grep "^....x. " then $ERROR "Marked as conflict on conflict=local?" fi if [[ x`$BINq st $target` != x ]] then $BINq st $target $ERROR "unexpected status output on conflict=local - should be empty." fi # Goto old revision $BINq up -r$rev_old -o conflict=local if ! cmp $target locally_changed then $ERROR "Returning to old revision doesn't work - 1" fi if $BINq up -o conflict=remote > $logfile 2>&1 then if cmp $target repository then md5sum $target repository $SUCCESS "conflict=remote works" else diff -u $target repository $ERROR "conflict=remote doesn't work - wrong data" fi else cat $logfile $ERROR "conflict=repository doesn't work" fi if $BINdflt st -v $target | grep "^...x. " then $ERROR "Marked as conflict on conflict=repos?" fi # Goto old revision. There's no conflict now, as we took the committed # version. $BINq up -r$rev_old -o conflict=stop if ! cmp $target common_ancestor then $ERROR "Returning to old revision doesn't work - 2" fi # Now test getting both files. cat locally_changed > $target $BINq up -o conflict=both if ! cmp $target.mine locally_changed then $ERROR "conflict=both: wrong data for $target.mine." fi if ! cmp $target.r$rev_new repository then $ERROR "conflict=both: wrong data for $target.r$rev_new." fi if $BINdflt st -v $target | grep "^....x. " then $SUCCESS "conflict=both works." else $ERROR "No conflict on conflict=both?" fi # Revert should leave only a single file. $BINq revert $target if [[ `ls -dla $target* | wc -l` -eq 1 ]] then $SUCCESS "aux files removed after revert." else ls -dla $target* $ERROR "unexpected auxillary files after revert." fi rm $target* # Now that should just work. 
$BINq up -r$rev_old -o conflict=remote # Test doing a merge cat locally_changed > $target $BINq up -o conflict=merge if ! cmp $target merged_ok then $ERROR "conflict=merge: wrong data for $target." fi if [[ `ls -dla $target* | wc -l` -eq 1 ]] then $SUCCESS "conflict=merge works." else ls -dla $target* $ERROR "conflict=merge: unexpected auxillary files." fi # This should give the old file. $BINq up -r$rev_old -o conflict=merge if cmp $target locally_changed then $SUCCESS "reverse merge ok" else diff -u $target locally_changed $ERROR "reverse merge wrong?" fi # Now do a conflict cat will_conflict > $target touch time_mark_file sleep 1 $BINq up -o conflict=merge -o delay=yes > $logfile if ! cmp $target merged_conflict then diff -u $target merged_conflict $ERROR "conflict=merge with conflict: wrong data for $target." fi if [[ `ls -dla $target* | wc -l` -ne 4 ]] then ls -dla $target* $ERROR "conflict=merge: unexpected auxillary files." fi if [[ -e $target && -e $target.r$rev_new && $target.r$rev_old && $target.mine ]] then $SUCCESS "conflict=merge producing conflict has auxillary files." else ls -dla $target* $ERROR "conflict=merge: expected auxillary files missing." fi if $BINdflt st -v $target | grep "^....x. " then $SUCCESS "conflict marker set." else $ERROR "No conflict on mis-merge?" fi if $BINdflt ci $target -m conflict then $ERROR "shouldn't commit a conflicted file!" else $SUCCESS "doesn't commit a conflicted file" fi # resolve should leave only a single file, and process only known. $BINq resolve * if [[ `ls -dla $target* | wc -l` -eq 1 ]] then $SUCCESS "aux files removed after revert (2)." else ls -dla $target* $ERROR "unexpected auxillary files after revert (2)." fi # After resolve only $target should be seen - not any other files. # With just "status" we'd get a line for ".", too - which we don't want. 
$BINdflt st * > $logfile if [[ `grep '^N' < $logfile | wc -l` -ne 7 ]] then cat $logfile $ERROR "resolve takes unknown files, too" fi if [[ `grep -v '^N' < $logfile` == ".mC."*"$target" ]] then $SUCCESS "resolve takes only the known files." else cat $logfile $ERROR "Wrong status after resolve" fi # Look whether the merged file has a NOW timestamp - ie. newer than the # marked file if [[ $target -ot time_mark_file ]] then ls -la --full-time $target time_mark_file $ERROR "Timestamp of merged file wrong" else $SUCCESS "merged file has mtime NOW" fi if $BINdflt ci $target -m 1 then $SUCCESS "Can commit after revert." else $ERROR "Can't commit?" fi fsvs-1.2.6/tests/034_status0000755000202400020240000001034611345572336014465 0ustar marekmarek#!/bin/bash set -e $INCLUDE_FUNCS logfile=$LOGDIR/034.status file=empty-file function Filter { filt=$1 exp=$2 $BINdflt st -C -f $filt $file > $logfile if [[ `wc -l < $logfile` -ne $exp ]] then cat $logfile $ERROR "Status output wrong - filter $filt, expected $exp." fi } function FiltMTOGNDA { Filter meta $1 Filter text $2 Filter owner $3 Filter group $4 Filter new $5 Filter deleted $6 Filter any $7 $SUCCESS "Trial run $1$2$3$4$5$6$7 ok." } export FSVS_DIR_SORT # Keep the dir_sort option at the default *after* the loop; that's # necessary for tests afterwards. for FSVS_DIR_SORT in yes no do $INFO "Using dir_sort=$FSVS_DIR_SORT." $PREPARE_DEFAULT > /dev/null cd $WC # without any change? FiltMTOGNDA 0 0 0 0 0 0 0 # meta-data change. touch -t 200101270007 $file FiltMTOGNDA 1 0 0 0 0 0 1 # set as known state. $BINdflt ci -m 1 # text change, meta-data same echo aiikortv > $file touch -t 200101270007 $file FiltMTOGNDA 0 1 0 0 0 0 1 # text and meta-data change echo adehlnor > $file touch -t 200210291240 $file FiltMTOGNDA 1 1 0 0 0 0 1 # deleted rm $file FiltMTOGNDA 0 1 0 0 0 1 1 # replaced mkdir $file FiltMTOGNDA 1 1 0 0 1 1 1 $INFO "Testing removed directories." 
mkdir -p a/b/c/d a/b/c/e a/b/d a/h/u a/h/j ( cd a/h ; touch -d yesterday some files in dir ) $BINq ci -m 2 if [[ `$BINdflt st -o verbose=none,status a -N -N` == 'N... ' ]] then $ERROR "Hierarchy a not committed" fi rmdir a/b/c/d a/b/c/e a/b/c a/h/u $BINdflt st -C -o filter=deleted > $logfile if [[ `wc -l < $logfile` -ne 4 || `grep -w dir < $logfile | wc -l` -ne 4 || `grep a/ < $logfile | wc -l` -ne 4 ]] then cat $logfile $ERROR "Status output wrong (deleted directories #1)" fi # The parent directories are changed, and that gets counted, too. $BINdflt st -C -o filter=text > $logfile if [[ `wc -l < $logfile` -ne 6 || `grep a/ < $logfile | wc -l` -ne 6 ]] then cat $logfile $ERROR "Status output wrong (deleted directories #2)" fi date > a/h/some date > a/h/dir $BINdflt st -C > $logfile if [[ `wc -l < $logfile` -ne 8 || `grep a/ < $logfile | wc -l` -ne 8 ]] then cat $logfile $ERROR "Status output wrong (deleted directories #3)" fi done $SUCCESS "Ok, filter works." # set as known state. $BINq ci -m 2 -o delay=yes $INFO "Testing dir_exclude_mtime" mkdir -p test1 $BINdflt ci -m 3 # wait some time after checkin sleep 2 # change mtime of parent folder touch test1/test2 # remove fake temp. file rm test1/test2 # test option $BINdflt st -odir_exclude_mtime=true > $logfile if [[ `wc -l < $logfile` -gt 0 ]] then echo "logfile contains entries" cat $logfile $ERROR "dir_exclude_mtime option does not work" fi if ! $BINdflt st -ostop_change=true -odir_exclude_mtime=true then $ERROR "stop_change returns true in conjunction with dir_exclude_mtime" fi $SUCCESS "dir_exclude checks ok." $PREPARE_DEFAULT > /dev/null cd $WC $INFO "Testing color output." function HasEscape { # I cannot make grep and egrep understand \x1b. 
if $BINdflt st -o stat_color=yes | perl -e 'exit (0 == grep(/\x1b\[0;0m/, ))' then $SUCCESS "$1 colorized" else $ERROR "$1 not colorized" fi } echo aaa > hazgr HasEscape "New" $BINq ci -m1 echo aaar > hazgr HasEscape "Changed" $BINq ci -m1 rm hazgr HasEscape "Deleted" # Butterfly test. # This checks the assertion BUG_ON(entry already output) for various inode # number sortings. $INFO "Checking for inode dependencies." cd .. $PREPARE_CLEAN > /dev/null cd $WC1 perl -e 'for $i (0 .. 15) { $b = (($i & 5) << 1) | (($i & 0xa) >> 1); $a = (($b & 3) << 2) | (($b & 0xc) >> 2); for $c ( $a, $a ^ 0x1f) { $f= $c <= 9 ? $c : chr($c + 64+32+1-10); mkdir($f) || die "mkdir $f: $!"; open(F, "> $f/file-$f") || die "touch file-$a: $!"; } }' # Now move the directories into one another. perl -e ' while (1) { ($g, $l)=reverse sort grep(/^\w$/, ); last if !defined($l); rename($g, $l . "/" . $g) || die "rename($g, $l): $!"; } ' $BINdflt st > $logfile $BINq ci -m1 for a in z w r q k f d c 9 6 do find . -name file-$a -exec rm {} \; $BINdflt st > $logfile done $INFO "Files ok." for a in q k f d c 9 6 do find . -depth -name $a -exec rm -r {} \; $BINdflt st > $logfile done $SUCCESS "Status seems to work regardless of the inode numbering." 
fsvs-1.2.6/tests/003_change_type0000755000202400020240000000545611144477035015427 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/003.change_types if [[ $UID -eq 0 ]] then ONLY_ROOT= else ONLY_ROOT=ignore_func function ignore_func() { true } fi if test -d typechange then rm -r typechange fi mkdir typechange pushd typechange > /dev/null for i in file device symlink dir missing fifo do echo file $RANDOM > file-$i $ONLY_ROOT cp -a /dev/zero device-$i ln -s file-$i symlink-$i mkdir dir-$i echo sub $RANDOM > dir-$i/sub-entry mkdir dir-$i/sub echo $RANDOM > dir-$i/sub/sub-entry mkfifo fifo-$i done popd > /dev/null $BINq ci -m "inserted types" -o delay=yes > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` $INFO "initial checkin is r$rev" # now goto other wc and update pushd $WC2 > /dev/null $BINq up $BINdflt st > $logfile $COMPARE -x change/fifo- $WC/ $WC2/ popd > /dev/null rm -r typechange/* pushd typechange > /dev/null for i in file device symlink dir missing fifo do echo file $RANDOM > $i-file $ONLY_ROOT cp -a /dev/zero $i-device ln -s $i-file $i-symlink mkdir $i-dir echo sub $RANDOM > $i-dir/sub-entry mkdir $i-dir/sub echo $RANDOM > $i-dir/sub/sub-entry mkfifo $i-fifo done popd > /dev/null $BINq ci -m "changed types" -o delay=yes > $logfile if [[ `$BINdflt st -C -C | wc -l` -ne 0 ]] then $BINdflt st -C -C $ERROR "Entries left out of commit" fi $INFO "typechange done, running update" $BINdflt up $WC2 > $logfile $INFO "update done" $COMPARE -x "change/.*-fifo" $WC/ $WC2/ $SUCCESS "all types changed to other types." export FSVS_WARNING="mixed-rev-wc=ignore" $BINdflt diff -v -v -r$rev -R > $logfile # Now we change WC1 by using revert, and WC2 via update. $INFO "Using r$rev as compare point" $BINq up -r$rev $WC2 > $logfile $BINq revert -r$rev -R -R . > $logfile $SUCCESS "Revert works across type-changes too." 
# WC1 still has FIFOs left $COMPARE -x "change/.*-fifo" $WC/ $WC2/ $SUCCESS "Revert across type-changes ok" for wc in "$WC1" "$WC1 -rHEAD" "$WC2 -rHEAD" do $INFO "Diff on $wc" LANG=C $BINdflt -v -v diff -rHEAD -R > $logfile # The specific number depends on whether devices are done etc. # But at least we know that FSVS doesn't crash. if [[ `wc -l < $logfile` -lt 25 ]] then $ERROR "Diff over type-changes wrong?" fi # A single thing is checked, though. if perl -e 'undef $/; $srch=; $_=<>; exit !/$srch/;' $logfile <<'EOF' --- typechange/symlink-symlink\s+Rev. \d+ .* \+\+\+ typechange/symlink-symlink\s+Local version .* @@ -1 \+1 @@ -link symlink-file( . No newline at end of file)? \+link file-symlink( . No newline at end of file)? EOF then $SUCCESS "Diff with type-changes ok" else cat $logfile $ERROR "Didn't find expected chunk in diff" fi done cd $WC2 $BINdflt diff -rHEAD > $logfile $BINq up $BINdflt diff -r$rev > $logfile fsvs-1.2.6/tests/035_sorted_output0000755000202400020240000000120711073666644016063 0ustar marekmarek#!/bin/bash set -e $INCLUDE_FUNCS $PREPARE_DEFAULT > /dev/null cd $WC logfile=$LOGDIR/035.sorted_output funsort=$logfile.unsort fsort=$logfile.sort $INFO "Testing sorting" # We try to create entries with a lower inode number than their parent. touch z a y b x c w mkdir H mv z a y b x c w H for parm in "" "-v" do $BINdflt st > $funsort $BINdflt st -o dir_sort=yes > $fsort if cmp -s $funsort $fsort then $WARN "Sorted equals unsorted?" fi if sort -k3 $funsort | cmp -s - $fsort then echo "Sorting ok" else sort -k3 $funsort | diff -u - $fsort $ERROR "Didn't sort (cmdline='$parm')" fi done $SUCCESS "Sorting works." 
fsvs-1.2.6/tests/068_symlinks0000755000202400020240000000152211202211107014770 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/068.symlinks mkdir dirA dirB date > dirA/entryA echo A > dirA/file date > dirB/entryB echo BB > dirB/file for dest in A B do $INFO "Destination of links now $dest" ln -s dir$dest sl ln -s $WC/dir$dest sllong $BINq ci -m1 echo $RANDOM >> dir$dest/entry$dest $BINdflt st for lnk in sl sllong do $INFO "going by link $lnk" for p in "" "/." "/././////." "/*" "/entry$dest" "////entry$dest" do $BINdflt st $lnk/$p > $logfile if [[ `wc -l < $logfile` -ne 1 ]] then cat $logfile $ERROR "Wrong number of lines, wanted exactly one line." fi if grep "/entry$dest"'$' $logfile then $SUCCESS "Correct result in $lnk/$p" else cat $logfile $ERROR "Wrong result for $lnk/$p" fi done done rm sl sllong done fsvs-1.2.6/tests/049_invalid_props0000755000202400020240000000375712152036214016014 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN WC_COUNT=3 > /dev/null $INCLUDE_FUNCS cd $WC filename=asd logfile=$LOGDIR/049.iprop svndir=$WC2/f svn co $REPURL $svndir svnfile=$svndir/$filename function SetProp { test -f $svnfile && rm $svnfile svn ci -m2 $svndir echo $RANDOM > $svnfile # Grmpf ... svn: warning: W150002: '...asd' is already under version control svn add $svnfile 2> /dev/null || true test -n "$1" && svn ps fsvs:owner "$1" $svnfile test -n "$2" && svn ps fsvs:group "$2" $svnfile test -n "$3" && svn ps fsvs:text-time "$3" $svnfile test -n "$4" && svn ps fsvs:unix-mode "$4" $svnfile svn ci -m2 $svndir } SetProp $BINdflt up # Currently *no* fsvs-properties are set ... so defaults should apply. ls -la $filename > $logfile if grep -- '-rw-------' < $logfile then $SUCCESS "Unset mode gives 0600." else cat $logfile $ERROR "Mode should default to 0600" fi # We allow a few seconds off, in case the system is very busy. 
if perl -e 'exit 1 if -M shift() > (3/86400.0)' $filename then $SUCCESS "Timestamp is current" else date ls -la $filename $ERROR "Timestamp is wrong" fi SetProp "x" "x" "x" "x" $BINdflt up if perl -e 'exit 1 if -M shift() > (10/86400.0)' $filename then $SUCCESS "Invalid timestamps get converted to now()" else $ERROR "Invalid timestamps are wrong" fi if [[ $UID -eq 0 ]] then SetProp "x" "x" "" "" $BINdflt up # ls -la $filename SetProp "999 a" "999 a" "" "" $BINdflt up # ls -la $filename $SUCCESS "arbitrary owner/group ok." else $WARN "UID 0 needed to get arbitrary owner/group." fi svn pd fsvs:owner $svnfile svn pd fsvs:group $svnfile svn pd fsvs:text-time $svnfile svn pd fsvs:unix-mode $svnfile svn ci -m3 $svndir $BINdflt diff -r HEAD rev=`$BINdflt up | grep "revision " | tail -1 | cut -f2 -d" " | cut -f1 -d"."` for ro in `seq 3 $rev` do $BINdflt up -r $ro > /dev/null echo -n "Diff $ro: " for r in `seq 3 $rev` do echo -n " $r" $BINdflt diff -r $r > /dev/null done echo "." done $SUCCESS "All diff revision combinations ok." fsvs-1.2.6/tests/012_export0000755000202400020240000000077011212143015014434 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS # export to new directory EXPDIR=$TESTBASE/export if [[ -e $EXPDIR ]] then rm -r $EXPDIR fi dir_path=`$PATH2SPOOL $EXPDIR dir` if [[ -f $dir_path ]] then rm $dir_path fi mkdir $EXPDIR cd $EXPDIR $BINq export $REPURL if [[ -f $dir_path ]] then $ERROR "$dir_path was made" fi $COMPAREWITH $WC cd .. rm -rf $EXPDIR mkdir $EXPDIR cd $EXPDIR $BINq export -r 3 $REPURL $BINq up -r 3 $WC $COMPAREWITH $WC $SUCCESS "export works." fsvs-1.2.6/tests/050_path_resolution0000755000202400020240000000122510755223237016350 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC touch -dyesterday empty-file tree/b/2/file-x tree/a/new if [[ `$BINdflt st | wc -l` -ne 4 ]] then $BINdflt st $ERROR "Not ok for 1" fi if [[ `$BINdflt st . | wc -l` -ne 4 ]] then $BINdflt st . 
$ERROR "Not ok for 2" fi cd tree if [[ `$BINdflt st | wc -l` -ne 3 ]] then $BINdflt st $ERROR "Not ok for 3" fi if [[ `$BINdflt st . | wc -l` -ne 3 ]] then $BINdflt st . $ERROR "Not ok for 4" fi if [[ `$BINdflt st a | wc -l` -ne 2 ]] then $BINdflt st a $ERROR "Not ok for 5" fi if [[ `$BINdflt st b | wc -l` -ne 1 ]] then $BINdflt st b $ERROR "Not ok for 6" fi $SUCCESS "ok" fsvs-1.2.6/tests/046_copyfrom-detect-20000755000202400020240000000344110755223237016403 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS logfile=$LOGDIR/046.detect cd $WC for a in `seq 1 50` do p=dir-$a/dir1 mkdir -p $p echo $WC > $p/wcpath echo $a > $p/other-$a done $BINq ci -m 1 -o delay=yes mkdir dir2 echo $WC2 > dir2/wcpath $BINdflt copyfrom-detect > $logfile if [[ `wc -l < $logfile` -ne 33 ]] then $ERROR "Wrong number of output lines" fi if [[ `grep name: $logfile | wc -l` -ne 31 ]] then $ERROR "Filename didn't match?" fi if egrep -q '^ \.\.\.$' $logfile then $SUCCESS "'Too many matches' found" else $ERROR "'too many' indication missing" fi $PREPARE_DEFAULT > /dev/null cd $WC cp -r tree tree-2 cp -r tree/a tree-a cp -r tree/a/1 tree-a-1 $BINdflt copyfrom-detect -v > $logfile function MatchBlock { dest=$1 shift if not_found=`perl -e ' $dest=shift; while () { s#\s+$##; $inblk=1,next if $_ eq $dest; last if $inblk && m#^\S#; s#^\s+##,push(@block,$_) if $inblk; } for $q (grep(/\w/,@ARGV)) { if (!grep($q eq $_, @block)) { print $q,"\n"; exit 1; } } exit 0; ' "$dest" "$@" < $logfile` then $SUCCESS "$dest matched" else $ERROR "$dest didn't have '$not_found'" fi } MatchBlock tree-2 "dirlist=100.0%:tree" MatchBlock tree-2/c "name,dirlist=100.0%:tree/c" "dirlist=100.0%:tree/a" "dirlist=100.0%:tree/b" MatchBlock tree-2/c/3 "name,dirlist=100.0%:tree/a/3" "name,dirlist=100.0%:tree/b/3" "name,dirlist=100.0%:tree/c/3" "dirlist=100.0%:tree/b/1" "dirlist=100.0%:tree/a/1" MatchBlock tree-2/a "name,dirlist=100.0%:tree/a" "dirlist=100.0%:tree/c" "dirlist=100.0%:tree/b" MatchBlock 
tree-a "dirlist=100.0%:tree/a" "dirlist=100.0%:tree/b" "dirlist=100.0%:tree/c" MatchBlock tree-a-1 "dirlist=100.0%:tree/a/1" "dirlist=100.0%:tree/a/2" "dirlist=100.0%:tree/a/3" "dirlist=100.0%:tree/b/1" "dirlist=100.0%:tree/c/1" fsvs-1.2.6/tests/054_sync_revert0000755000202400020240000000023111023311767015467 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC $BINq up rm -rf * $BINq sync-repos $BINq revert -R -R . $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/028_unittests0000755000202400020240000000150211202211074015156 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC log=$LOGDIR/028.units if [[ "$opt_DEBUG" != "1" ]] then # This test will run in any case via ext-tests. $WARN "Cannot do unit tests without --enable-debug." exit 0 fi for path in `pwd`/empty-file tree/a/1/file-z tree/../tree/b/2/./file-y ././/tree/././c/3/.././//2/.././../../dir-with-perms/../tree/c/./3/file-x do $BINdflt -d -D hlp__pathcopy st $path > $log # There might be many paths build; take only the last (via tail). p=`grep finished < $log | tail -1 | cut -f6 -d" "` rl=`readlink -f $path` if [[ "$rl" != "$p" ]] then $ERROR_NB "fsvs and readlink disagree:" $ERROR_NB " readlink: $rl" $ERROR " fsvs: $p" fi $INFO "Reduced to $p" done # > $log 2>&1 $SUCCESS "Unit tests successful." # vi: textwidth=0 formatoptions= fsvs-1.2.6/tests/043_copyfrom-detect0000755000202400020240000000713510755223237016245 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC data=Testdata-$$ fn=file fn1=$fn-1 fn2=$fn-2 log=$LOGDIR/043.cp_mv echo $data > $fn1-a $BINq ci -m a -o delay=yes mkdir d1 echo $RANDOM > $fn1-b $BINdflt copyfrom-detect > $log if [[ `cat $log` != "No copyfrom relations found." ]] then $ERROR "Reported a completely unrelated entry" fi $BINdflt -v copyfrom-detect $fn1-b > $log if [[ `grep $fn1-b $log` == "- No copyfrom relation found for $fn1-b" ]] then $SUCCESS "Verbose copyfrom-detect reports none found." 
else $ERROR "Message 'not found' missing" fi cat $fn1-a > $fn1-b $BINdflt copyfrom-detect > $log if [[ $(echo `cat $log`) == "$fn1-b md5:$fn1-a" ]] then $SUCCESS "Simple copy found (md5)." else cat $log $ERROR "md5 check not passed" fi $BINdflt copyfrom-detect -o copyfrom_exp=no > $log if [[ `cat $log` == "No copyfrom relations found." ]] then $SUCCESS "Can avoid expensive checks." else cat $log $ERROR "md5 always done?" fi rm $fn1-b echo x$data > $fn2-a $BINq ci -m a -o delay=yes cat $fn1-a > $fn1-b cat $fn2-a > d1/$fn2-b mkdir d2 cat $fn2-a > d2/$fn2-c $BINdflt copyfrom-detect > $log # We have to sort, as the order is not defined. if [[ $(echo `sort $log`) == "d1/$fn2-b d2/$fn2-c $fn1-b md5:$fn1-a md5:$fn2-a md5:$fn2-a" ]] then $SUCCESS "Pairwise copies found (md5)." else cat $log $ERROR "md5 didn't find pairwise copies" fi # Get a distinct file for next test, and save the wc date > $fn1-22 mkdir di1 $BINq ci -m a -o delay=yes # TODO: check that copyfrom was used # Re-use the inode number. mv $fn1-22 $fn1-23 # A simple move would find the identical MD5, so change the data. echo XX >> $fn1-23 mv di1 di2 # A file and a directory may not be associated. # But how could we (reliable) get a directory with the same inode# as a # (just) removed file? The only way I see is using some hex-editor! # TODO $BINdflt copyfrom-detect > $log if [[ $(echo `sort < $log`) == "di2 file-1-23 inode:di1 inode:file-1-22" ]] then $SUCCESS "Moved, changed file found." else cat $log $ERROR "inode check didn't work." fi # Get some unique value echo $RANDOM-$RANDOM-$RANDOM-$RANDOM > $fn1-a $BINq ci -m a -o delay=yes # TODO: check that copyfrom was used # check that files not given as arguments can be a source, too. # files *not* listed should not be found. 
function arg_check { num=$1 arg=$2 src=${3:-$fn1-a} dst=${4:-di2/asf} msg=${5:-Argument usage check ($1)} $BINdflt copyfrom-detect $arg > $log if [[ $(echo `sort < $log`) == "$dst md5:$src" ]] then $SUCCESS "$msg" else echo "--- Expected:" echo "$dst" echo " md5:$src" echo "--- Got:" cat $log $ERROR "$msg" fi } # The names of the directory and the file must be alphabetically before # "md5", so that the sort check above works. cat $fn1-a > di2/asf cat $fn1-a > ignoreme arg_check 1 di2 "$WC/$fn1-a" arg_check 2 di2/asf "$WC/$fn1-a" arg_check 3 di2/. "$WC/$fn1-a" "di2/./asf" cd di2 arg_check 4 . "$WC/$fn1-a" "./asf" arg_check 5 "" "$WC/$fn1-a" "asf" cd .. cd $WC # Test that a copied entry has the correct md5 set, if it gets changed # before commit. $BINq cp $fn1-a di2/asf date >> di2/asf echo $RANDOM $$ >> di2/asf $BINq ci -m "copy" -o delay=yes # -d > $LOGDIR/043.ci cat di2/asf > 6uhtr arg_check 6 6uhtr "$WC/di2/asf" 6uhtr "Committing copied with change" # Test for MD5 set on a straight copy - no changes transmitted. # Make "source" file unique echo $RANDOM $$ >> 6uhtr $BINdflt cp di2/asf 6uhtr $BINq ci -m "copy" -o delay=yes # -d > $LOGDIR/043.ci cat 6uhtr > bthtrgb arg_check 7 bthtrgb "$WC/6uhtr" bthtrgb "Committing copied without change" $BINq cp 6uhtr bthtrgb fsvs-1.2.6/tests/036_status_non-recursive0000755000202400020240000000520311100577774017342 0ustar marekmarek#!/bin/bash set -e $INCLUDE_FUNCS $PREPARE_DEFAULT > /dev/null cd $WC logfile=$LOGDIR/036.status_non-recursive # Check for -N on deleted hierarchies # Set some known timestamp touch -d "2008-02-01 12:13" . $BINq ci -m1 -o delay=yes function ExpLines { parms="$1" exp_cnt="$2" $BINdflt st $parms > $logfile if [[ `wc -l < $logfile` -eq $exp_cnt ]] then $SUCCESS "found $exp_cnt for '$parms'" else $ERROR_NB "Got for $BINdflt st $parms:" cat $logfile $ERROR "expected $exp_cnt lines." fi } ExpLines "-C" 0 rm -r tree touch -d "2008-02-01 12:13" . # With -N -N, no children are looked at. 
ExpLines "-N -N -C" 0 # If we don't pass -C, the timestamp is looked at, and if it's still the # same no check is done. ExpLines "-N -N" 0 # We have to touch the directory; even with -C no children are seen. touch . #strace $BINdflt -N -N -C big_file > $logfile #false ExpLines "-N -N -C" 1 ExpLines "-N -N" 1 ExpLines "-N" 2 ExpLines "" 41 if $BINdflt -o stop_change=true status then $ERROR "Expected an error code - 1" fi $BINq ci -m 1 if $BINdflt -o stop_change=true status then $SUCCESS "No error code without change." else $ERROR "Expected no error code - 1" fi touch empty-file if $BINdflt -o stop_change=true status then $ERROR "Expected an error code - 2" fi if $BINdflt -o stop_change=true -f text status then $SUCCESS "Filtering for changes, stopping ok" else $ERROR "Expected no error code - 2" fi # Check change detection options; use a known timestamp, and create them # simultaneously because of the ctime. touch -d "2008-07-03 1:3" . big_file big_2 big_3 $BINq ci -m 1 big_file $INFO "1) change file, same mtime, incl. size." echo bla > big_2 touch -d "2008-07-03 1:3" big_2 mv big_2 big_file ExpLines "big_file" 1 ExpLines "-o change_check=file_mtime big_file" 1 ExpLines "-o change_check=allfiles big_file" 1 ExpLines "-o change_check=dir big_file" 1 ExpLines "-C big_file" 1 # Delay so that the ctime is different. $BINq ci -m 1 -o delay=yes $INFO "2) change file, same mtime, same size, different ctime." echo blu > big_3 touch -d "2008-07-03 1:3" big_3 mv big_3 big_file ExpLines "-o change_check=allfiles big_file" 1 ExpLines "-o change_check=file_mtime big_file" 1 ExpLines "-C -C big_file" 1 # Another test for -C is in 013_manber, where (because the directory gets # renamed) even the ctime is the same. $BINq ci -m 1 -o change_check=full $INFO "3) new entry" # . is shown as changed, as the check flag is set. echo blu > new-file-now touch -d "2008-07-03 1:3" . 
ExpLines "-N -N" 1 ExpLines "-o change_check=file_mtime" 2 ExpLines "-o change_check=allfiles" 2 ExpLines "-o change_check=dir" 2 ExpLines "-C" 2 ExpLines "-C -C" 2 fsvs-1.2.6/tests/064_url_readonly0000755000202400020240000000120011206705233015617 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/064.url_readonly echo $$ > $$ for ro in readonly readonly:1 ro ro:1 do echo $ro,$REPURL | $BINdflt urls load # Allow an error code. $BINq ci -mBUG || true if $BINdflt log -rHEAD | grep BUG then $ERROR "readonly flag '$ro' did commit" fi done # As the URL is the same, it should work with a simple change, too. $BINdflt urls ro:0,$REPURL if [[ `$BINdflt urls dump | wc -l` -ne 1 ]] then $ERROR "url change doesn't work" fi if $BINq ci -m1 then $SUCCESS "readonly flag with value=0 taken" else $ERROR "Expected commit with readonly=0" fi fsvs-1.2.6/tests/063_chroot0000755000202400020240000000135511111207453014424 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/063.chroot if [[ $UID -eq 0 ]] then export FSVS_CHROOT_CWD=17 export FSVS_CHROOT_ROOT=84 export FSVS_CHROOT_LIBS=m mkdir dir echo gurgle > empty-file date > dir/new-chroot # The shell doesn't do $X $logfile $FSVS_CHROOT_CWD /dev/null $INCLUDE_FUNCS cd $WC DIR=dir IGN=ign PRE=take_ file1=$DIR/${PRE}file1 file2=$DIR/${PRE}something_different file_i=$DIR/${IGN}_file logfile=$LOGDIR/016.logfile mkdir $DIR for a in $file1 $file_i $file2 do echo Sample data for $a: $$ > $a done $BINq ignore './**' # Maybe we get a ., possibly a m ... # Depends on timing of directory/inode changes (eg. same/different second) if [[ `$BINdflt st | wc -l` -le 1 ]] then $SUCCESS "all ignored" else $BINdflt st $ERROR "Not all entries ignored?" fi # The prepare_clean does a commit. # Remove the filelist, to test with a clean state, too. p=`$PATH2SPOOL . dir` # For better security if [[ "$p" == "" ]] then $ERROR "PATH2SPOOL doesn't work!" 
fi mv "$p" "$p.bak" $BINq add $file1 # Then move back, and try again. # The added file is un-added by this operation, so add it again. mv "$p.bak" "$p" # We cannot do a commit, because that would make the added file permanent. $BINq add $file1 ( cd $DIR && $BINq add $WC/$file2 ) # The directory is marked as added, too. $BINdflt st | egrep "^n[m.]\.\." > $logfile if [[ `wc -l < $logfile` -eq 3 ]] then $SUCCESS "3 added" else cat $logfile $ERROR "not added?" fi $BINdflt ci -m "add" > $logfile if [[ `egrep "^n\.\.\." < $logfile | grep $PRE | wc -l` -eq 2 ]] then $SUCCESS "2 committed" else cat $logfile $ERROR "adds not committed!" fi if [[ `svn ls $REPURL/$DIR | grep $PRE | wc -l` -eq 2 ]] then $SUCCESS "Files exist in repository." else $ERROR "Files not in repository!" fi if svn cat $REPURL/$file1 | cmp - $file1 then $SUCCESS "Filedata correctly sent to repository." else $ERROR "Filedata not in repository!" fi $BINq unversion $file2 > $logfile if [[ `$BINdflt st | egrep "^d\.\.\." | wc -l` -eq 1 ]] then $SUCCESS "unversioned" else $ERROR "not marked for unversioning?" fi # undo unversion $BINq add $file2 > $logfile if [[ `$BINdflt st | wc -l` -eq 0 ]] then $SUCCESS "add reverts unversioned" else $ERROR "Adding an unversioned doesn't work" fi # redo unversion $BINq unversion $file2 > $logfile if [[ `$BINdflt ci -m "unversion" | tee $logfile | egrep "^d\.\.\." | wc -l` -eq 1 ]] then $SUCCESS "unversioned committed" else $ERROR "unversioned entries not committed!" fi if [[ -e $file2 ]] then $SUCCESS "file still exists" else $ERROR "file was removed!" fi if svn ls $REPURL/$file2 > $logfile 2>&1 then $ERROR "file still exists in the repository!" else $SUCCESS "file was removed from repository." fi # try committing an added but non-existing file rm $file2 if $BINdflt add $file2 2>&1 > $logfile 2>&1 then $ERROR "Non-existing file added!" else $SUCCESS "Adding a non-existing entry stops." 
fi # add entry again touch $file2 $BINq add $file2 rm $file2 # We should always get an error, because the entry does not exist. if $BINdflt ci -m "non-exist, add" >> $logfile 2>&1 then $ERROR "commit sent a non-existing entry!" else $SUCCESS "commit stopped on missing entry." fi if [[ `$BINdflt st | tee $logfile | grep $file2` == "n..!"* ]] then $SUCCESS "status shows missing entry" else $ERROR "status corrupted!" fi $BINdflt add $file2 > $logfile if [[ `grep $file2 < $logfile` != "n..!"* ]] then $ERROR "add of non-existing gives wrong status" else $SUCCESS "non-existing entries can be ignored" fi if [[ `$BINdflt unversion $file2 | tee $logfile | grep $file2` != "d..!"* ]] then $ERROR "unversioning a missing entry shows wrong status!" else $SUCCESS "non-existing entries can be unversioned" fi if $BINdflt -W entry-not-found=stop unversion $file2 > $logfile 2>&1 then $ERROR "entry mistakenly believed to exist" else $SUCCESS "entry not here." fi # Here a WC2_UP_ST_COMPARE would fail, as the ignored and the unversioned files # won't get to the other wc. # We don't bother with them here. The next test should reinstate # the wc as needed. $BINq ci -m2 mkdir lolcat $BINdflt add lolcat rmdir lolcat if [[ `$BINdflt st | grep lolcat | wc -l` -eq 1 ]] then $SUCCESS "added directory shown after rmdir." else $ERROR "added directory not shown after rmdir." fi $BINdflt info lolcat | wc -l if [[ `$BINdflt info lolcat | wc -l` -eq 16 ]] then $SUCCESS "added directory info ok." else $ERROR "added directory after rmdir has wrong info output." fi # Try adding more than one file touch h5 h6 h7 $BINq add -u url h5 h6 h7 list=`$BINdflt st -o verbose=none,status,path h? | sort` # echo to normalize the whitespace if [[ `echo $list` == "n... h5 n... h6 n... h7" ]] then $SUCCESS "Multiple entries added." else echo $list $ERROR "Didn't add more than one entry?" fi if $BINdflt unversion . 
> $logfile 2>&1 then $ERROR "Expected error message for unversion wc root" fi if [[ `wc -l < $logfile` -ne 1 ]] then $ERROR "Too many lines printed for unversion wc root" fi if ! grep "working copy root" $logfile then $ERROR "No nice error printed for unversion wc root" fi $SUCCESS "unversion wc root ok" fsvs-1.2.6/tests/025_commit_msg0000755000202400020240000000103311146471102015255 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC1 logfile=$LOGDIR/025.commit_msg mkdir abc touch 123 $BINq ci -m1 cd abc date > uzt if $BINq ci -F uzt then $SUCCESS "Messagefile found." else $ERROR "Messagefile in non-WC-base directory not taken." fi cd $WC1 touch jj if $BINq ci -F jj -o empty_message=no then $ERROR "empty message taken" else $SUCCESS "empty message not taken" fi if $BINq ci -F jj -o empty_message=yes then $SUCCESS "empty message allowed" else $ERROR "empty message not allowed" fi fsvs-1.2.6/tests/038_multiurl_use0000755000202400020240000002006611776757175015715 0ustar marekmarek#!/bin/bash # This test tries to simulate a master/local URL relationship, # ie. that there is a "master" URL (to be updated from) and a "local" URL # which tracks local modifications. # # The "master" URL is set with a higher priority, so that it takes # precedence (binaries, and basic system); the local URL is just for # tracking the (machine-specific) local changes. # # We use the working copies as follows: # 1 for the master data (/trunk in the repository) WC_MASTER=${WCBASE}1 REP_MASTER=$REPURL # 2 for the local#1 data (/machineA) WC_LOCAL1=${WCBASE}2 # 3 for the local#2 data (/machineB) WC_LOCAL2=${WCBASE}3 # 4 as working copy for machine1 WC_MACHINE1=${WCBASE}4 # 5 as working copy for machine2 WC_MACHINE2=${WCBASE}5 # 6 as the base directory for the machine-specific repositories REPD_1=${WCBASE}6/m1 REPD_2=${WCBASE}6/m2 REP_1=file://$REPD_1/trunk REP_2=file://$REPD_2/trunk # # So we need 5 working copies. 
NUM_WC=6 # # We take care to commit to a directory that doesn't yet exist in the # "local" repository. # PS: is there an easier way for variable indirection? logfile=$LOGDIR/038.multi-url set -e $PREPARE_CLEAN WC_COUNT=$NUM_WC > /dev/null $INCLUDE_FUNCS file1=conf1 file2=etc/conf2 #################################################### ##### Prepare Master-data #################################################### cd $WC_MASTER mkdir bin echo got ya > bin/ls echo something > root-file echo is_base > is_base mkdir both echo master_of_desaster > both/master $BINq urls $REP_MASTER $BINq ci -m "master" #################################################### ##### Prepare machine-local data. #################################################### function makeLocal { which=$1 _rep="REP_$which" _rep=${!_rep} _repd="REPD_$which" _repd=${!_repd} # Create repository for machine svnadmin create $_repd svn mkdir "$_rep" -m "mkdir loc-$which" # Local data wc _loc="WC_LOCAL$which" cd "${!_loc}" # We have to load, as the (wrong) /trunk was already set by prepare echo "$_rep" | $BINq urls load # An empty revision 2 is needed. 
(1 is the mkdir above) $BINq ci -m empty -d > $logfile #> /dev/null echo "Set URL $_rep for ${!_loc}" echo "$which here" > is_$which $BINq ci -m here > /dev/null echo "is $which" > is_$which $BINq ci -m is > /dev/null # Make some empty revisions, so that the revision numbers surely disagree for a in a `seq 1 $(($RANDOM % 14))` do $BINq ci -m $a > /dev/null done mkdir both echo $which > both/m-$which $BINq ci -m b # Goto machine-WC _mac="WC_MACHINE$which" cd "${!_mac}" # Set URLs with priorities echo "N:base,P:10,$REP_MASTER" | $BINq urls load $BINq urls "N:local,P:20,$_rep" echo "set N:base,P:10,$REP_MASTER and N:local,P:20,$_rep for ${!_mac}" # Set commit-URL in config conf=`$BINdflt info | grep Conf-Path | cut -f2` if [[ "$conf" == "" ]] then $ERROR "Can't get Conf-Path for $which" fi echo "Got $conf as config path" echo commit-to=local > $conf/config } makeLocal 1 makeLocal 2 #################################################### ##### Set data #################################################### function Set { which=$1 num=$2 where=$3 delay=$4 _p=$where$which cd "${!_p}" # make and commit data. echo $which$num > $file1 test -d etc || mkdir etc echo $which$num > $file2 # We generate a unix-mode here; maybe we should just take a random number? perl -e '($fn, $data, $nr)=@ARGV; chmod( 0600+unpack("C*",$data)-97+$nr, $fn ) || die $!' 
$file2 $num $which ls -la $file2 $BINq ci -m "change local $which - $num" $delay echo ":: Set $which to $num" } Set 1 a WC_LOCAL Set 2 a WC_LOCAL -odelay=yes #################################################### ##### Verify 1 #################################################### function Verify { which=$1 num=$2 where=$3 _p=$where$which update_parm=$4 cd "${!_p}" # get data $BINq up $update_parm # verify data if [[ x`cat $file1`x`cat $file2`x == x${which}${num}x${which}${num}x ]] then $SUCCESS "Working copy local-$which correctly updated ($num)" else $ERROR_NB "Wrong data in $file2|$file2 ($num)" $ERROR "In "`pwd`", expected '$which$num'." fi # verify mode ls -la $file2 if ! $BINdflt info $file2 | egrep "Status:.*0x0 .unmodified" then $ERROR "Wrong mode for ${!_p}$file2" fi } Verify 1 a WC_MACHINE Verify 2 a WC_MACHINE #################################################### ##### Set/verify again #################################################### Set 1 b WC_LOCAL Set 2 b WC_LOCAL -odelay=yes Verify 1 b WC_MACHINE Verify 2 b WC_MACHINE #################################################### ##### Do changes on the machines #################################################### $INFO "Committing locals from machines" Set 1 t WC_MACHINE Set 2 t WC_MACHINE -odelay=yes #################################################### ##### Verify the data in the repository #################################################### Verify 1 t WC_LOCAL Verify 2 t WC_LOCAL $INFO "Doing partial update" # Now do a single-URL-update Set 1 g WC_LOCAL # Has still to be at the old data. Verify 1 t WC_MACHINE -ubase Verify 1 g WC_MACHINE -ulocal $SUCCESS "Partial update works" $WARN "disabled, doesn't work with current subversion" exit 0 # TODO: Get both ways of repository numbers - local higher than base, and # vice-versa. 
# TODO: Master overlays one of the config files -> has to get to the # machines (even if changed locally??); if changed doesn't get committed to # master unless specified via commit-to. # Checking correct creation of new directories. cd $WC_MACHINE1 NEW1=etc/date.txt NEW2=bin/date date > $NEW1 date > $NEW2 $BINq ci -m "both dirs" if [[ `svn ls $REP_1/$NEW1 | wc -l` == 1 && `svn ls $REP_1/$NEW2 | wc -l` == 1 ]] then $SUCCESS "New directory successfully created" else $ERROR "New dirs not found!" fi # Try to specify a single URL; go to the empty revision. $BINq up -u local@2 # When updating to a revision where a directory disappears, but it still # needed by other entries within, we don't know the mtime of the new # highest-priority URL (yet?). # So we have to ignore the dir-mtime here. $COMPARE -d $WC_MACHINE1/ $WC_MASTER/ # Now the is_1 file should appear. $BINdflt up -u local@3 > $logfile # The "." modified is allowed, and there's the "Updated ..." line. if [[ `wc -l < $logfile` == [23] ]] && grep -E "^N\.\.\. .* 7 .* is_1$" $logfile then $SUCCESS "Single-URL update ok" else cat $logfile $ERROR "Single-URL update wrong?" fi # There's at least an empty revision afterwards. if [[ `$BINdflt diff -u local@3 -r4` != "" ]] then $ERROR "Empty single-URL diff wrong?" fi $BINdflt diff -u local -r4 > $logfile if [[ `wc -l < $logfile` == 6 && `grep -c is_1 $logfile` == 3 ]] then $SUCCESS "Single-URL diff ok" else cat $logfile $ERROR "Single-URL diff wrong" fi $SUCCESS "Master/Local-URL usage works." # Now use the multi-URL setup to test "fsvs cat". 
$BINq up cd $WC_MACHINE1 for file in is_1 is_base bin/date do md5l=`md5sum - < $file` md5r=`$BINdflt cat $file | md5sum -` if [[ "$md5l" != "$md5r" ]] then $ERROR_NB "Checksums on unmodified file $file wrong:" $ERROR_NB "local $md5l" $ERROR "versus repository $md5r" fi echo doesnt matter never mind > $file md5r=`$BINdflt cat $file | md5sum -` if [[ "$md5l" != "$md5r" ]] then $ERROR_NB "Checksums on modified file $file wrong:" $ERROR_NB "local was $md5l" $ERROR "versus repository $md5r" fi done $SUCCESS "fsvs cat works." # Test -rX export FSVS_COMMIT_TO=local file=is_1 echo 1 > $file md5_1=`md5sum < $file` $BINq ci -m1 $file > $logfile rev1=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` # Change size, to be sure that the change is seen echo 22 > $file md5_2=`md5sum < $file` $BINq ci -m1 $file if [[ `$BINdflt cat $file -r$rev1 | md5sum -` != $md5_1 ]] then $ERROR "'cat -r$rev1 $file' wrong" fi if [[ `$BINdflt cat $file -rHEAD | md5sum -` != $md5_2 ]] then $ERROR "'cat -rHEAD $file' wrong" else $SUCCESS "cat -rX works, too." fi # Test data of special entries ln -s link link $BINq ci -m1 link data=`$BINdflt cat link` if [[ "$data" == "link link" ]] then $SUCCESS "cat for symlinks ok." else $ERROR "cat for symlinks wrong? '$data'" fi fsvs-1.2.6/tests/030_eperm_warn0000755000202400020240000000265211040023007015250 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC if [[ "$UID" -eq 0 ]] then # If we simply changed to another uid (eg. via perl $< = $> = 1), # how could we be sure that all needed areas (wc, wc2, repos, ...) # are writeable by that other user? # Currently only test if non-root. $WARN "Test for EPERM-warnings not done for UID 0." exit fi file=file touch $file $BINq ci -m "x" # Now change owner/group # Sadly svn won't do "svn ps file://...." 
svn co $REPURL tmp/ for prop in svn:owner svn:group do old=`svn pg $prop tmp/$file` new=`perl -e 'print shift() ^ 1' $old` svn ps $prop $new tmp/$file done svn ci -m "x" tmp rm -rf tmp logfile=$LOGDIR/030.logfile # We test with stop first, as with a simple warning the file could # checked out and the next update would have nothing to do. # Now we get an error: echo "Testing stopping" if $BINdflt -v up -W chown-eperm=stop > $logfile 2>&1 then cat $logfile $ERROR "not stopped!" else if [[ `grep chown-eperm $logfile | wc -l` -eq 1 ]] then $SUCCESS "stopped." else cat $logfile $ERROR "stopped for wrong reason?" fi fi # We get a warning: echo "Testing warning" $BINdflt up -v > $logfile 2>&1 # && $ERROR "no exit status!" # should a warning give an exit status? if [[ `grep chown-eperm $logfile | wc -l` -ge 1 ]] then $SUCCESS "warning given" else cat $logfile $ERROR "warning NOT given" fi rm $logfile $WC2_UP_ST_COMPARE > /dev/null 2>&1 fsvs-1.2.6/tests/001_init_dir0000755000202400020240000000365511327261475014741 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC umask 0 touch -t 200502281400.13 empty-file echo blabla > blabla chmod 2741 blabla || chmod 741 blabla # symlinks ln -s blabla blabla-symlink ln -s reclink reclink ln -s non-existant invalid-symlink umask 023 mkdir dir-with-perms ln -s dir-with-perms symlink-to-empty-dir ln -s tree symlink-to-dir mkdir long_name_dir loc-utf8 loc-loc perl -e 'print pack("c",$_) for (33..125)' > long_name_dir/chars date > long_name_dir/28374fwlifuazwlgvkaj.sbg.awilgzq3a5zgh-aenr.kahw4tgeoa34zht2l3kj4zhwq34tgza.we24ltzkn351243tgwerg echo $STG_UTF8 > loc-loc/$STG_LOC echo $STG_LOC > loc-utf8/$STG_UTF8 # just a bit above 512kB filename=big_file seq 1 99999 > $filename for i in a b c do for j in 1 2 3 do p=tree/$i/$j mkdir -p $p for k in x y z do echo $RANDOM > $p/file-$k done done done #ln /dev/urandom . 
if [[ `id -u` -eq 0 ]] then cp -a /dev/null device chmod 765 device chown 1.2 device cp -a `readlink -e /dev/cdrom*` block-device cp -a /dev/null device-2be-updated chmod 123 device-2be-updated fi # If we're not UID 0, that's simply a dangling link more. ln -s device device-link # encoder/decoder encoder="openssl enc -e -a" decoder="openssl enc -d -a" filename=enc-dec echo $encoder $decoder > $filename $BINq ps fsvs:commit-pipe "$encoder" $filename $BINq ps fsvs:update-pipe "$decoder" $filename $BINq ci -m "some entries" -o delay=yes if [[ `$BINdflt st` == "" ]] then $SUCCESS "No status after commit" else $ERROR "Status after commit?" fi for f in empty-file tree tree/b tree/c/2 tree/a/3/file-x do if ! $BINdflt info $f | egrep 'Revision:[[:space:]]+4$' > /dev/null then $ERROR "Wrong revision number on '$f'" fi done echo new file > newfile1 echo new file > newfile2 $BINq ci -m "newfile" -o delay=yes rm newfile2 $BINq ci -m "rm newfile" -o delay=yes cat long_name_dir/chars > copied $BINq cp long_name_dir/chars copied $BINq ci -m "copy" fsvs-1.2.6/tests/018_various0000755000202400020240000001120411202104065014604 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/018.various # Test EPIPE handling. # I could reproduce it with "strace -o /dev/null $BINdflt log | true", (or # "| head -1"), but only in 1 of 10 cases without the strace. strace_bin=`which strace || true` strace_cmd=${strace_bin:+$strace_bin -o /dev/null} for command in log st do ret=$( set -o pipefail $strace_cmd $BINdflt $command 2>$logfile | true echo $? set +o pipefail ) if [[ $ret -eq 0 ]] then # No errors on STDOUT allowed. 
if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "EPIPE on $command handled correctly" else $ERROR "wrong number of output lines on EPIPE $command test" fi else $ERROR "Error code on EPIPE $command" fi done # Test whether / at the end of an URL are removed $BINq init $REPURL///// if $BINdflt st > $logfile then $SUCCESS "slashes at end of URLs are ignored." else $ERROR "slashes make fsvs fail??" fi if [[ `$BINq help | wc -l` -gt 10 && `$BINq -h | wc -l` -gt 10 && `$BINq -V` == "FSVS"*"version "* ]] then $SUCCESS "version and help are given" else $ERROR "version or help are not printed?" fi if [[ "$opt_DEBUG" == "1" ]] then if FSVS_DEBUGLEVEL=1 $BINdflt -v -D main | grep LC_ALL > $logfile && test `$BINdflt -v -d -D main | egrep "^[^ ]+ +main" | wc -l` -ge 3 then $SUCCESS "Debug messages seem to work" else $ERROR "debug doesn't work?" fi fi if [[ `$BINdflt s 2>&1 ` == *"Action \"s\" is ambiguous."* && `$BINdflt invalid-action-which-will-never-exist 2>&1` == \ *"Action "*" not found."* ]] then $SUCCESS "actions are checked" else $ERROR "actions are not verified?" fi if $BINdflt st /$RANDOM/$RANDOM/$RANDOM/$RANDOM/$RANDOM > $logfile 2>&1 then $ERROR "non-existing directories don't fail?" else $SUCCESS "error for non-existing directories" fi # Look whether there's a human-readable message (1 line) for non-existing # CONF or WAA paths. # Has to include the missing path, so that the generic "no working copy" # error isn't allowed. function Check_Path { var=FSVS_$1 if $BINdflt status /bin > $logfile 2>&1 then cat $logfile $ERROR "Invalid $var doesn't stop?" else if [[ `wc -l < $logfile` -eq 1 && `grep -c "${!var}" < $logfile` -eq 1 ]] then $SUCCESS "$var checked" else cat $logfile $ERROR "Wrong message on invalid $var." fi fi } FSVS_CONF=$WC/not-here Check_Path CONF FSVS_WAA=$WC/not-here Check_Path WAA # Define an empty configuration directory, and try to do a status (without # a wc file). 
if FSVS_CONF=$WC $BINdflt status -N -N /sbin /bin > $logfile 2>&1 then $ERROR "Didn't expect status output" else $SUCCESS "No status output for non-committed WCs" fi # make a wc, and retry ( export FSVS_CONF=$WC FSVS_WAA=$WC cd / echo file:/// | $BINq urls load echo './*' | $BINq ignore load ) if FSVS_CONF=$WC FSVS_WAA=$WC $BINdflt status -N -N /sbin /bin > $logfile 2>&1 then if ! grep '^N\.\.\. \+dir \+/s\?bin$' $logfile > $logfile.2 then $ERROR "Wrong status output" fi if [[ `wc -l < $logfile` -eq 2 && `wc -l < $logfile.2` -eq 2 ]] then $SUCCESS "Status output for two root entries as expected." else $ERROR "Wrong status output" fi else $ERROR "No status output for non-committed WCs?" fi # Test whether an invalid/not existing $FSVS_WAA allows "help" to work if FSVS_WAA=/tmp/not-existing-$RANDOM$RANDOM$RANDOM$RANDOM$RANDOM $BINdflt help status > $logfile 2>&1 then $SUCCESS 'help needs no $FSVS_WAA' else $ERROR 'help tries to access $FSVS_WAA' fi if $BINdflt -V | grep version | grep GPL | grep Marek > /dev/null then $SUCCESS "Version is printed" else $ERROR "Version printing error" fi if $BINdflt -v -V | grep "compiled .*, with options" > /dev/null then $SUCCESS "Compile options are printed" else $ERROR "Verbose version printing error" fi if $BINdflt help | grep "Known commands:" > /dev/null then $SUCCESS "Help gets printed" else $ERROR "No help output?" fi if $BINdflt help help | grep 'Help for command "help".' > /dev/null then $SUCCESS "Help for help gets printed" else $ERROR "No help help output?" fi if $BINdflt help -h | grep 'Help for command "help".' > /dev/null then $SUCCESS "Help -h gets printed" else $ERROR "No help -h output?" fi if $BINdflt help -? | grep 'Help for command "help".' > /dev/null then $SUCCESS "Help -? gets printed" else $ERROR "No help -? output?" fi # If we have an invalid charset, ANSI_X3.4-1968 is returned (=ASCII). # So there should never be an error, unless nl_langinfo fails. #if ! 
LC_ALL=invalid LC_CTYPE=invalid $BINdflt -Wcharset-invalid=stop st 2> $logfile #then # $SUCCESS "invalid locales can stop fsvs" #else # $ERROR "invalid locales don't give an error?" #fi # fsvs-1.2.6/tests/031_info0000755000202400020240000000541711213125405014056 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC filename=file-$RANDOM-$RANDOM logfile=$LOGDIR/031.logfile touch $filename chmod 742 $filename # Be careful - don't do a CR/LF or something like that. printf "TEST 1" > $filename touch -t 200602231730.13 $filename md5=`md5sum - < $filename` echo Checkin ... $BINq ci -m 1 -o delay=yes > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` function Check { tag=$1 data="$2" # We need a word boundary; "grep -E" is not POSIX. So use perl. # I tried to use \Q in the patterns (svn+ssh has a + in it!), but that # doesn't work if it gets expanded from a variable into a pattern. # So we have to use the conditional. # There must be exactly one line. if perl -e '$tag=shift; $pat=shift; $pat=quotemeta($1) if $pat =~ /^=(.*)/; @fnd=grep(m#^\s+$tag:\s+$pat#, ); exit 0+(@fnd != 1);' "$tag" "$data" < $logfile then # Used to be $SUCCESS, but that's a lot of green # echo "found $tag: $data" true else echo "$tag wanted as $data, but found:" echo "'"`grep $tag $logfile || true `"'" $ERROR "Wrong data returned!" fi } function CheckAttr { Check Type file Check Status "$1" Check Flags 0x0 Check URL "=$REPURL/$filename" Check Mode 0100742 Check UID/GID "$UID .*/.*" Check MTime "Thu Feb 23 17:30:13 2006" Check Revision 4 Check Size 6 Check Repos-MD5 $md5 # f676245d2b1ee5589cd0f19401fda420 } $BINdflt info $filename > $logfile CheckAttr "0x0 .unmodified." $SUCCESS "info prints the expected data for existing files." # Check that *really* the stored data is printed, # and not just the current values! echo "ASDASAAGDGASGa" > $filename $BINdflt info $filename > $logfile CheckAttr "0x24 .changed, mtime." 
$SUCCESS "info prints the expected data for modified files." rm $filename $BINdflt info $filename > $logfile CheckAttr "0x2 .removed." $SUCCESS "info prints the expected data for removed files." if $BINdflt info Does-not-exist-in-this-WC > $logfile 2>&1 then $ERROR "info does not stop for non-existing entries!" else $SUCCESS "info stops for non-existing entries!" fi copydir=ta copydirs=a/av/ad copyfn=a4g copyfile=$copydir/$copydirs/$copyfn mkdir -p $copydir/$copydirs touch $copyfile $BINdflt cp $filename $copyfile $BINdflt info -o verbose=copyfrom $copyfile > $logfile Check "Copyfrom" "=rev. $rev of $REPURL/$filename" $SUCCESS "info prints the expected data for copied files 1." $BINq ci -m 1 > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` dir2=qwerhg cp -a $copydir $dir2 $BINdflt cp $copydir $dir2 $BINdflt info -o verbose=copyfrom $dir2/$copydirs/$copyfn > $logfile Check "Copyfrom" "=rev. $rev of $REPURL/$copydir/$copydirs/$copyfn" $SUCCESS "info prints the expected data for copied files 2." # set ts=2 sw=2 fsvs-1.2.6/tests/077_url_remove0000755000202400020240000000327311214371514015317 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # Make some bases BASES="1 2 3" mkdir $BASES $BINq ci -m "base dirs" URLS="" for b in $BASES do ( cd $b mkdir -p dir-$b/sub/subsub echo $RANDOM > dir-$b/sub/a_file ( echo :$b: ; seq 1 $b ) > file echo $RANDOM > file-$b ) # Make some revisions for r in `seq 0 $b` do $BINq ci -m "base $b" done URLS="$URLS\\n prio:$b,name:u$b,$REPURL/$b" done cd $WC2 echo -e "$URLS" | $BINq urls load rm -rf $WC mkdir -p $WC cd $WC echo -e "$URLS" | $BINq urls load $BINq sync $BINq revert -R -R . $WC2_UP_ST_COMPARE if grep :1: file then $SUCCESS "Correct URL priorities" else cat file $ERROR "URL 1 should be topmost" fi # Now "remove" the URLs, one by one. 
for b in $BASES do $INFO "removing u$b" # This is a stronger test than doing an update "-u...@0", because the # to-be-removed state must be remembered. # The other kind is tested below. $BINdflt urls N:u$b,target:0 $BINdflt up if $BINdflt urls dump | grep name:u$b then $BINdflt urls dump $ERROR "URL u$b not automatically removed" else $SUCCESS "URL u$b automatically removed" fi if [[ -e dir-$b || -e file-$b ]] then $ERROR "WC data not cleaned up?" fi # This returns an error, because the common file gets removed after the # first update to rev 0. Will be fixed with mixed-WC operation. if grep :$b: file then cat file $ERROR "file has wrong data" fi if [[ -e file ]] then $INFO "file still exists." fi $SUCCESS "URL u$b cleaned up." done echo -e "$URLS" | $BINq urls load $BINq up if [[ ! -e dir-2 ]] then $ERROR "Not correctly updated" fi $BINq up -u u2@0 if [[ -e dir-2 ]] then $ERROR "Not correctly removed on update" fi fsvs-1.2.6/tests/080_group_auto_prop_expl_commit0000755000202400020240000000135411776757151020776 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC # http://fsvs.tigris.org/ds/viewMessage.do?dsForumId=3928&dsMessageId=2981798 logfile=$LOGDIR/080.group_auto_prop_expl_commit grp_dir=`$PATH2SPOOL $WC ^`/groups mkdir $grp_dir cat < $grp_dir/secret auto-prop fsvs:commit-pipe base64 take EOT $BINq groups "group:secret,./**/f*" DATA=plain mkdir dir1 echo $DATA > dir1/file cp -a dir1 dir2 $BINq ci -m c1 dir1 # explicitly committing $BINq ci -m c1 dir2/file echo $DATA | base64 > cmp $INFO "expect this:" cat cmp for d in dir1 dir2 do $INFO "$d got this:" svn cat $REPURL/$d/file | tee f.$d if ! diff -u f.$d cmp then $ERROR "Not correctly encoded in $d" fi done $SUCCESS "group-autoprops ok." 
fsvs-1.2.6/tests/comp-test/0000755000202400020240000000000012554717230014532 5ustar marekmarekfsvs-1.2.6/tests/comp-test/080_find_common_base.ct0000644000202400020240000000241611320474076020734 0ustar marekmarek## Test for details in waa__find_common_base. ## Stops automatically in _do_component_tests. ## case 0: No arguments. print waa__init() set wc_path="$#$ENV{'WC'}#" set start_path=wc_path set start_path_len=strlen(start_path) ## set opt_debugprefix=0 shell mkdir a b c set charp_array_1[0]="$#$ENV{'WC'}#/a" set charp_array_1[1]="$#$ENV{'WC'}#/b" set charp_array_1[2]="$#$ENV{'WC'}#/c" #= 0 print waa__find_common_base2(3, charp_array_1, &charpp, 0) #~ 0x\w+ "a" print charpp[0] #~ 0x\w+ "b" print charpp[1] #~ 0x\w+ "c" print charpp[2] shell mkdir a/h a/j set charp_array_1[0]="$#$ENV{'WC'}#/a/h" set charp_array_1[1]="$#$ENV{'WC'}#/b" #= 0 print waa__find_common_base2(2, charp_array_1, &charpp, 0) #~ 0x\w+ "a/h" print charpp[0] #~ 0x\w+ "b" print charpp[1] set charp_array_1[0]="$#$ENV{"WC"}#/a/h" set charp_array_1[1]="$#$ENV{"WC"}#/a/j" #= 0 print waa__find_common_base2(2, charp_array_1, &charpp, 0) #~ 0x\w+ "a/h" print charpp[0] #~ 0x\w+ "a/j" print charpp[1] set charp_array_1[0]="/does_never/exist" set charp_array_1[1]="/never/so_we/get_an/error" #= 2 print waa__find_common_base2(2, charp_array_1, &charpp, 0) # test "/." set start_path="/" set charp_array_1[0]="." #= 0 print waa__find_common_base2(1, charp_array_1, &charpp, 0) #~ 0x\w+ "." print charpp[0] kill fsvs-1.2.6/tests/comp-test/003_strncmp_ul.ct0000644000202400020240000000153110654534767017645 0ustar marekmarek## Stops automatically in _do_component_tests. ## Test for the function hlp__strncmp_uline_eq_dash(). 
#= 0 call hlp__strncmp_uline_eq_dash("aaa", "aaa", -1) #= 0 call hlp__strncmp_uline_eq_dash("aaa", "aaa", 3) #= 0 call hlp__strncmp_uline_eq_dash("aaa", "aaa", 4) #= 1 call hlp__strncmp_uline_eq_dash("aab", "aaa", -1) #= 1 call hlp__strncmp_uline_eq_dash("aaa", "aaab", 4) #= 1 call hlp__strncmp_uline_eq_dash("aaab", "aaa", 4) #= 0 call hlp__strncmp_uline_eq_dash("aaab", "aaab", 3) #= 0 call hlp__strncmp_uline_eq_dash("aaab", "aaab", 4) ## should fail, because the first hlp__string is defined to have only _, no - #= 1 call hlp__strncmp_uline_eq_dash("aa-b", "aa_b", 4) #= 0 call hlp__strncmp_uline_eq_dash("aa_b", "aa-b", 4) #= 0 call hlp__strncmp_uline_eq_dash("aaab", "aaabsss", 4) #= 1 call hlp__strncmp_uline_eq_dash("aaab", "aaabsss", 5) fsvs-1.2.6/tests/comp-test/fail-test.ct0000644000202400020240000000022310644624306016746 0ustar marekmarek## This test *should* fail, so that we see that the matching works. b main ## Without any parameter we get a 1 (program name) r #= 2 print argc fsvs-1.2.6/tests/comp-test/050_local2utf8.ct0000644000202400020240000000170710656011645017434 0ustar marekmarek## Test for the locale/UTF-8 conversion functions. set local_codeset="latin1" ## Initialization not needed; hlp__local2utf8() and hlp__utf82local() do ## their own init. #= 0 call hlp___get_conv_handle( local_codeset, "UTF-8", voidp_array+0) #= 0 call hlp___get_conv_handle( "UTF-8", local_codeset, voidp_array+1) print voidp_array set opt_debugprefix=0 ## Basic tests #= 0 call hlp__local2utf8("aa", charp_array_1, -1) #= 0x\S+ "aa" print *charp_array_1 #= 0 call hlp__utf82local("ab", charp_array_1, -1) #= 0x\S+ "ab" print *charp_array_1 ## Invalid utf-8, should return 84 = EILSEQ for full buffer. ## Should work with correct length restriction. 
set buffer[0]=67 set buffer[1]=0xf0 set buffer[2]=0x00 #= 0 call hlp__utf82local(buffer, charp_array_1, 1) #= 0x\S+ "C" print *charp_array_1 #= 84 call hlp__utf82local(buffer, charp_array_1, -1) ## don't know why we get EINVAL == 22 - test for non-zero. #= [1-9] call hlp__utf82local(buffer, charp_array_1, 2) fsvs-1.2.6/tests/comp-test/100_option_parse.ct0000644000202400020240000000247111251251465020144 0ustar marekmarek## Stops automatically in _do_component_tests. ## Test for option parsing. set debuglevel=1 set opt__list[OPT__FILTER].i_val=0 set opt__list[OPT__FILTER].prio=0 #= 0 print opt__list[OPT__FILTER].i_val print opt__list[OPT__FILTER].prio # With gdb 6.8.50 or something like that this doesn't work anymore. # set buffer="filter=any" set strcpy(buffer, "filter=any") call opt__parse(buffer, 0, 1, 0) #= -1 print opt__list[OPT__FILTER].i_val # Now we have to set both strings, because strcpy() doesn't go beyond \0. # Or we could use memcpy ... but that's fragile if the value gets changed. 
set strcpy(buffer, "filter") set strcpy(buffer+10, "none;text") set opt_debugprefix=0 call opt__parse(buffer, buffer+10, 2, 0) #= 7 print opt__list[OPT__FILTER].i_val set strcpy(buffer, "filter=none") call opt__parse(buffer, 0, 3, 0) #= 0 print opt__list[OPT__FILTER].i_val call strcpy(buffer, "filter=mtime,owner:group") call opt__parse(buffer, 0, 4, 0) #= 0xe0 print /x opt__list[OPT__FILTER].i_val set strcpy(buffer, "delay=no") call opt__parse(buffer, 0, 1, 0) #= 0 print opt__list[OPT__DELAY].i_val set strcpy(buffer, "delay=yes") call opt__parse(buffer, 0, 2, 0) #= -1 print opt__list[OPT__DELAY].i_val set strcpy(buffer, "delay=commit,update,checkout,revert") call opt__parse(buffer, 0, 3, 0) #= 15 print opt__list[OPT__DELAY].i_val fsvs-1.2.6/tests/comp-test/001_basic.ct0000644000202400020240000000010410644624306016515 0ustar marekmarek## Comment b main r #= 1 print argc kill r 123 #= 2 print argc fsvs-1.2.6/tests/comp-test/030_alloc_free.ct0000644000202400020240000000243411002026735017530 0ustar marekmarek## Test for struct estat alloc/free. ## Stops automatically in _do_component_tests. ## We allocate 10 (struct estat)s, free some in a defined order, and check ## whether the freelist matches the expectations. set free_list=0 #= 0 call ops__allocate( 10, estat_array, int_array+0) #= 10 print int_array[0] ## set pointers set estat_array[1]=estat_array[0]+1 set estat_array[2]=estat_array[1]+1 set estat_array[3]=estat_array[2]+1 set estat_array[4]=estat_array[3]+1 set estat_array[5]=estat_array[4]+1 set estat_array[6]=estat_array[5]+1 set estat_array[7]=estat_array[6]+1 set estat_array[8]=estat_array[7]+1 set estat_array[9]=estat_array[8]+1 ## Now we have () () () () () () () () () () ## and free as 3 1 2 #= 0 call ops__free_entry(estat_array+3) #= 1 print free_list->count #= 0 print (long)free_list->next - (long)(estat_array[0]+10) #= 0 print (long)free_list - (long)(estat_array[0]+3) #= 0 call ops__free_entry(estat_array+4) ## Now test merging! 
#= 2 print free_list->count #= 0 print (long)free_list->next - (long)(estat_array[0]+10) ## Entry 3 was set to NULL by ops__free_entry() #= 0 print (long)free_list - (long)(estat_array[0]+3) #= 0 call ops__free_entry(estat_array+2) #= 3 print free_list->count #= 0 print (long)free_list - (long)(estat_array[0]+2) kill fsvs-1.2.6/tests/024_urls0000755000202400020240000001057211243311224014107 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/024.urls # clean up echo "" | $BINq urls load prio=15 for p in http https svn svn+ssh file svn+some_other_tunnel svn+1 do $BINq urls N:$p,prio:$prio,T:$prio,$p://$p/some/path # Do pseudo-random prio=`perl -e 'print +(shift()*11+41) % 61' $prio` done $SUCCESS "Set all known protocols" if $BINq urls unknown:///void 2> /dev/null then $ERROR "Invalid protocols are accepted" else $SUCCESS "Invalid protocols are rejected" fi if $BINq urls svn+:///void 2> /dev/null then $ERROR "'svn+' is wrong, but got accepted" else $SUCCESS "'svn+' rejected" fi # Having a invalid sequence if $BINq urls dump '\\\r\n\t\f\x01\*' > /dev/null 2>&1 || $BINq urls dump '%n%t%u%p%r%%%/' > /dev/null 2>&1 then $ERROR "Parsing escape sequences doesn't work" fi # We would have to do the sorting afterwards - if the program fails, # we wouldn't get the error. # But if there's an priority, the output order is defined. $BINq urls dump > urls.txt $BINq urls load < urls.txt $BINq urls dump > urls2.txt if cmp urls.txt urls2.txt then $SUCCESS "Dump/Load works" else $ERROR "Dump/Load gives different results!" 
fi url=http://egal echo "name:1qay,P:9812,target:HEAD,$url" | $BINq urls load if [[ `$BINdflt urls dump '%p'` != 9812 ]] then $ERROR "Dumping priority with '%p'" fi if [[ `$BINdflt urls dump '%u'` != $url ]] then $ERROR "Dumping url with '%u'" fi if [[ `$BINdflt urls dump '%n'` != 1qay ]] then $ERROR "Dumping name with '%n'" fi if [[ `$BINdflt urls dump '%%\x20%%'` != "% %" ]] then $ERROR "Dumping some other format" fi if [[ `$BINdflt urls dump '%t'` != "HEAD" ]] then $ERROR "Dumping target revision with '%t'" fi $SUCCESS "URL load/dump tests pass" $BINq urls "prio:22,N:XX,$url" if [[ `$BINdflt urls dump | wc -l` -ne 1 ]] then $ERROR "URL taken twice" fi if [[ `$BINdflt urls dump '%p'` != 22 ]] then $ERROR "Changing priority with '%p'" fi if [[ `$BINdflt urls dump '%n'` != XX ]] then $ERROR "Changing name with '%n'" fi $SUCCESS "URL change tests pass" if $BINq urls "N:XX,$url/g" > /dev/null 2>&1 then $ERROR "Duplicate names should not be allowed" else $SUCCESS "Duplicate names rejected" fi # Test error for non-WC directories mkdir Xt cd Xt if $BINdflt urls dump > $logfile 2>&1 then cat $logfile $ERROR "No error for non-WC-directories" else if grep $(pwd) $logfile then $SUCCESS "Error message with path printed" else cat $logfile $ERROR "No or wrong error message" fi fi # Test keeping the internal numbers U1=name:1,file:///r/1 U2=name:2,file:///r/2 U3=name:3,file:///r/3 echo -e "$U1\n$U2\n$U3\n" | $BINdflt urls load $BINdflt urls dump "%u %I\\n" | sort > f-1 echo -e "$U2\n$U3\n$U1\n" | $BINdflt urls load $BINdflt urls dump "%u %I\\n" | sort > f-2 echo -e "$U3\n$U2\n$U1\n" | $BINdflt urls load $BINdflt urls dump "%u %I\\n" | sort > f-3 md5sum f-? if [[ `md5sum f-? | cut -f1 -d" " | sort -u | wc -l` -eq 1 ]] then $SUCCESS "Internal URL numbers are kept." else $ERROR "Internal URL numbers lost." fi for a in s sv svn svn: svn:/ svn:// do if $BINq urls $a then $ERROR "invalid URL $a accepted" fi done $SUCCESS "Cut-off URLs not taken." 
if $BINq urls svn://x/t then $SUCCESS "Valid URL taken." else $ERROR "Valid URL not taken." fi # Test changes. function CU { dump="$1" exp="$2" decl="$3" if [[ `$BINdflt urls dump "$dump"` == $exp ]] then $SUCCESS "$decl" else $ERROR_NB "expected '$exp', got:" $BINdflt urls dump "$dump" echo $ERROR "$decl" fi } ( echo name:n1,prio:8,target:3,http://localhost/nothing ; echo prio:4,name:n2,file:///hopeless ) | $BINq urls load CU "%n-%p-%t:" 'n2-4-HEAD:n1-8-3:' "Loading with reversed priority" $BINdflt urls N:n1,prio:3 CU "%n-%p:" 'n1-3:n2-4:' "Reversing priority" $BINdflt urls N:n3,http://localhost/nothing CU "%n-%p:" 'n3-3:n2-4:' "Changing name" $BINdflt urls target:87,http://localhost/nothing CU "%n-%t:" 'n3-87:n2-HEAD:' "Changing the target revision" cd $WC1 echo "$REPURL" | $BINq urls load $BINq ci -m1 $WC2_UP_ST_COMPARE # Check that nothing below FSVS_CONF gets changed for update/commit - not # even the timestamps. mkdir -p aaa b/c/d b/k date | tee aaa/2 5g3 b/gg b/c/d/5 > $RANDOM find $FSVS_CONF -ls | sort > $logfile $BINq ci -m2 $WC2_UP_ST_COMPARE find $FSVS_CONF -ls | sort > $logfile.2 if diff -u $logfile $logfile.2 then $SUCCESS "FSVS_CONF not changed on commit/update." else $ERROR "FSVS_CONF changed" fi fsvs-1.2.6/tests/062_readonly_conf0000755000202400020240000000254311133561357015761 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/062.ro-conf $BINdflt delay echo a > x$RANDOM.$$ echo $RANDOM.$$ > empty-file function Go { run="$1" $BINdflt st > $logfile.$run,st $BINdflt diff > $logfile.$run,diff $BINdflt log -rHEAD > $logfile.$run,log $BINdflt pl -v * > $logfile.$run,pl $BINdflt pg fsvs:commit-pipe enc-dec > $logfile.$run,pg $BINdflt info -v tree > $logfile.$run,info } # Get compare values. # The chmod must be done before, to get the same status output. #chmod -R a-w . Go 1 # Don't allow any changes here; "diff" must use /tmp. 
chmod -R a-w $FSVS_WAA chmod -R a-w $FSVS_CONF # We must be careful to clean up after this test, so that normal testing # can resume. # So we run in a subshell, and look at the return value afterwards. if ! result=$( Go 2 if ! $BINdflt diff -r HEAD > /dev/null then echo "diff -r HEAD gives an error" exit 1 fi ) then if [[ "$result" == "" ]] then result="Exit code" fi fi # Allow removing these hierarchies chmod -R u+w $FSVS_WAA chmod -R u+w $FSVS_CONF if [[ "$result" != "" ]] then $ERROR "$result" fi for file in `/bin/ls $logfile.* | cut -f2 -d, | sort -u` do if diff -u $logfile.1,$file $logfile.2,$file then $SUCCESS "compared $file output." else $ERROR "'$file' output different" fi done $SUCCESS "read-only WAA and CONF ok." fsvs-1.2.6/tests/009_bigger_files0000755000202400020240000000533711334063060015554 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC function CheckSyntax { # verify that the byte numbers are ascending, hole-free, and that the MD5s # are correct perl -e ' use Digest::MD5 qw(md5_hex); open(DATA,shift) || die "open: $!"; $data=join("", ); close DATA; open(MD,shift) || die "open: $!"; $pos=0; while () { # line found $cnt--; ($md5, $hash, $start, $length)=split(/\s+/); die "start $start != position $pos\n" if $start != $pos; die "MD5 differs\n" if $md5 ne md5_hex(substr($data, $pos, $length)); $pos=$start+$length; } exit 0; ' $1 $2 } filename=big_file sparse=sparse if [[ -e $filename ]] then rm $filename $BINq ci -m "delete the test-file" fi sparse_md5s=`$PATH2SPOOL $sparse md5s` test -f $sparse_md5s && rm $sparse_md5s # There was a performance problem with files of many zero bytes (but not # starting with them) ... every zero byte got its own manber-block or some # such. ( echo Test1 ; dd if=/dev/zero bs=1024k count=1 ; echo Test2 ) > many_0 # TODO: do sparse files on update, and test for it. # make sure that VM usage stays sane. 
ulimit -v 200000 seq 1 199999 > $filename dd if=/dev/null of=$sparse bs=1024 count=1 seek=256k 2> /dev/null echo " ci1" $BINq ci -m "big files" ci_md5=`$PATH2SPOOL $filename md5s` echo $ci_md5 CheckSyntax $filename $ci_md5 if [[ -f $sparse_md5s ]] then $ERROR "For a sparse file no md5s should be written!" else $SUCCESS "No md5s for a sparse file." fi echo "Another line" >> $filename echo " ci2" $BINq ci -m "big file 2" CheckSyntax $filename $ci_md5 if [[ -e $ci_md5 ]] then $SUCCESS "Committing a big file creates the md5s-data" else $ERROR "Committing a file doesn't create the md5s-data" fi # update other wc $WC2_UP_ST_COMPARE up_md5=`$PATH2SPOOL $WC2/$filename md5s "" $WC2` if [[ ! -f $up_md5 ]] then $ERROR "PATH2SPOOL wrong - got $up_md5" fi if cmp $ci_md5 $up_md5 then $SUCCESS "Update and commit give the same manber-hashes and MD5s" else $ERROR_NB "Update and commit give DIFFERENT manber-hashes and/or MD5s!!" ls -la $ci_md5 $up_md5 2> /dev/null diff -uw $ci_md5 $up_md5 2> /dev/null $ERROR "Update and commit disagree" fi # for identical files this should always be correct, but better check ... CheckSyntax $WC2/$filename $up_md5 # now delete the file and test if the .../md5s is gone. rm $filename $BINq ci -m "delete the big test-file" $WC2_UP_ST_COMPARE if [[ -e $ci_md5 ]] then $ERROR "Committing a deleted file doesn't remove the md5s-data" else $SUCCESS "Committing a deleted file removes the md5s-data" fi if [[ -e $up_md5 ]] then $ERROR "Updating a deleted file doesn't remove the md5s-data" else $SUCCESS "Updating a deleted file removes the md5s-data" fi fsvs-1.2.6/tests/valgrind-suppressions.supp0000644000202400020240000006071210757621051020115 0ustar marekmarek# :-) vim (-: # :4,$g/ /dev/null $INCLUDE_FUNCS cd $WC # If the filename and the message overlap, the "log -v" test # "grep"s the wrong data. file=x-commit-msg-file-$RANDOM-$RANDOM-$RANDOM-x # Date in a german locale gives "Jän", in the locale codeset - to verify # the correct en/deconding! 
# We don't use the 1st - depending on the timezone settings this might give # us December. msg="$STG_UTF8.$STG_LOC" logfile=$LOGDIR/070.log # Exercise the commit msg editor code. Try an empty file, too. echo $RANDOM > $file EDITOR="touch" $BINdflt ci echo $msg > $file EDITOR="cp $file" $BINdflt ci -o author=NoSuchMan svn log $REPURL -rHEAD > $logfile if grep $msg < $logfile > /dev/null then $SUCCESS "Message was taken" else $ERROR "Message not fetched from editor!" fi if [[ "$PROTOCOL" != "file://" ]] then $WARN "Author only taken for file://; doesn't work for svn+ssh." else if grep NoSuchMan < $logfile > /dev/null then $SUCCESS "Author was taken" else $ERROR "Author not used on commit" fi fi $BINdflt log -rHEAD > $logfile 2>&1 if grep $msg < $logfile > /dev/null then $SUCCESS "'fsvs log -rX' works." else $ERROR "'fsvs log -rX' doesn't work." fi # For the just created file there should be only a single revision. $BINdflt log $file > $logfile 2>&1 if grep $msg < $logfile > /dev/null then $SUCCESS "'fsvs log file' works." else $ERROR "'fsvs log file' doesn't work." fi # Test log output options $BINdflt log -o log_output=normal $file > $logfile if grep "^$msg" < $logfile > /dev/null then $SUCCESS "Normal log output works" else $ERROR "Normal log output wrong" fi $BINdflt log -o log_output=indent,color $file > $logfile # I cannot make grep and egrep understand \x1b. # Double reverse logic - if 0 lines found, return non-zero. if grep "^ $msg" < $logfile > /dev/null && perl -e 'exit (0 == grep(/\x1b\[0;0m/, ))' < $logfile then $SUCCESS "Indented, colorized log output works" else $ERROR "Indented, colorized log output wrong" fi # Test -F for commit message, with a very long line. # That can fail for non-UTF8-characters, if one would get cut in the # middle. (That's why there are so many strange things in the above line!) 
for a in `seq 1 200` ; do echo -n $msg$msg ; done > $file $BINdflt ci -F $file if svn log $REPURL -rHEAD | grep $msg$msg > $logfile 2>&1 then $SUCCESS "message was taken" else $ERROR "message not read from file!" fi # Test log -v $BINdflt log -rHEAD:HEAD -v > $logfile 2>&1 if grep $file < $logfile > /dev/null then $SUCCESS "'fsvs log -v -rX:Y' works." else $ERROR "'fsvs log -v -rX:Y' doesn't work." fi # Test empty file > $file $BINdflt ci -F $file # Test limit parameter $BINdflt log -rHEAD:1 -o limit=1 $file > $logfile if [[ `wc -l < $logfile` -eq 5 ]] then $SUCCESS "log limit obeyed" else cat $logfile wc -l < $logfile $ERROR "log limit doesn't work" fi # Test EPIPE handling. # I could reproduce it with "strace -o /dev/null $BINdflt log | true", (or # "| head -1"), but only in 1 of 10 cases without the strace. strace_bin=`which strace || true` strace_cmd=${strace_bin:+$strace_bin -o /dev/null} for command in log st do ret=$( set -o pipefail $strace_cmd $BINdflt $command 2>$logfile | true echo $? set +o pipefail ) if [[ $ret -eq 0 ]] then # No errors on STDOUT allowed. if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "EPIPE on $command handled correctly" else $ERROR "wrong number of output lines on EPIPE $command test" fi else $ERROR "Error code on EPIPE $command" fi done $INFO "Test for filename sorting" touch `seq -fx-%g 100 200` $BINq ci -m1 $BINdflt log -r HEAD -v | grep "^ x-" > $logfile if sort < $logfile | diff -u - $logfile then $SUCCESS "Filenames are sorted." else $ERROR "Filenames not sorted" fi $INFO "Test for filename filtering" mkdir -p SOME/dir/below touch SOMEdonttakeme touch SOME/Ill_be_there SOME/dir-is-this-not touch SOME/dir/below/Take_a_Wok_on_the_wild_side $BINq ci -mZeta $BINdflt log -r HEAD -o limit=1 -v SOME > $logfile if ! grep Zeta $logfile then $ERROR "Wrong message in log?" fi if grep SOMEdonttakeme $logfile then $ERROR "Wrong entry in log" fi if ! grep -E 'Ill_be_there$' $logfile then $ERROR "Entry missing" fi if ! 
grep -E ' SOME/Ill_be_there$' $logfile then $ERROR "Wrong path syntax" fi $INFO "'fsvs log -v' on a file" # Test log for a file $BINdflt log -r HEAD -o limit=1 -v SOME/dir-is-this-not > $logfile if ! grep Zeta $logfile then $ERROR "Wrong message in log?" fi if ! grep '^ SOME/dir-is-this-not$' $logfile then $ERROR "Expected file not seen" fi # A file, and one line log message. if [[ `grep '^ ' $logfile | wc -l` != 2 ]] then $ERROR "Too many entries returned." fi $BINdflt log -r HEAD -o limit=1 -v $WC1/SOME/../SOME/dir-is-this-not > $logfile if ! grep "^ $WC1/SOME/../SOME/dir-is-this-not\$" $logfile then $ERROR "Full file path not given." fi $SUCCESS "'log -v' output takes parameters correctly." $INFO "Tests for -u" if $BINdflt log -u url -u url then $ERROR "Duplicate -u shouldn't be taken." fi if $BINdflt log -u not_here then $ERROR "Nonexisting URL taken" fi if ! $BINdflt log -u url -r HEAD > $logfile then $ERROR "Normal -u errors?" fi mkdir a $BINq ci -m2nd $BINq urls name:foo,$REPURL/a $BINq update $BINdflt log -u foo -r HEAD > $logfile if ! grep 2nd $logfile then $ERROR "-u foo wrong message?" fi $SUCCESS "Tests for -u successfull." fsvs-1.2.6/tests/047_revert_details0000755000202400020240000000463511100600010016130 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC src=file1 dest=file2 src_d=dir-x dest_d=dir-y logfile=$LOGDIR/047.log # I had "date > $src" here, but the german "Jän" for January # has different lengths for de_AT and de_AT.UTF8 ... echo aaeadaaaaacaaaaaaaaaaaaabaaaa > $src mkdir -p $src_d/sub touch $src_d/sub/file $BINq ci -m1 -o delay=yes failed=0 function Check { name="$1" exp="$2" msg="$3" if [[ `$BINdflt st -C "$name" | grep "$name"'$'` == "$exp "*" $name" ]] then $SUCCESS "Got $exp for $name - $msg" else $BINdflt st -C "$name" $ERROR "Expected $exp for $name - $msg" failed=1 fi } cp $src $dest cp -r $src_d $dest_d # A while loop doesn't stop the shell for a failed command? 
# We have to remember whether everything's ok. set -- $src $dest $src_d $dest_d while [[ $# -ge 2 ]] do cur_src="$1" cur_dest="$2" shift 2 echo $cur_src $cur_dest # Revert shouldn't do anything. Check "$cur_dest" "N..." "unversioned" $BINq revert $cur_dest Check "$cur_dest" "N..." "unversioned after revert" # Test reverting a copied entry $BINq cp $cur_src $cur_dest Check "$cur_dest" ".m.+" "copied" if test -d "$cur_dest" then echo aasdgasgasgdga > "$cur_dest/sdfh" else echo aafadaaaaacaaaaaaaaaaaaabaaaa > "$cur_dest" fi Check "$cur_dest" ".mC+" "changed copy" $BINq revert $cur_dest # This revert won't delete the new file, so it would cause the directory # to be "changed" again. if test -d "$cur_dest" then rm "$cur_dest/sdfh" # Now the mtime has changed again - do a revert once more. $BINq revert $cur_dest fi Check "$cur_dest" "...+" "revert on copied" $BINq revert $cur_dest Check "$cur_dest" "...+" "revert*2 on copied" $BINq uncopy $cur_dest Check "$cur_dest" "N..." "revert, uncopy on copied" done if [[ "$failed" == "1" ]] then $ERROR "Tests failed." fi # Check for reverting a directory # TODO: test reverting a branch in a copied directory tree # $BINq unversion $dest dir=directory mkdir -m 0777 $dir $BINq ci -m2 -odelay=yes ls -lad $dir $dest > $logfile # Test reverting meta-data if [[ $UID != 0 ]] then $WARN "Cannot test owner/group reverting" else chown bin.bin $dir $dest fi # For files with umask 000 FSVS should show "maybe changed", not fail. 
chmod 000 $dest $dir $BINdflt st $BINq revert $dir $BINq revert $dest if ls -lad $dir $dest | diff -u - $logfile then $SUCCESS "Meta-data reverted" else $ERROR "Meta-data not reverted" fi fsvs-1.2.6/tests/040_path_display0000755000202400020240000000433011040023007015566 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/040.paths.log seq 1 10 > tree/b/2/file-x # Locally remove WC1, so that only WC matches export WC1=barbaz # Now test the filename display function T { path=$1 pat=$2 chg=${3:-.mC.} $BINdflt st $path -opath=$pathparm > $logfile if [[ `wc -l < $logfile` -ne 1 ]] then cat $logfile $ERROR "too many lines" fi if grep "^$chg \+[a-z0-9]\+ \+$pat" < $logfile > /dev/null then true else echo "path=$pathparm of $path ~~ $pat" echo ' '`cat $logfile` $ERROR "doesn't match $pat" fi } cd tree/b pathparm=parameter $INFO "testing '-opath=$pathparm'" T "" "2/file-x" T "." "./2/file-x" T ".." "../b/2/file-x" T "../a/.." "../a/../b/2/file-x" T "2" "2/file-x" T "2/file-x" "2/file-x" T "../a/../b/2/file-x" "../a/../b/2/file-x" for pathparm in environment full-environment do $INFO "testing '-opath=$pathparm'" T "" "\$WC/tree/b/2/file-x" T "." "\$WC/tree/b/2/file-x" T ".." "\$WC/tree/b/2/file-x" T "../a/.." "\$WC/tree/b/2/file-x" # variable has only part of name export WCBLUBB=$WC/tre T "" "\$WC/tree/b/2/file-x" export WCBLUBB=$WC/tree/b/2 T "../a/.." "\$WCBLUBB/file-x" WCBLUBB= done pathparm=wcroot $INFO "testing '-opath=$pathparm'" T "" "./tree/b/2/file-x" T "." "./tree/b/2/file-x" T "../b" "./tree/b/2/file-x" T "../a/.." "./tree/b/2/file-x" T "2" "./tree/b/2/file-x" pathparm=absolute $INFO "testing '-opath=$pathparm'" T "" "$WC/tree/b/2/file-x" T "." "$WC/tree/b/2/file-x" T "../b" "$WC/tree/b/2/file-x" T "../a/.." "$WC/tree/b/2/file-x" $INFO "checking for new entries" mkdir -p h/j pathparm=environment T h/j "\$WC/tree/b/h/j" N... WCBLUBB=$WC/tree/b/h T h/j "\$WC/tree/b/h/j" N... 
pathparm=full-environment T h/j "\$WC/tree/b/h/j" N... WCBLUBB=$WC/tree/b/h T h/j "\$WCBLUBB/j" N... $SUCCESS "Status output ok." $INFO "Testing diff" function D { how=$1 pat=$2 $BINdflt diff 2 -opath=$how > $logfile if [[ `grep -c "$pat" < $logfile` -ne 3 ]] then $ERROR "Diff output wrong" fi } for x in environment full-environment do D $x "\$WC/tree/b/2/file-x" WCBLUBB=$WC/tree/b D $x "\$WCBLUBB/2/file-x" WC=asf D $x "$WC/tree/b/2/file-x" done $SUCCESS "Diff output ok." fsvs-1.2.6/tests/061_groupings0000755000202400020240000001073211216125755015152 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/061.groups grp_name=MightyTest grp_dir=`$PATH2SPOOL $WC ^`/groups grp_file=$grp_dir/$grp_name cntr=1000 mkdir $grp_dir # We do the changes without waiting, so we need that for the new files to # be found. export FSVS_CHANGE_CHECK=dir # Don't report the changed root directory. export FSVS_FILTER=new if $BINdflt group 'group:,./**' then $ERROR "Empty group names are invalid." fi function SetGrpAndTest { status=$1 output_lines=$2 shift shift cntr=`expr $cntr + 1` perl -e 'map { print $_,"\n"; } @ARGV' "$@" > $grp_file cur_file=file-$cntr date > $cur_file echo group:$grp_name,mode:0:0 | $BINq group load sta=1 if $BINdflt st > $logfile 2>/dev/null then sta=0 ; fi if [[ "$status" != "$sta" ]] then cat $grp_file $ERROR_NB "Wrong status $sta (exp. 
$status) for" $ERROR "$@" fi if [[ `wc -l < $logfile` -ne "$output_lines" ]] then cat $logfile cat $grp_file $ERROR_NB "Expected $output_lines output for" $ERROR "$@" else $SUCCESS "'$@' ok" fi rm $grp_file true | $BINdflt group load > /dev/null $BINq ci -m1 > /dev/null } SetGrpAndTest 1 0 "invalid" SetGrpAndTest 1 0 " auto-prop" SetGrpAndTest 1 0 " auto-prop aa" SetGrpAndTest 0 1 "# a comment" SetGrpAndTest 0 1 "# comment" " # a further comment" " # still another" SetGrpAndTest 0 1 " auto-prop xxx test-prop" SetGrpAndTest 0 0 "ignore" SetGrpAndTest 0 0 " ignore " SetGrpAndTest 0 1 " # ignore " SetGrpAndTest 0 1 "take" SetGrpAndTest 0 1 "take" "auto-prop aa bb" SetGrpAndTest 1 0 "take" "# comment" "invalid" SetGrpAndTest 1 0 "take" "# comment" "ignore" # Test commit-pipe, too - this must be handled at commit time. $INFO "committing a property." file=hzuta pn="hAsdd:das_3:1" pd="äöüßµ§ property value" echo "group:$grp_name,./**" | $BINdflt group load cat > $grp_file < $file $BINdflt ci -m1 > $logfile $INFO "verify" got=`svn pg "$pn" "$REPURL/$file"` if [[ "$got" != "$pd" ]] then echo "got '$got'" $ERROR "Property not correctly committed?" else $SUCCESS "Property via group committed." fi # Check whether the commit pipe is really used. $WC2_UP_ST_COMPARE # Check whether the property is stored (and used!) locally, too. $INFO "verify local use" export -n FSVS_FILTER echo $$ - another test > $file $BINdflt ci -m1 > $logfile $WC2_UP_ST_COMPARE $INFO "added file" file=Added date > $file $BINq add $file > $logfile $BINq ci -m1 $file >> $logfile $WC2_UP_ST_COMPARE # Should be different - the repository data is in base64. if svn cat $REPURL/$file | diff -u - $file > $logfile then $ERROR "Property not used on added entry." fi function TP { $BINdflt prop-list -v "$1" > $logfile if [[ `wc -l < $logfile` -ne $2 ]] then cat $logfile $ERROR "Wrong number of properties" fi # The non-ASCII characters in the property value are shown as \xXX, so we # can't compare them directly. 
if false # [[ `sort < $logfile` != "$pn:"*"fsvs:commit-pipe:cat"* ]] then $ERROR "Properties wrong." fi } function S { $BINq ps fsvs:commit-pipe cat $file > $logfile } function D { $BINq pd fsvs:update-pipe $file >> $logfile } for entry in Set Del do $INFO "propset and propdel; first $entry" file=Prop-$entry date > $file # A new file has no properties; just the "no properties" line is printed. TP $WC/$file 1 opt="" if [[ "$entry" == "Set" ]] ; then S TP $WC/$file 3 D else D TP $WC/$file 2 S fi TP $WC/$file 2 $BINq ci -m1 $file >> $logfile TP $WC/$file 2 if ! svn cat $REPURL/$file | cmp - $file then $ERROR "Property not used on entry changed via prop-set and prop-del." fi $WC2_UP_ST_COMPARE TP $WC2/$file 2 done $INFO "Testing the difference between ignore and group." for g in "" "group:something," do for cmd in ignore group do st=1 if echo "$g./**" | $BINq $cmd load then st=0 fi should=0 if [[ "$g" == "" && "$cmd" == "group" ]] then # FSVS 1.1.18 compatibility mode should=0 fi if [[ "$st" != "$should" ]] then $ERROR "Wrong status for '$cmd' '$g'." fi # Test whether reading the group still works. $BINq st done done $SUCCESS "group/ignore difference ok." # Test that only alphanumeric group names are allowed for char in . - : _ \; / @ do if $BINq groups "group:G$char,./**" 2> $logfile then $ERROR "Group name 'G$char' shouldn't be allowed." fi done $SUCCESS "Group name restrictions seem ok." fsvs-1.2.6/tests/058_ignore_modematch0000755000202400020240000000373311243307501016441 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC $BINdflt delay # General ignore patterns are tested somewhere else. # Here the mode match is tested. logfile=$LOGDIR/058.log ign_file=`$PATH2SPOOL $WC Ign` if $BINdflt ignore m:0700:0070 > $logfile 2>&1 then $ERROR "Wrong match pattern (masks) shouldn't work." fi if $BINdflt ignore m-7:7 > $logfile 2>&1 then $ERROR "Wrong match pattern (syntax) shouldn't work." 
fi if $BINdflt ignore m:8:7 > $logfile 2>&1 then $ERROR "Wrong match pattern (octal) shouldn't work." fi if $BINdflt ignore m:a > $logfile 2>&1 then $ERROR "Wrong match pattern (non-numeric) shouldn't work." fi if $BINdflt ignore m:010000:00 > $logfile 2>&1 then $ERROR "Wrong match pattern (numbers) shouldn't work." fi if $BINdflt ignore m:-2:03 > $logfile 2>&1 then $ERROR "Wrong match pattern (negative numbers) shouldn't work." fi $SUCCESS "Invalid specifications rejected." function T { exp=$1 shift $INFO "testing $@." test -e $ign_file && rm $ign_file $BINdflt ignore "$@" $BINdflt st | grep -v dir > $logfile || true if [[ `wc -l < $logfile` -eq $exp ]] then $SUCCESS "Match mode $@ ok." else cat $logfile $ERROR "Expected $exp lines output for $@." fi } date > file chmod 0750 file T 0 './**' T 1 'take,./file' './**' T 0 'mode:0700:0700,./file' T 1 'mod:0700:0500,./**' T 0 mo:0050:0050 T 1 m:0050:0000 T 0 mode:0007:0000 # Test with a directory, too. # Commit everything. true | $BINq ignore load $BINq ci -m1 mkdir -m 0700 dir touch dir/file other echo 'mode:04:0' | $BINq ignore load $BINq ci -m1 > $logfile if [[ `$BINdflt log -v -rHEAD | grep other` == " other" ]] then $SUCCESS "Mode match on directory." else cat $logfile $BINdflt log -v -rHEAD $ERROR "Too much taken - directory should be ignored." fi # Now show the entries again. true | $BINq ignore load $BINq delay touch . if [[ `$BINdflt st | wc -l` -eq 3 ]] then $SUCCESS "Not stored in local list." else $ERROR "Stored in local list? 
Not seen as new" fi fsvs-1.2.6/tests/027_recursive0000755000202400020240000000725211100577704015146 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC LOGFILE=$LOGDIR/027.recursive mkdir -p 1/2/3/4/5/6 mkdir 1/2/3/4/5/d list="1/2/3/4/5/6 1/2/3/4 1/2" for f in $list do echo $f > $f-changed echo $f > $f-same done # commit some of the files $BINdflt ci -m 1 -q -o delay=yes $WC2_UP_ST_COMPARE for n in new changed do for f in $list do echo $n `date` > $f-$n done done # Change directories' mtime touch $list # remove directory rmdir 1/2/3/4/5/d function check { # Action, Parameter, Nr. of expected lines, Message msg="$4" # If we do a status, we'll have a logfile anyway. # If it's a revert, it might be nice to see whe WC before. if [[ "$1" == "status" ]] then rm $LOGFILE.status 2> /dev/null || true else $BINdflt status -C -C > $LOGFILE.status fi $BINdflt $1 $2 > $LOGFILE lns=`wc -l < $LOGFILE` if [[ $lns -ne "$3" ]] then $ERROR "$msg failed - wrong number of output lines (exp $3, got $lns)." fi shift 4 while [[ "$1" != "" ]] do if [[ `grep $1 < $LOGFILE | wc -l` -ne $2 ]] then $ERROR_NB "$msg" cat $LOGFILE $ERROR "expected $2*$1." fi shift 2 done $SUCCESS "$msg" } function status { check status "$@" } function revert { check revert "$@" } status "" 13 "Recursive status from root" changed 3 new 3 same 0 status "1/2/3" 9 "Partial recursive status" changed 2 new 2 same 0 status "1/2/3 -N" 4 "Partial non-recursive status" changed 1 new 1 same 0 status "1/2/3 -N -N" 1 "Partial atomic status" changed 0 new 0 same 0 status "1/2/3/4/5/6?* -N -N" 2 "Status with wildcard" changed 1 new 1 same 0 status "1/2/3/4/5/6?* -N -N -v" 3 "Verbose status with wildcard" changed 1 new 1 same 1 # Revert prints the revision it reverts to, so we get an extra line. 
revert "1/2/3/4/5/d" 2 "Revert of directory deletion" /d 1 changed 0 same 0 new 0 revert "1/2/3/4/5/6-changed" 2 "Single revert" changed 1 same 0 new 0 status "1/2/3/4/5/6-changed -v" 1 "Status after revert" changed 1 same 0 new 0 "-F ....." 1 status "1/2/3/4/5 -v" 6 "Status after revert" changed 1 same 1 new 1 /d 1 \ "-F ......" 3 "-F ....C." 1 "-F .t...." 1 echo $RANDOM > 1/2/3/4/5/6-changed revert "1/2/3/" 4 "Non-recursive revert" changed 1 same 0 new 0 ".m.?" 2 dir 2 status "1/2/3/4/5 -v" 6 "Status after revert" changed 1 same 1 new 1 /d 1 \ "-F ......" 2 "-F ....C." 1 "-F .t..C." 1 "-F .t...." 1 # The two changed entries get reverted, and their directories the mtime # reset. The directory 3 has already been done, so 1, 2 and 6 get reported # as possibly changed. # WHY is 6-new not found??? revert ". -R" 6 "Recursive revert" changed 2 same 0 new 0 mC 2 "^\.m\." 3 status "." 6 "Status after revert" new 3 changed 0 same 0 status ". -v" 17 "Verbose status after revert" new 3 changed 3 same 3 "-F ......" 11 # Don't try this at home! rm -r * revert ". -R -o delay=yes" 15 "Full revert" changed 3 same 3 /d 1 new 0 dir 8 5/6 3 # Now the status is empty - all is like it was. status "." 0 "Rec st after full revert" changed 0 new 0 same 0 "-F ......" 0 status ". -v" 14 "Rec verb st after full revert" changed 3 new 0 same 3 "-F ......" 14 /d 1 $WC2_UP_ST_COMPARE for f in $list do echo something-else > $f-changed echo something-else > $f-same done # Directories whose entries got changed don't get a new mtime! revert ". -R -o delay=yes" 7 "Full revert 2" changed 3 same 3 /d 0 new 0 ./1/2 6 # But that doesn't work for the root directory ... so we touch it, then we # know exactly what to test for. touch . # Now the status is (nearly) empty - all is like it was. status "." 1 "Rec st after full revert 2" changed 0 new 0 same 0 "-F ...." 0 "-F .m.." 1 status ". -v" 14 "Rec verb st after full revert 2" changed 3 new 0 same 3 "-F ....." 13 /d 1 "-F .t...." 
1 $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/014_basic_tests0000755000202400020240000000350411142755456015442 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC tmp=$LOGDIR/014.fsvs-test.tmp sleep 1 for filename in new-file1 ' ' new-file2 $STG_UTF8 $STG_LOC do if [[ `$BINdflt st | wc -l` -ne 0 ]] then $BINdflt st $ERROR "status gave unexpected lines" fi touch "$filename" if [[ `$BINdflt st | wc -l` -ne 2 ]] then $ERROR_NB "Expect: . changed, '$filename' as new" $BINdflt st $ERROR "got something else." fi echo " ci" $BINdflt ci -m "$filename-$RANDOM" -o delay=yes . > $tmp if [[ `grep -F "N... 0 ./$filename" < $tmp > /dev/null` ]] then cat $tmp $ERROR "expected '$filename' as new" else $SUCCESS "'$filename' is new" fi if [[ `tail -1 $tmp` == "committed revision"* ]] then $SUCCESS "found revision line" else cat $tmp $ERROR "expected 'committed revision'" fi $WC2_UP_ST_COMPARE done echo "delete a file" rm $filename if [[ `$BINdflt st $filename | wc -l` -ne 1 ]] then $ERROR "$filename should be shown as removed!" fi if [[ `$BINdflt st | wc -l` -ne 2 ]] then echo "Expect: . changed, $filename as deleted" $BINdflt st $ERROR "Doesn't match." fi if $BINdflt st does-not-exist-$$.$RANDOM.$RANDOM > $tmp 2>&1 then $ERROR "status doesn't stop on undefined entries" fi echo " ci" $BINdflt ci -m delete . > $tmp if [[ `grep -F "D... 0 ./$filename" < $tmp > /dev/null` ]] then cat $tmp $ERROR "expected $filename as deleted" else $SUCCESS "$filename is deleted" fi if [[ `tail -1 $tmp` == "committed revision"* ]] then $SUCCESS "found revision line" else cat $tmp $ERROR "expected 'committed revision'" fi $WC2_UP_ST_COMPARE # Test an entry with special (ASCII) characters. $INFO "Testing with a strange filename." 
#fn=`echo -e " :,;#*+?!'\t$\g%\\g()\n=\r[]\f{}"` fn=" :,;#*+?!'\t$\g%\\g()\n=\r[]\f{}" echo "$fn" > "$fn" $BINq ci -m1 $WC2_UP_ST_COMPARE fsvs-1.2.6/tests/compare0000755000202400020240000000247111114707007014167 0ustar marekmarek#!/usr/bin/perl use Getopt::Std; getopts("dmx:h"); $s=shift; $d=shift; die <<'EOF' Need two directories to compare, and maybe -d ignore dir-mtime -m expect mismatch in directory listing -x ignore changes that match this pattern EOF if $opt_h || !($s && $d); # find /tmp/fsvs-test-1000/wc-1/ -printf "% y%04m %7s %5G %5U %T+ %P\0\t%l\0\n" # strace -o /tmp/asdga -f -tt $cmd=qq'LANG=C rsync -ain --delete "$s" "$d" -c'; @changes=(); for (`$cmd`) { # Ignore empty lines. next if m(^\s*$); # ignore root directory next if m(\.d......... \./\s*$); # ignore mtime of links next if m(\.L\.\.t\.\.\.\.\.\. .* -> ); # and directories next if $opt_d && m(\.d\.\.t\.\.\.\.\.\. .*); next if $opt_x && m($opt_x)o; # everything else is a change. push @changes, $_; #.L..t...... typechange/device-symlink -> device-1 #>fc........ typechange/dir-file #.L..t...... typechange/file-symlink -> file-1 #cL+++++++++ typechange/symlink-symlink -> symlink-1 } $SIG{"__DIE__"} = sub { print STDERR '$ ',$cmd,"\n",@changes,@_; exit 1; }; if ($opt_m && @changes) { print "----- expected differences\n"; # pass output for further processing print @changes; exit 0; } die "----- Differences were found\n" if @changes; print "----- comparison of directory gave no differences\n"; exit 0; fsvs-1.2.6/tests/029_properties0000755000202400020240000000455111012747541015334 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC file=empty-file propname=blablubli propvalue=foobarbaz $BINdflt ps "$propname" "$propvalue" "$file" $BINq ci -m1 $WC2_UP_ST_COMPARE propvalread=`$BINdflt pg "$propname" "$WC2/$file"` if [[ "$propvalread" == "$propvalue" ]] then $SUCCESS "Property successfully read back" else $ERROR "Property not read!" 
fi if [[ `$BINdflt pl -v $file` == "$propname=$propvalue" ]] then $SUCCESS "Property successfully listed" else $ERROR "Property not listed!" fi # empty property $BINdflt ps "$propname" "" "$file" $BINq ci -m1 $WC2_UP_ST_COMPARE if [[ `$BINdflt pl -v $WC2/$file` == "$propname=" ]] then $SUCCESS "Property successfully emptied" else $ERROR "Property not emptied!" fi # delete property $BINdflt pd "$propname" "$file" propvalread=`$BINdflt pg "$propname" "$file"` if [[ "$propvalread" == "" ]] then $SUCCESS "Property deleted" else $ERROR "Deleted property still there" fi $BINq ci -m1 propvalread=`$BINdflt pg "$propname" "$file"` if [[ "$propvalread" == "" ]] then $SUCCESS "Property still deleted after commit" else $ERROR "Deleted property back after commit" fi $WC2_UP_ST_COMPARE if [[ `$BINdflt pl $WC2/$file` == "$file has no properties." ]] then $SUCCESS "Property successfully removed" else $ERROR "Property not removed!" fi if $BINq ps a b does-not-exist 2> /dev/null then $ERROR "Sets properties for unknown entries" else $SUCCESS "Rejects unknown entries" fi file=abc.$RANDOM function CheckStatus { exp=$1 verb=$2 # We need -C for the test with only changed mtime, but again with 0 # bytes. if [[ `$BINdflt st $file -C | cut -c1-5` != "$exp" ]] then $BINdflt st $file $ERROR "Wrong status output - expected '$exp'." fi if [[ `$BINdflt st $file -v -C | cut -c1-7` != "$verb" ]] then $BINdflt st $file -v $ERROR "Wrong verbose status output - expected '$verb'." fi } touch -d 12:00 $file CheckStatus "N... " "N..... " $BINq ps a b $file CheckStatus "nP.. " "n..P.. " $BINq ci -m2 CheckStatus "" "...... " $BINq ps a c $file CheckStatus ".P.. " "...P.. " echo aaaa > $file touch -d 12:00 $file CheckStatus ".PC. " "...PC. " rm $file touch $file CheckStatus ".m.. " ".t.P.. " $SUCCESS "All status outputs are ok." # What happens if we change a property on a deleted entry? The property # change would simply get lost - we only send the delete to the repository. 
fsvs-1.2.6/tests/056_all_removed0000755000202400020240000000113011073666560015427 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/056.all_removed # Testing the all_removed option. mkdir -p 1/2/3/4/5/6/7 $BINq ci -m1 rm -r 1/2 for FSVS_DIR_SORT in yes no do $INFO "testing all_removed for dir_sort=$FSVS_DIR_SORT." $BINdflt st -o all_removed=yes > $logfile if [[ `wc -l < $logfile` != 7 ]] then cat $logfile $ERROR "all_removed=yes doesn't work." fi $BINdflt st -o all_removed=no > $logfile if [[ `wc -l < $logfile` != 2 ]] then cat $logfile $ERROR "all_removed=no doesn't work." fi done $SUCCESS "all_removed seems ok." fsvs-1.2.6/tests/033_many_symlinks0000755000202400020240000000210511073666630016027 0ustar marekmarek#!/bin/bash set -e $PREPARE_CLEAN > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/033.log COUNT=1000 DIR=subdir # We set some ulimit here, so that we know see whether RAM is eaten or not. # 640k is enough for everyone! No, not here ... limit=32768 # On 64bit the libraries need much more (virtual) memory; so we don't limit # that here. ulimit -S -d $limit -m $limit -s $limit ulimit -H -d $limit -m $limit -s $limit mkdir $DIR $BINq ci -m1 $INFO "Creating symlinks" perl -e ' ($nr, $dir)=@ARGV; for(1 .. $nr) { symlink("./././././../$dir/../$dir/../$dir", sprintf("%s/%05d", $dir,$_) ) || die $!; } ' $COUNT $DIR $INFO "Looking for them." # Generating is so fast that the directory might stay in the same second. found=`$BINdflt st -o change_check=dir | wc -l` # The directory gets reported, too. if [[ $found -eq `echo $COUNT + 1 | bc` ]] then $SUCCESS "fsvs found all $COUNT changed entries." else $ERROR "fsvs found $found instead of $COUNT entries!" fi $INFO "Commit ..." $BINq ci -m many $INFO "Update ..." $WC2_UP_ST_COMPARE $INFO "sync-repos" $BINq sync-repos fsvs-1.2.6/tests/Makefile.in0000644000202400020240000001406611256603070014665 0ustar marekmarek### MUST BE EDITED AS UTF-8! 
#### # # BINARY needs to be set to the program to test. # # CHECKER can eg. be strace or valgrind # if VERBOSE is set, debug and verbose level is turned on. # TEST_LIST can be a list of test scripts to run. # TL_FROM can be a pattern from which to (re-)start the scripts. # ifndef BINARY $(error Which program should I test?) endif TEST_PROG_DIR := $(shell pwd) VALGRIND := valgrind --trace-children=no -v --error-exitcode=1 --log-file=/tmp/valgrind.out --suppressions=$(TEST_PROG_DIR)/valgrind-suppressions.supp CALLGRIND := valgrind -q --tool=callgrind --collect-systime=yes --collect-jumps=yes --dump-instr=yes --callgrind-out-file=/tmp/valgrind.%p.out MEMCHECK := valgrind --leak-check=full --show-reachable=yes --num-callers=15 --log-file=/tmp/valgrind-%p.out --time-stamp=yes MEMCHECK_G:= valgrind --trace-children=no -v --error-exitcode=1 --log-file=/tmp/valgrind.%p.out --suppressions=$(TEST_PROG_DIR)/valgrind-suppressions.supp --gen-suppressions=all export VALGRIND CALLGRIND MEMCHECK ifeq ($(CHECKER),valgrind) override CHECKER := $(VALGRIND) endif ifeq ($(CHECKER),callgrind) override CHECKER := $(CALLGRIND) endif ifeq ($(CHECKER),memcheck) override CHECKER := $(MEMCHECK) endif ifeq ($(CHECKER),memcheck_g) override CHECKER := $(MEMCHECK_G) endif # if $BINARY starts with /, it's an absolute path; take it. # else prepend the current path. BIN_FULLPATH := $(if $(BINARY:/%=),$(shell pwd)/$(BINARY),$(BINARY)) # Binary with no special parameters BINdflt := $(CHECKER) $(BIN_FULLPATH) # Binary quiet BINq := $(BINdflt) -q # Binary, with optional verbosity (VERBOSE=1) or quiet BIN := $(BINdflt) ifdef VERBOSE BIN += -d -v BASH_VERBOSE := -x else BIN += -q BASH_VERBOSE := endif # options from configure opt_DEBUG = @ENABLE_DEBUG@ export opt_DEBUG BASH_VERBOSE TEST_LIST ifdef TEST_LIST # only run the specified tests ... else # A common problem (at least for me) is that I write # TL_FROM=018* # analogous to # TEST_LIST=018* # But the one thing is a grep, the other a shell pattern ... 
# So a * at the end gets removed. TEST_LIST := $(shell cd $(TEST_PROG_DIR) && ls ???_* | sort | grep -A 2000 "$(TL_FROM:%*=%)" ) endif ifeq ($(PROTOCOL), svn+ssh) override PROTOCOL=svn+ssh://localhost endif ifeq ($(RANDOM_ORDER), 1) TEST_LIST := $(shell perl -e 'srand(); while (@ARGV) { print splice(@ARGV, rand(@ARGV), 1),"\n"; } ' $(TEST_LIST)) endif # For use by the locale-dependent tests. STG_UTF8:=xxµxWörstCäseTäßt # This gives eg. for de_AT "Jän", but in the locale encoding (eg. 8859-1) STG_LOC:=$(shell date +%b -d2001-01-27) # We don't use the 1st - depending on the timezone settings this might give # us December. export STG_UTF8 STG_LOC UID := $(shell id -u) TESTBASEx ?= /tmp TESTBASE := $(TESTBASEx)/fsvs-test-$(UID) REP := $(TESTBASE)/repos LOGDIR := $(TESTBASE)/log PROTOCOL ?= file:// REPURLBASE:= $(PROTOCOL)$(REP) REPSUBDIR := trunk REPURL := $(REPURLBASE)/$(REPSUBDIR) WCBASE := $(TESTBASE)/wc- # Please note that if your test script uses more than the normal # two working copies only WC1 (=WC) and WC2 are defined WC_COUNT ?= 2 WC := $(WCBASE)1 WC2 := $(WCBASE)2 WC1 := $(WC) DFLT_REPO := $(TESTBASE)/default-repos.dump export REP REPURL REPSUBDIR REPURLBASE WC WC1 WC2 DFLT_REPO WC_COUNT WCBASE export BIN BINdflt BINq TEST_PROG_DIR LOGDIR BIN_FULLPATH PREPARE_REPOS := $(MAKE) -s -C $(TEST_PROG_DIR) prepare_repos PREPARE_WC1 := $(MAKE) -s -C $(TEST_PROG_DIR) prepare_wc1 PREPARE_WC2 := $(MAKE) -s -C $(TEST_PROG_DIR) prepare_wc2 PREPARE_DEFAULT := $(MAKE) -s -C $(TEST_PROG_DIR) prepare_repos PREPARE_CLEAN := $(MAKE) -s -C $(TEST_PROG_DIR) prepare_clean INCLUDE_FUNCS := . 
$(TEST_PROG_DIR)/test_functions export PREPARE_REPOS PREPARE_WC1 PREPARE_WC2 PREPARE_CLEAN PREPARE_DEFAULT export INCLUDE_FUNCS TESTBASE WC2_UP_ST_COMPARE := $(TEST_PROG_DIR)/up_st_cmp export WC2_UP_ST_COMPARE COMPONENT_SCRIPT := $(TEST_PROG_DIR)/component-test.pl export COMPONENT_SCRIPT FSVS_WAA := $(TESTBASE)/waa FSVS_CONF := $(TESTBASE)/conf export FSVS_WAA FSVS_CONF all: $(TESTBASE) $(FSVS_WAA) $(FSVS_CONF) $(DFLT_REPO) run_tests $(TESTBASE): test -d $(TESTBASE) || mkdir -p $(TESTBASE) $(FSVS_WAA): test -d $(FSVS_WAA) || mkdir -p $(FSVS_WAA) $(FSVS_CONF): test -d $(FSVS_CONF) || mkdir -p $(FSVS_CONF) SANITIZE := perl -pe 's/([^\x20-\x7e\r\n]+)/sprintf("\\(%s)", unpack("H*", $$1))/eg;' diag: @echo "testbase: $(TESTBASE)" @echo "tests : $(TEST_LIST)" @echo "binary : $(BIN)" @echo "checker : $(CHECKER)" @echo "verbose : $(VERBOSE)" @echo "def.repo: $(DFLT_REPO)" @echo "waa : $(FSVS_WAA)" @echo "conf : $(FSVS_CONF)" @echo "stg_utf8: $(STG_UTF8)" | $(SANITIZE) @echo "stg_loc : $(STG_LOC)" | $(SANITIZE) locale_strings: @echo $(STG_UTF8) @echo $(STG_LOC) .PHONY: locale_strings diag $(DFLT_REPO): $(TEST_PROG_DIR)/001_init_dir @echo Preparing default repository. @$< @svnadmin dump -q $(REP) > $@ prepare_empty: @echo Preparing clean repository ... test -d $(REP) && rm -rf $(REP) || true rm -rf $(FSVS_CONF) $(FSVS_WAA) || true $(MAKE) $(FSVS_CONF) $(FSVS_WAA) || true test -d $(LOGDIR) || mkdir $(LOGDIR) svnadmin create $(REP) echo '' > $(FSVS_CONF)/config prepare_clean: $(MAKE) -s prepare_empty svn mkdir -m "mkdir trunk" $(REPURL) $(MAKE) -s CMD="ci -m x" prepare_wcs WC_COUNT=$(WC_COUNT) $(BIN_FULLPATH) delay $(WC1) prepare_repos: @echo Loading repository ... 
$(MAKE) -s prepare_empty svnadmin load -q $(REP) < $(DFLT_REPO) $(MAKE) -s CMD="up" prepare_wcs WC_COUNT=$(WC_COUNT) $(BIN_FULLPATH) delay $(WC1) prepare_wcs: for i in `seq 1 $(WC_COUNT)` ; do $(MAKE) prepare_wc "CMD=$(CMD)" _WC=$(WCBASE)$$i ; done .PHONY: prepare_repos prepare_clean prepare_empty ifdef _WC prepare_wc: @echo Preparing $(_WC) ... test -d $(_WC) && rm -rf $(_WC) || true mkdir $(_WC) rm $(shell $(TEST_PROG_DIR)/path2spool $(_WC) "")/* 2> /dev/null || true rm $(shell $(TEST_PROG_DIR)/path2spool $(_WC) "^")/* 2> /dev/null || true cd $(_WC) && echo N:url,$(REPURL) | $(BINq) urls load ifdef CMD cd $(_WC) && $(BINq) $(CMD) endif endif run_tests: @echo Running tests $(TEST_LIST) ... @echo '' > $(FSVS_CONF)/config @$(TEST_PROG_DIR)/run-tests fsvs-1.2.6/tests/066_verbosity0000755000202400020240000000417611251252110015156 0ustar marekmarek#!/bin/bash set -e $PREPARE_DEFAULT > /dev/null $INCLUDE_FUNCS cd $WC logfile=$LOGDIR/066.verbosity newfn=some-file.$$ echo LOL > $newfn # Must be more than 6 characters, so that we see whether the column width # estimation works. groupname=roflLongNameHereUsed grp_dir=`$PATH2SPOOL $WC ^`/groups mkdir $grp_dir echo take > $grp_dir/$groupname $BINq group "group:$groupname,./**" function C { opt="$1" pat="$2" $BINdflt st $opt > $logfile # The shell doesn't want a variable as pattern. if bash -c '[[ `cat '"$logfile"'` == '"$pat"' ]]' then $SUCCESS "got $opt=$pat" else cat $logfile $ERROR "expected '$pat' for '$opt'" fi } C "-q" '""' C "tree" '""' C "tree -v -N -N" '"...... "*"dir tree"' C "$newfn" "'N... 4 $newfn'" C "$newfn -o verbose=none,changes" "'N... '" C "$newfn -o verbose=none,changes,time" "'N..... '" C "$newfn -o verbose=none,path" "'$newfn'" C "$newfn -o verbose=none,name" "'$newfn'" C "$newfn -o verbose=none,size" "' 4 '" C "$newfn -o verbose=none,default" "'N... 4 $newfn'" C "$newfn -o verbose=none,group" "'$groupname '" # Entries selected on the command line (here with '*') are always given? 
#C "* -N -f default -o verbose=none,group" "'$groupname '" C "$newfn -o verbose=none,copyfrom" "''" C "$newfn -o verbose=none,time" "''" C "$newfn -o verbose=quiet" "''" C "$newfn -q" "''" C "$newfn -o verbose=none,stacktrace" "''" from=empty-file $BINq cp $from $newfn C "$newfn -o verbose=none,copyfrom" "' (copied from $REPURL/$from)'" $BINq ci -m1 > $logfile rev=`grep "revision " $logfile | tail -1 | cut -f2 -d" " | cut -f1 -d" "` C "$newfn -o verbose=none,urls" "' $REPURL/$newfn'" C "$newfn -o verbose=none,copyfrom" "''" for a in '-q -q' '-o verbose=veryquiet' do date > $RANDOM $BINdflt ci -m1 $a > $logfile if [[ `wc -l < $logfile` -eq 0 ]] then $SUCCESS "No output for $a." else $ERROR "Expected quietness for $a." fi done lines=`$BINdflt st -v | wc -l` all=`$BINdflt st -o verbose=all | tee $logfile | wc -l` echo "Got $all, wanted $lines" if [[ $lines -eq $all ]] then $SUCCESS "'-o verbose=all' behaves" else $ERROR "Not enough output for '-o verbose=all'" fi fsvs-1.2.6/LICENSE0000644000202400020240000010451310742435730012465 0ustar marekmarek GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. 
If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.