dovecot-2.2.9/0002755000175000017500000000000012244505323010172 500000000000000dovecot-2.2.9/compile0000755000175000017500000001624512217156113011475 00000000000000#! /bin/sh # Wrapper for compilers which do not understand '-c -o'. scriptversion=2012-10-14.11; # UTC # Copyright (C) 1999-2013 Free Software Foundation, Inc. # Written by Tom Tromey . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . nl=' ' # We need space, tab and new line, in precisely that order. Quoting is # there to prevent tools from complaining about whitespace usage. IFS=" "" $nl" file_conv= # func_file_conv build_file lazy # Convert a $build file to $host form and store it in $file # Currently only supports Windows hosts. If the determined conversion # type is listed in (the comma separated) LAZY, no conversion will # take place. func_file_conv () { file=$1 case $file in / | /[!/]*) # absolute file, and not a UNC file if test -z "$file_conv"; then # lazily determine how to convert abs files case `uname -s` in MINGW*) file_conv=mingw ;; CYGWIN*) file_conv=cygwin ;; *) file_conv=wine ;; esac fi case $file_conv/,$2, in *,$file_conv,*) ;; mingw/*) file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` ;; cygwin/*) file=`cygpath -m "$file" || echo "$file"` ;; wine/*) file=`winepath -w "$file" || echo "$file"` ;; esac ;; esac } # func_cl_dashL linkdir # Make cl look for libraries in LINKDIR func_cl_dashL () { func_file_conv "$1" if test -z "$lib_path"; then lib_path=$file else lib_path="$lib_path;$file" fi linker_opts="$linker_opts -LIBPATH:$file" } # func_cl_dashl library # Do a library search-path lookup for cl func_cl_dashl () { lib=$1 found=no save_IFS=$IFS IFS=';' for dir in $lib_path $LIB do IFS=$save_IFS if $shared && test -f "$dir/$lib.dll.lib"; then found=yes lib=$dir/$lib.dll.lib break fi if test -f "$dir/$lib.lib"; then found=yes lib=$dir/$lib.lib break fi if test -f "$dir/lib$lib.a"; then found=yes lib=$dir/lib$lib.a break fi done IFS=$save_IFS if test "$found" != yes; then lib=$lib.lib fi } # func_cl_wrapper cl arg... # Adjust compile command to suit cl func_cl_wrapper () { # Assume a capable shell lib_path= shared=: linker_opts= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. 
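# Illustrative note (editor's sketch, not part of the upstream Automake script):
# the '-o' handling below rewrites GCC-style output options into their MSVC cl
# equivalents. For example, an invocation such as
#   compile cl -c -o foo.o foo.c      # would be run as:  cl -c -Fofoo.o foo.c
#   compile cl -o foo.exe foo.c       # would be run as:  cl -Fefoo.exe foo.c
# i.e. object-file outputs are passed with -Fo and any other output with -Fe.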
eat=1 case $2 in *.o | *.[oO][bB][jJ]) func_file_conv "$2" set x "$@" -Fo"$file" shift ;; *) func_file_conv "$2" set x "$@" -Fe"$file" shift ;; esac ;; -I) eat=1 func_file_conv "$2" mingw set x "$@" -I"$file" shift ;; -I*) func_file_conv "${1#-I}" mingw set x "$@" -I"$file" shift ;; -l) eat=1 func_cl_dashl "$2" set x "$@" "$lib" shift ;; -l*) func_cl_dashl "${1#-l}" set x "$@" "$lib" shift ;; -L) eat=1 func_cl_dashL "$2" ;; -L*) func_cl_dashL "${1#-L}" ;; -static) shared=false ;; -Wl,*) arg=${1#-Wl,} save_ifs="$IFS"; IFS=',' for flag in $arg; do IFS="$save_ifs" linker_opts="$linker_opts $flag" done IFS="$save_ifs" ;; -Xlinker) eat=1 linker_opts="$linker_opts $2" ;; -*) set x "$@" "$1" shift ;; *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) func_file_conv "$1" set x "$@" -Tp"$file" shift ;; *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) func_file_conv "$1" mingw set x "$@" "$file" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -n "$linker_opts"; then linker_opts="-link$linker_opts" fi exec "$@" $linker_opts exit 1 } eat= case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: compile [--help] [--version] PROGRAM [ARGS] Wrapper for compilers which do not understand '-c -o'. Remove '-o dest.o' from ARGS, run PROGRAM with the remaining arguments, and rename the output as expected. If you are trying to build a whole package this is not the right script to run: please start by reading the file 'INSTALL'. Report bugs to . EOF exit $? ;; -v | --v*) echo "compile $scriptversion" exit $? ;; cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) func_cl_wrapper "$@" # Doesn't return... ;; esac ofile= cfile= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. # So we strip '-o arg' only if arg is an object. eat=1 case $2 in *.o | *.obj) ofile=$2 ;; *) set x "$@" -o "$2" shift ;; esac ;; *.c) cfile=$1 set x "$@" "$1" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -z "$ofile" || test -z "$cfile"; then # If no '-o' option was seen then we might have been invoked from a # pattern rule where we don't need one. That is ok -- this is a # normal compilation that the losing compiler can handle. If no # '.c' file was seen then we are probably linking. That is also # ok. exec "$@" fi # Name of file we expect compiler to create. cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` # Create the lock directory. # Note: use '[/\\:.-]' here to ensure that we don't use the same name # that we are using for the .o file. Also, base the name on the expected # object file name, since that is what matters with a parallel build. lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d while true; do if mkdir "$lockdir" >/dev/null 2>&1; then break fi sleep 1 done # FIXME: race condition here if user kills between mkdir and trap. trap "rmdir '$lockdir'; exit 1" 1 2 15 # Run the compile. "$@" ret=$? 
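# Illustrative note (editor's sketch, not part of the upstream Automake script):
# because '-o $ofile' was stripped above, the compiler writes its object file
# into the current directory; the block below then renames it to the requested
# location. For example:
#   compile cc -c -o sub/foo.o sub/foo.c
# runs 'cc -c sub/foo.c' while holding the lock directory, producing ./foo.o,
# which is finally moved to sub/foo.o (or ./foo.obj is moved, for compilers
# that emit .obj files).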
if test -f "$cofile"; then test "$cofile" = "$ofile" || mv "$cofile" "$ofile" elif test -f "${cofile}bj"; then test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" fi rmdir "$lockdir" exit $ret # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: dovecot-2.2.9/COPYING.MIT0000644000175000017500000000177712244400443011604 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. dovecot-2.2.9/config.rpath0000755000175000017500000004443512244505154012434 00000000000000#! /bin/sh # Output a system dependent set of variables, describing how to set the # run time search path of shared libraries in an executable. # # Copyright 1996-2013 Free Software Foundation, Inc. # Taken from GNU libtool, 2001 # Originally by Gordon Matzigkeit , 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # # The first argument passed to this file is the canonical host specification, # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld # should be set by the caller. # # The set of defined variables is at the end of this script. # Known limitations: # - On IRIX 6.5 with CC="cc", the run time search patch must not be longer # than 256 bytes, otherwise the compiler driver will dump core. The only # known workaround is to choose shorter directory names for the build # directory and/or the installation directory. # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a shrext=.so host="$1" host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` # Code taken from libtool.m4's _LT_CC_BASENAME. for cc_temp in $CC""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'` # Code taken from libtool.m4's _LT_COMPILER_PIC. 
wl= if test "$GCC" = yes; then wl='-Wl,' else case "$host_os" in aix*) wl='-Wl,' ;; mingw* | cygwin* | pw32* | os2* | cegcc*) ;; hpux9* | hpux10* | hpux11*) wl='-Wl,' ;; irix5* | irix6* | nonstopux*) wl='-Wl,' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in ecc*) wl='-Wl,' ;; icc* | ifort*) wl='-Wl,' ;; lf95*) wl='-Wl,' ;; nagfor*) wl='-Wl,-Wl,,' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) wl='-Wl,' ;; ccc*) wl='-Wl,' ;; xl* | bgxl* | bgf* | mpixl*) wl='-Wl,' ;; como) wl='-lopt=' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ F* | *Sun*Fortran*) wl= ;; *Sun\ C*) wl='-Wl,' ;; esac ;; esac ;; newsos6) ;; *nto* | *qnx*) ;; osf3* | osf4* | osf5*) wl='-Wl,' ;; rdos*) ;; solaris*) case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) wl='-Qoption ld ' ;; *) wl='-Wl,' ;; esac ;; sunos4*) wl='-Qoption ld ' ;; sysv4 | sysv4.2uw2* | sysv4.3*) wl='-Wl,' ;; sysv4*MP*) ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) wl='-Wl,' ;; unicos*) wl='-Wl,' ;; uts4*) ;; esac fi # Code taken from libtool.m4's _LT_LINKER_SHLIBS. hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_direct=no hardcode_minus_L=no case "$host_os" in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. # Unlike libtool, we use -rpath here, not --rpath, since the documented # option of GNU ld is called -rpath, not --rpath. hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' case "$host_os" in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no fi ;; amigaos*) case "$host_cpu" in powerpc) ;; m68k) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. 
hardcode_libdir_flag_spec='-L$libdir' if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then : else ld_shlibs=no fi ;; haiku*) ;; interix[3-9]*) hardcode_direct=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; netbsd*) ;; solaris*) if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then ld_shlibs=no elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no ;; *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' else ld_shlibs=no fi ;; esac ;; sunos4*) hardcode_direct=yes ;; *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then hardcode_libdir_flag_spec= fi else case "$host_os" in aix3*) # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac fi hardcode_direct=yes hardcode_libdir_separator=':' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && \ strings "$collect2name" | grep resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac fi # Begin _LT_AC_SYS_LIBPATH_AIX. echo 'int main () { return 0; }' > conftest.c ${CC} ${LDFLAGS} conftest.c -o conftest aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` fi if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib" fi rm -f conftest.c conftest # End _LT_AC_SYS_LIBPATH_AIX. if test "$aix_use_runtimelinking" = yes; then hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' else hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" fi fi ;; amigaos*) case "$host_cpu" in powerpc) ;; m68k) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. 
hardcode_libdir_flag_spec=' ' libext=lib ;; darwin* | rhapsody*) hardcode_direct=no if { case $cc_basename in ifort*) true;; *) test "$GCC" = yes;; esac; }; then : else ld_shlibs=no fi ;; dgux*) hardcode_libdir_flag_spec='-L$libdir' ;; freebsd2.2*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; freebsd2*) hardcode_direct=yes hardcode_minus_L=yes ;; freebsd* | dragonfly*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; hpux9*) hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; hpux10*) if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no ;; *) hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; netbsd*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; newsos6) hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then hardcode_libdir_flag_spec='${wl}-rpath,$libdir' else case "$host_os" in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) hardcode_libdir_flag_spec='-R$libdir' ;; *) hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; osf3*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) if test "$GCC" = yes; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else # Both cc and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi hardcode_libdir_separator=: ;; solaris*) hardcode_libdir_flag_spec='-R$libdir' ;; sunos4*) hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes ;; sysv4) case $host_vendor in sni) hardcode_direct=yes # is this really true??? ;; siemens) hardcode_direct=no ;; motorola) hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac ;; sysv4.3*) ;; sysv4*MP*) if test -d /usr/nec; then ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) ;; sysv5* | sco3.2v5* | sco5v6*) hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' hardcode_libdir_separator=':' ;; uts4*) hardcode_libdir_flag_spec='-L$libdir' ;; *) ld_shlibs=no ;; esac fi # Check dynamic linker characteristics # Code taken from libtool.m4's _LT_SYS_DYNAMIC_LINKER. # Unlike libtool.m4, here we don't care about _all_ names of the library, but # only about the one the linker finds when passed -lNAME. This is the last # element of library_names_spec in libtool.m4, or possibly two of them if the # linker has special search rules. 
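# Illustrative note (editor's sketch, not part of the upstream script): on a
# typical ELF platform (linux*), library_names_spec='$libname$shrext' with
# shrext=.so, so '-lfoo' is resolved against libfoo.so; on cygwin/mingw two
# names are listed ('$libname.dll.a $libname.lib') because the linker may find
# either an import library or an MSVC-style .lib.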
library_names_spec= # the last element of library_names_spec in libtool.m4 libname_spec='lib$name' case "$host_os" in aix3*) library_names_spec='$libname.a' ;; aix[4-9]*) library_names_spec='$libname$shrext' ;; amigaos*) case "$host_cpu" in powerpc*) library_names_spec='$libname$shrext' ;; m68k) library_names_spec='$libname.a' ;; esac ;; beos*) library_names_spec='$libname$shrext' ;; bsdi[45]*) library_names_spec='$libname$shrext' ;; cygwin* | mingw* | pw32* | cegcc*) shrext=.dll library_names_spec='$libname.dll.a $libname.lib' ;; darwin* | rhapsody*) shrext=.dylib library_names_spec='$libname$shrext' ;; dgux*) library_names_spec='$libname$shrext' ;; freebsd* | dragonfly*) case "$host_os" in freebsd[123]*) library_names_spec='$libname$shrext$versuffix' ;; *) library_names_spec='$libname$shrext' ;; esac ;; gnu*) library_names_spec='$libname$shrext' ;; haiku*) library_names_spec='$libname$shrext' ;; hpux9* | hpux10* | hpux11*) case $host_cpu in ia64*) shrext=.so ;; hppa*64*) shrext=.sl ;; *) shrext=.sl ;; esac library_names_spec='$libname$shrext' ;; interix[3-9]*) library_names_spec='$libname$shrext' ;; irix5* | irix6* | nonstopux*) library_names_spec='$libname$shrext' case "$host_os" in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; *) libsuff= shlibsuff= ;; esac ;; esac ;; linux*oldld* | linux*aout* | linux*coff*) ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) library_names_spec='$libname$shrext' ;; knetbsd*-gnu) library_names_spec='$libname$shrext' ;; netbsd*) library_names_spec='$libname$shrext' ;; newsos6) library_names_spec='$libname$shrext' ;; *nto* | *qnx*) library_names_spec='$libname$shrext' ;; openbsd*) library_names_spec='$libname$shrext$versuffix' ;; os2*) libname_spec='$name' shrext=.dll library_names_spec='$libname.a' ;; osf3* | osf4* | osf5*) library_names_spec='$libname$shrext' ;; rdos*) ;; solaris*) library_names_spec='$libname$shrext' ;; sunos4*) library_names_spec='$libname$shrext$versuffix' ;; sysv4 | sysv4.3*) library_names_spec='$libname$shrext' ;; sysv4*MP*) library_names_spec='$libname$shrext' ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) library_names_spec='$libname$shrext' ;; tpf*) library_names_spec='$libname$shrext' ;; uts4*) library_names_spec='$libname$shrext' ;; esac sed_quote_subst='s/\(["`$\\]\)/\\\1/g' escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` shlibext=`echo "$shrext" | sed -e 's,^\.,,'` escaped_libname_spec=`echo "X$libname_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` escaped_library_names_spec=`echo "X$library_names_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' < Solar Designer (src/lib/md5.c, src/auth/passdb-pam.c) Andrey Panin (src/auth/mech-apop.c, src/auth/mech-login.c, src/lib-ntlm/*, src/auth/mech-ntlm.c, src/auth/mech-rpa.c) Joshua Goodall (src/auth/mech-cram-md5.c, src/doveadm/doveadm-pw.c) Jakob Hirsch (src/lib-sql/driver-sqlite.c) Jelmer Vernooij (src/auth/mech-gssapi.c) Vaclav Haisman (src/lib/ioloop-kqueue.c, src/lib/ioloop-notify-kqueue.c) Portions Copyright (c) 2008 Apple Inc. All rights reserved. Grepping 'Patch by' from ChangeLog shows up more people. 
dovecot-2.2.9/install-sh0000755000175000017500000003325612017213063012120 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2011-01-19.21; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. 
-s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 # Protect names problematic for `test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for `test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call `install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for `test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 
2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writeable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. 
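# Illustrative note (editor's sketch, not part of the upstream X11/FSF script):
# the source file is first copied to a temporary name inside the destination
# directory, has chown/chgrp/strip/chmod applied there, and is only then
# renamed over the final name, so readers never observe a half-installed file.
# For example (hypothetical paths):
#   ./install-sh -c -m 644 dovecot.conf /etc/dovecot
# copies to /etc/dovecot/_inst.<pid>_ first and then moves that temporary file
# to /etc/dovecot/dovecot.conf.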
dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: dovecot-2.2.9/config.guess0000755000175000017500000012743212017213063012434 00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012 Free Software Foundation, Inc. timestamp='2012-02-10' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Per Bothner. 
Please send patches (context # diff format) to and include a ChangeLog # entry. # # This script attempts to guess a canonical system name similar to # config.sub. If it succeeds, it prints the system name on stdout, and # exits with 0. Otherwise, it exits with 1. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
# (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently, or will in the future. case "${UNAME_MACHINE_ARCH}" in arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "${machine}-${os}${release}" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. 
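# Illustrative note (editor's sketch, not part of the upstream config.guess):
# psrinfo -v prints lines such as 'The alpha EV6.7 (21264A) processor ...';
# the sed/head pipeline below keeps only the CPU type of processor 0, which
# the case statement then maps to a machine name (alphaev67 in this example),
# yielding a triplet along the lines of alphaev67-dec-osf5.1.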
ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE="alpha" ;; "EV4.5 (21064)") UNAME_MACHINE="alpha" ;; "LCA4 (21066/21068)") UNAME_MACHINE="alpha" ;; "EV5 (21164)") UNAME_MACHINE="alphaev5" ;; "EV5.6 (21164A)") UNAME_MACHINE="alphaev56" ;; "EV5.6 (21164PC)") UNAME_MACHINE="alphapca56" ;; "EV5.7 (21164PC)") UNAME_MACHINE="alphapca57" ;; "EV6 (21264)") UNAME_MACHINE="alphaev6" ;; "EV6.7 (21264A)") UNAME_MACHINE="alphaev67" ;; "EV6.8CB (21264C)") UNAME_MACHINE="alphaev68" ;; "EV6.8AL (21264B)") UNAME_MACHINE="alphaev68" ;; "EV6.8CX (21264D)") UNAME_MACHINE="alphaev68" ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE="alphaev69" ;; "EV7 (21364)") UNAME_MACHINE="alphaev7" ;; "EV7.9 (21364A)") UNAME_MACHINE="alphaev79" ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm:riscos:*:*|arm:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH="i386" # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. 
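# Illustrative note (editor's sketch, not part of the upstream config.guess):
# rather than relying on __LP64__ expanding to 1 (which Sun cc does not do the
# way gcc does, per the note above), the probe below preprocesses
#   #ifdef __amd64
#   IS_64BIT_ARCH
#   #endif
# and greps for the surviving IS_64BIT_ARCH token; if it survives, the
# compiler targets 64-bit objects and SUN_ARCH is set to x86_64.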
if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH="x86_64" fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; 
Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? 
) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH="hppa2.0n" ;; 64) HP_ARCH="hppa2.0w" ;; '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = "hppa2.0w" ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH="hppa2.0w" else HP_ARCH="hppa64" fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. 
*/ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case ${UNAME_PROCESSOR} in amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; i*:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. 
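# Illustrative note (editor's sketch, not part of the upstream config.guess):
# since uname -m already includes the '-pc' vendor part on windows32 systems
# (e.g. something like 'i686-pc'), the branch below appends only '-mingw32'
# instead of the usual '-pc-mingw32', avoiding a doubled vendor field.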
echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-gnu else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo ${UNAME_MACHINE}-unknown-linux-gnueabi else echo ${UNAME_MACHINE}-unknown-linux-gnueabihf fi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; cris:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-gnu exit ;; crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-gnu exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; hexagon:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; i*86:Linux:*:*) LIBC=gnu eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __dietlibc__ LIBC=dietlibc #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` echo "${UNAME_MACHINE}-pc-linux-${LIBC}" exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } ;; or32:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; padre:Linux:*:*) echo sparc-unknown-linux-gnu exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-gnu exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-gnu ;; PA8*) echo hppa2.0-unknown-linux-gnu ;; *) echo hppa-unknown-linux-gnu ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-gnu exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-gnu exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; tile*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-gnu exit ;; x86_64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. 
echo ${UNAME_MACHINE}-pc-os2-emx
	exit ;;
    i*86:XTS-300:*:STOP)
	echo ${UNAME_MACHINE}-unknown-stop
	exit ;;
    i*86:atheos:*:*)
	echo ${UNAME_MACHINE}-unknown-atheos
	exit ;;
    i*86:syllable:*:*)
	echo ${UNAME_MACHINE}-pc-syllable
	exit ;;
    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
	echo i386-unknown-lynxos${UNAME_RELEASE}
	exit ;;
    i*86:*DOS:*:*)
	echo ${UNAME_MACHINE}-pc-msdosdjgpp
	exit ;;
    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
	UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
		echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
	else
		echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
	fi
	exit ;;
    i*86:*:5:[678]*)
	# UnixWare 7.x, OpenUNIX and OpenServer 6.
	case `/bin/uname -X | grep "^Machine"` in
	    *486*)	     UNAME_MACHINE=i486 ;;
	    *Pentium)	     UNAME_MACHINE=i586 ;;
	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
	esac
	echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
	exit ;;
    i*86:*:3.2:*)
	if test -f /usr/options/cb.name; then
		UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
		echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
	elif /bin/uname -X 2>/dev/null >/dev/null ; then
		UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
		(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
		(/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
			&& UNAME_MACHINE=i586
		(/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
			&& UNAME_MACHINE=i686
		(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
			&& UNAME_MACHINE=i686
		echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
	else
		echo ${UNAME_MACHINE}-pc-sysv32
	fi
	exit ;;
    pc:*:*:*)
	# Left here for compatibility:
	# uname -m prints for DJGPP always 'pc', but it prints nothing about
	# the processor, so we play safe by assuming i586.
	# Note: whatever this is, it MUST be the same as what config.sub
	# prints for the "djgpp" host, or else GDB configury will decide that
	# this is a cross-build.
	echo i586-pc-msdosdjgpp
	exit ;;
    Intel:Mach:3*:*)
	echo i386-pc-mach3
	exit ;;
    paragon:*:*:*)
	echo i860-intel-osf1
	exit ;;
    i860:*:4.*:*)			# i860-SVR4
	if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
	  echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
	else # Add other i860-SVR4 vendors below as they are discovered.
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown case $UNAME_PROCESSOR in i386) eval $set_cc_for_build if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then UNAME_PROCESSOR="x86_64" fi fi ;; unknown) UNAME_PROCESSOR=powerpc ;; esac echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = "x86"; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; NSE-?:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. if test "$cputype" = "386"; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; esac #echo '(No uname command or uname output not recognized.)' 1>&2 #echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 eval $set_cc_for_build cat >$dummy.c < # include #endif main () { #if defined (sony) #if defined (MIPSEB) /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, I don't know.... 
*/ printf ("mips-sony-bsd\n"); exit (0); #else #include printf ("m68k-sony-newsos%s\n", #ifdef NEWSOS4 "4" #else "" #endif ); exit (0); #endif #endif #if defined (__arm) && defined (__acorn) && defined (__unix) printf ("arm-acorn-riscix\n"); exit (0); #endif #if defined (hp300) && !defined (hpux) printf ("m68k-hp-bsd\n"); exit (0); #endif #if defined (NeXT) #if !defined (__ARCHITECTURE__) #define __ARCHITECTURE__ "m68k" #endif int version; version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; if (version < 4) printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); else printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); exit (0); #endif #if defined (MULTIMAX) || defined (n16) #if defined (UMAXV) printf ("ns32k-encore-sysv\n"); exit (0); #else #if defined (CMU) printf ("ns32k-encore-mach\n"); exit (0); #else printf ("ns32k-encore-bsd\n"); exit (0); #endif #endif #endif #if defined (__386BSD__) printf ("i386-pc-bsd\n"); exit (0); #endif #if defined (sequent) #if defined (i386) printf ("i386-sequent-dynix\n"); exit (0); #endif #if defined (ns32000) printf ("ns32k-sequent-dynix\n"); exit (0); #endif #endif #if defined (_SEQUENT_) struct utsname un; uname(&un); if (strncmp(un.version, "V2", 2) == 0) { printf ("i386-sequent-ptx2\n"); exit (0); } if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ printf ("i386-sequent-ptx1\n"); exit (0); } printf ("i386-sequent-ptx\n"); exit (0); #endif #if defined (vax) # if !defined (ultrix) # include # if defined (BSD) # if BSD == 43 printf ("vax-dec-bsd4.3\n"); exit (0); # else # if BSD == 199006 printf ("vax-dec-bsd4.3reno\n"); exit (0); # else printf ("vax-dec-bsd\n"); exit (0); # endif # endif # else printf ("vax-dec-bsd\n"); exit (0); # endif # else printf ("vax-dec-ultrix\n"); exit (0); # endif #endif #if defined (alliant) && defined (i860) printf ("i860-alliant-bsd\n"); exit (0); #endif exit (1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } # Convex versions that predate uname can use getsysinfo(1) if [ -x /usr/convex/getsysinfo ] then case `getsysinfo -f cpu_type` in c1*) echo c1-convex-bsd exit ;; c2*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; c34*) echo c34-convex-bsd exit ;; c38*) echo c38-convex-bsd exit ;; c4*) echo c4-convex-bsd exit ;; esac fi cat >&2 < in order to provide the needed information to handle your system. 
config.guess timestamp = $timestamp uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: dovecot-2.2.9/COPYING.LGPL0000644000175000017500000006365012012166664011716 00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. 
To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. ^L Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. ^L GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. 
Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. ^L Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. 
When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. ^L 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. ^L 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. ^L 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. 
For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. ^L 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS ^L How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! dovecot-2.2.9/COPYING0000644000175000017500000000761412244400443011150 00000000000000See AUTHORS file for list of copyright holders. Everything in src/lib/, src/auth/, src/lib-sql/ and src/lib-ntlm/ is under MIT license (see COPYING.MIT) unless otherwise mentioned at the beginning of the file. Everything else is LGPLv2.1 (see COPYING.LGPL) unless otherwise mentioned at the beginning of the file. Current exceptions are: src/lib/md5.c : Public Domain src/lib/sha1.c and sha2.c: Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. Copyright (C) 2005, 2007 Olivier Gay All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. src/lib/UnicodeData.txt: Copyright (C) 1991-2007 Unicode, Inc. All rights reserved. Distributed under the Terms of Use in http://www.unicode.org/copyright.html. Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that (a) the above copyright notice(s) and this permission notice appear with all copies of the Data Files or Software, (b) both the above copyright notice(s) and this permission notice appear in associated documentation, and (c) there is clear notice in each modified Data File or in the Software as well as in the documentation associated with the Data File(s) or Software that the data or software has been modified. THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE. Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder. dovecot-2.2.9/dovecot.service.in0000644000175000017500000000030312244400443013533 00000000000000[Unit] Description=Dovecot IMAP/POP3 email server After=local-fs.target network.target [Service] Type=simple ExecStart=@sbindir@/dovecot -F NonBlocking=yes [Install] WantedBy=multi-user.target dovecot-2.2.9/ltmain.sh0000644000175000017500000105202212017213060011722 00000000000000 # libtool (GNU libtool) 2.4.2 # Written by Gordon Matzigkeit , 1996 # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, # 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. 
There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, # or obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # Usage: $progname [OPTION]... [MODE-ARG]... # # Provide generalized library-building support services. # # --config show all configuration variables # --debug enable verbose shell tracing # -n, --dry-run display commands without modifying any files # --features display basic configuration information and exit # --mode=MODE use operation mode MODE # --preserve-dup-deps don't remove duplicate dependency libraries # --quiet, --silent don't print informational messages # --no-quiet, --no-silent # print informational messages (default) # --no-warn don't display warning messages # --tag=TAG use configuration variables from tag TAG # -v, --verbose print more informational messages than default # --no-verbose don't print the extra informational messages # --version print version information # -h, --help, --help-all print short, long, or detailed help message # # MODE must be one of the following: # # clean remove files from the build directory # compile compile a source file into a libtool object # execute automatically set library path, then run a program # finish complete the installation of libtool libraries # install install libraries or executables # link create a library or an executable # uninstall remove libraries from an installed directory # # MODE-ARGS vary depending on the MODE. When passed as first option, # `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. # Try `$progname --help --mode=MODE' for a more detailed description of MODE. # # When reporting a bug, please describe a test case to reproduce it and # include the following information: # # host-triplet: $host # shell: $SHELL # compiler: $LTCC # compiler flags: $LTCFLAGS # linker: $LD (gnu? $with_gnu_ld) # $progname: (GNU libtool) 2.4.2 Debian-2.4.2-1 # automake: $automake_version # autoconf: $autoconf_version # # Report bugs to . # GNU libtool home page: . # General help using GNU software: . PROGRAM=libtool PACKAGE=libtool VERSION="2.4.2 Debian-2.4.2-1" TIMESTAMP="" package_revision=1.3337 # Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } # NLS nuisances: We save the old values to restore during execute mode. lt_user_locale= lt_safe_locale= for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${$lt_var+set}\" = set; then save_$lt_var=\$$lt_var $lt_var=C export $lt_var lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" fi" done LC_ALL=C LANGUAGE=C export LANGUAGE LC_ALL $lt_unset CDPATH # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh # is ksh but when the shell is invoked as "sh" and the current value of # the _XPG environment variable is not equal to 1 (one), the special # positional parameter $0, within a function call, is the name of the # function. progpath="$0" : ${CP="cp -f"} test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} : ${MAKE="make"} : ${MKDIR="mkdir"} : ${MV="mv -f"} : ${RM="rm -f"} : ${SHELL="${CONFIG_SHELL-/bin/sh}"} : ${Xsed="$SED -e 1s/^X//"} # Global variables: EXIT_SUCCESS=0 EXIT_FAILURE=1 EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. exit_status=$EXIT_SUCCESS # Make sure IFS has a sensible default lt_nl=' ' IFS=" $lt_nl" dirname="s,/[^/]*$,," basename="s,^.*/,," # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi } # func_dirname may be replaced by extended shell implementation # func_basename file func_basename () { func_basename_result=`$ECHO "${1}" | $SED "$basename"` } # func_basename may be replaced by extended shell implementation # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { # Extract subdirectory from the argument. func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` } # func_dirname_and_basename may be replaced by extended shell implementation # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). 
# func_strip_suffix prefix name func_stripname () { case ${2} in .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; esac } # func_stripname may be replaced by extended shell implementation # These SED scripts presuppose an absolute path with a trailing slash. pathcar='s,^/\([^/]*\).*$,\1,' pathcdr='s,^/[^/]*,,' removedotparts=':dotsl s@/\./@/@g t dotsl s,/\.$,/,' collapseslashes='s@/\{1,\}@/@g' finalslash='s,/*$,/,' # func_normal_abspath PATH # Remove doubled-up and trailing slashes, "." path components, # and cancel out any ".." path components in PATH after making # it an absolute path. # value returned in "$func_normal_abspath_result" func_normal_abspath () { # Start from root dir and reassemble the path. func_normal_abspath_result= func_normal_abspath_tpath=$1 func_normal_abspath_altnamespace= case $func_normal_abspath_tpath in "") # Empty path, that just means $cwd. func_stripname '' '/' "`pwd`" func_normal_abspath_result=$func_stripname_result return ;; # The next three entries are used to spot a run of precisely # two leading slashes without using negated character classes; # we take advantage of case's first-match behaviour. ///*) # Unusual form of absolute path, do nothing. ;; //*) # Not necessarily an ordinary path; POSIX reserves leading '//' # and for example Cygwin uses it to access remote file shares # over CIFS/SMB, so we conserve a leading double slash if found. func_normal_abspath_altnamespace=/ ;; /*) # Absolute path, do nothing. ;; *) # Relative path, prepend $cwd. func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath ;; esac # Cancel out all the simple stuff to save iterations. We also want # the path to end with a slash for ease of parsing, so make sure # there is one (and only one) here. func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` while :; do # Processed it all yet? if test "$func_normal_abspath_tpath" = / ; then # If we ascended to the root using ".." the result may be empty now. if test -z "$func_normal_abspath_result" ; then func_normal_abspath_result=/ fi break fi func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$pathcar"` func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$pathcdr"` # Figure out what to do with it case $func_normal_abspath_tcomponent in "") # Trailing empty path component, ignore it. ;; ..) # Parent dir; strip last assembled component from result. func_dirname "$func_normal_abspath_result" func_normal_abspath_result=$func_dirname_result ;; *) # Actual path component, append it. func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent ;; esac done # Restore leading double-slash if one was found on entry. func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result } # func_relative_path SRCDIR DSTDIR # generates a relative path from SRCDIR to DSTDIR, with a trailing # slash if non-empty, suitable for immediately appending a filename # without needing to append a separator. 
# value returned in "$func_relative_path_result" func_relative_path () { func_relative_path_result= func_normal_abspath "$1" func_relative_path_tlibdir=$func_normal_abspath_result func_normal_abspath "$2" func_relative_path_tbindir=$func_normal_abspath_result # Ascend the tree starting from libdir while :; do # check if we have found a prefix of bindir case $func_relative_path_tbindir in $func_relative_path_tlibdir) # found an exact match func_relative_path_tcancelled= break ;; $func_relative_path_tlibdir*) # found a matching prefix func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" func_relative_path_tcancelled=$func_stripname_result if test -z "$func_relative_path_result"; then func_relative_path_result=. fi break ;; *) func_dirname $func_relative_path_tlibdir func_relative_path_tlibdir=${func_dirname_result} if test "x$func_relative_path_tlibdir" = x ; then # Have to descend all the way to the root! func_relative_path_result=../$func_relative_path_result func_relative_path_tcancelled=$func_relative_path_tbindir break fi func_relative_path_result=../$func_relative_path_result ;; esac done # Now calculate path; take care to avoid doubling-up slashes. func_stripname '' '/' "$func_relative_path_result" func_relative_path_result=$func_stripname_result func_stripname '/' '/' "$func_relative_path_tcancelled" if test "x$func_stripname_result" != x ; then func_relative_path_result=${func_relative_path_result}/${func_stripname_result} fi # Normalisation. If bindir is libdir, return empty string, # else relative path ending with a slash; either way, target # file name can be directly appended. if test ! -z "$func_relative_path_result"; then func_stripname './' '' "$func_relative_path_result/" func_relative_path_result=$func_stripname_result fi } # The name of this program: func_dirname_and_basename "$progpath" progname=$func_basename_result # Make sure we have an absolute path for reexecution: case $progpath in [\\/]*|[A-Za-z]:\\*) ;; *[\\/]*) progdir=$func_dirname_result progdir=`cd "$progdir" && pwd` progpath="$progdir/$progname" ;; *) save_IFS="$IFS" IFS=${PATH_SEPARATOR-:} for progdir in $PATH; do IFS="$save_IFS" test -x "$progdir/$progname" && break done IFS="$save_IFS" test -n "$progdir" || progdir=`pwd` progpath="$progdir/$progname" ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. Xsed="${SED}"' -e 1s/^X//' sed_quote_subst='s/\([`"$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution that turns a string into a regex matching for the # string literally. sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' # Sed substitution that converts a w32 file name or path # which contains forward slashes, into one that contains # (escaped) backslashes. A very naive implementation. lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' # Re-`\' parameter expansions in output of double_quote_subst that were # `\'-ed in input to the same. If an odd number of `\' preceded a '$' # in input to double_quote_subst, that '$' was protected from expansion. # Since each input `\' is now two `\'s, look for any number of runs of # four `\'s followed by two `\'s and then a '$'. `\' that '$'. 
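# As an illustration (not part of the upstream script), the two quoting
# substitutions defined above differ only in how they treat '$':
#
#   $ECHO 'echo "$HOME"' | $SED "$sed_quote_subst"      # -> echo \"\$HOME\"
#   $ECHO 'echo "$HOME"' | $SED "$double_quote_subst"   # -> echo \"$HOME\"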
bs='\\' bs2='\\\\' bs4='\\\\\\\\' dollar='\$' sed_double_backslash="\ s/$bs4/&\\ /g s/^$bs2$dollar/$bs&/ s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g s/\n//g" # Standard options: opt_dry_run=false opt_help=false opt_quiet=false opt_verbose=false opt_warning=: # func_echo arg... # Echo program name prefixed message, along with the current mode # name if it has been set yet. func_echo () { $ECHO "$progname: ${opt_mode+$opt_mode: }$*" } # func_verbose arg... # Echo program name prefixed message in verbose mode only. func_verbose () { $opt_verbose && func_echo ${1+"$@"} # A bug in bash halts the script if the last line of a function # fails when set -e is in force, so we need another command to # work around that: : } # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "$*" } # func_error arg... # Echo program name prefixed message to standard error. func_error () { $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 } # func_warning arg... # Echo program name prefixed warning message to standard error. func_warning () { $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 # bash bug again: : } # func_fatal_error arg... # Echo program name prefixed message to standard error, and exit. func_fatal_error () { func_error ${1+"$@"} exit $EXIT_FAILURE } # func_fatal_help arg... # Echo program name prefixed message to standard error, followed by # a help hint, and exit. func_fatal_help () { func_error ${1+"$@"} func_fatal_error "$help" } help="Try \`$progname --help' for more information." ## default # func_grep expression filename # Check whether EXPRESSION matches any line of FILENAME, without output. func_grep () { $GREP "$1" "$2" >/dev/null 2>&1 } # func_mkdir_p directory-path # Make sure the entire path to DIRECTORY-PATH is available. func_mkdir_p () { my_directory_path="$1" my_dir_list= if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then # Protect directory names starting with `-' case $my_directory_path in -*) my_directory_path="./$my_directory_path" ;; esac # While some portion of DIR does not yet exist... while test ! -d "$my_directory_path"; do # ...make a list in topmost first order. Use a colon delimited # list incase some portion of path contains whitespace. my_dir_list="$my_directory_path:$my_dir_list" # If the last portion added has no slash in it, the list is done case $my_directory_path in */*) ;; *) break ;; esac # ...otherwise throw away the child directory and loop my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"` done my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'` save_mkdir_p_IFS="$IFS"; IFS=':' for my_dir in $my_dir_list; do IFS="$save_mkdir_p_IFS" # mkdir can fail with a `File exist' error if two processes # try to create one of the directories concurrently. Don't # stop in that case! $MKDIR "$my_dir" 2>/dev/null || : done IFS="$save_mkdir_p_IFS" # Bail out if we (or some other process) failed to create a directory. test -d "$my_directory_path" || \ func_fatal_error "Failed to create \`$1'" fi } # func_mktempdir [string] # Make a temporary directory that won't clash with other running # libtool processes, and avoids race conditions if possible. If # given, STRING is the basename for that directory. 
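# Illustrative usage (a sketch, not part of the upstream script):
#
#   my_tmp=`func_mktempdir`
#   # $my_tmp now names a private directory such as /tmp/libtool-XXXXXXXX
#   # (suffix chosen by mktemp); under --dry-run only the name is returned
#   # and nothing is created.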
func_mktempdir () { my_template="${TMPDIR-/tmp}/${1-$progname}" if test "$opt_dry_run" = ":"; then # Return a directory name, but don't create it in dry-run mode my_tmpdir="${my_template}-$$" else # If mktemp works, use that first and foremost my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` if test ! -d "$my_tmpdir"; then # Failing that, at least try and use $RANDOM to avoid a race my_tmpdir="${my_template}-${RANDOM-0}$$" save_mktempdir_umask=`umask` umask 0077 $MKDIR "$my_tmpdir" umask $save_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure test -d "$my_tmpdir" || \ func_fatal_error "cannot create temporary directory \`$my_tmpdir'" fi $ECHO "$my_tmpdir" } # func_quote_for_eval arg # Aesthetically quote ARG to be evaled later. # This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT # is double-quoted, suitable for a subsequent eval, whereas # FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters # which are still active within double quotes backslashified. func_quote_for_eval () { case $1 in *[\\\`\"\$]*) func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;; *) func_quote_for_eval_unquoted_result="$1" ;; esac case $func_quote_for_eval_unquoted_result in # Double-quote args containing shell metacharacters to delay # word splitting, command substitution and and variable # expansion for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" ;; *) func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" esac } # func_quote_for_expand arg # Aesthetically quote ARG to be evaled later; same as above, # but do not quote variable references. func_quote_for_expand () { case $1 in *[\\\`\"]*) my_arg=`$ECHO "$1" | $SED \ -e "$double_quote_subst" -e "$sed_double_backslash"` ;; *) my_arg="$1" ;; esac case $my_arg in # Double-quote args containing shell metacharacters to delay # word splitting and command substitution for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") my_arg="\"$my_arg\"" ;; esac func_quote_for_expand_result="$my_arg" } # func_show_eval cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. func_show_eval () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$my_cmd" my_status=$? if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_show_eval_locale cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. Use the saved locale for evaluation. func_show_eval_locale () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$lt_user_locale $my_cmd" my_status=$? eval "$lt_safe_locale" if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_tr_sh # Turn $1 into a string suitable for a shell variable name. 
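# For instance (illustrative, not part of the upstream script):
#   func_tr_sh "libfoo-2.0"   # result is "libfoo_2_0"
#   func_tr_sh "9lives"       # result is "_9lives"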
# Result is stored in $func_tr_sh_result. All characters # not in the set a-zA-Z0-9_ are replaced with '_'. Further, # if $1 begins with a digit, a '_' is prepended as well. func_tr_sh () { case $1 in [0-9]* | *[!a-zA-Z0-9_]*) func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` ;; * ) func_tr_sh_result=$1 ;; esac } # func_version # Echo version message to standard output and exit. func_version () { $opt_debug $SED -n '/(C)/!b go :more /\./!{ N s/\n# / / b more } :go /^# '$PROGRAM' (GNU /,/# warranty; / { s/^# // s/^# *$// s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ p }' < "$progpath" exit $? } # func_usage # Echo short help message to standard output and exit. func_usage () { $opt_debug $SED -n '/^# Usage:/,/^# *.*--help/ { s/^# // s/^# *$// s/\$progname/'$progname'/ p }' < "$progpath" echo $ECHO "run \`$progname --help | more' for full usage" exit $? } # func_help [NOEXIT] # Echo long help message to standard output and exit, # unless 'noexit' is passed as argument. func_help () { $opt_debug $SED -n '/^# Usage:/,/# Report bugs to/ { :print s/^# // s/^# *$// s*\$progname*'$progname'* s*\$host*'"$host"'* s*\$SHELL*'"$SHELL"'* s*\$LTCC*'"$LTCC"'* s*\$LTCFLAGS*'"$LTCFLAGS"'* s*\$LD*'"$LD"'* s/\$with_gnu_ld/'"$with_gnu_ld"'/ s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/ s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/ p d } /^# .* home page:/b print /^# General help using/b print ' < "$progpath" ret=$? if test -z "$1"; then exit $ret fi } # func_missing_arg argname # Echo program name prefixed message to standard error and set global # exit_cmd. func_missing_arg () { $opt_debug func_error "missing argument for $1." exit_cmd=exit } # func_split_short_opt shortopt # Set func_split_short_opt_name and func_split_short_opt_arg shell # variables after splitting SHORTOPT after the 2nd character. func_split_short_opt () { my_sed_short_opt='1s/^\(..\).*$/\1/;q' my_sed_short_rest='1s/^..\(.*\)$/\1/;q' func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` } # func_split_short_opt may be replaced by extended shell implementation # func_split_long_opt longopt # Set func_split_long_opt_name and func_split_long_opt_arg shell # variables after splitting LONGOPT at the `=' sign. func_split_long_opt () { my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' my_sed_long_arg='1s/^--[^=]*=//' func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` } # func_split_long_opt may be replaced by extended shell implementation exit_cmd=: magic="%%%MAGIC variable%%%" magic_exe="%%%MAGIC EXE variable%%%" # Global variables. nonopt= preserve_args= lo2o="s/\\.lo\$/.${objext}/" o2lo="s/\\.${objext}\$/.lo/" extracted_archives= extracted_serial=0 # If this variable is set in any of the actions, the command in it # will be execed at the end. This prevents here-documents from being # left over by shells. exec_cmd= # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "${1}=\$${1}\${2}" } # func_append may be replaced by extended shell implementation # func_append_quoted var value # Quote VALUE and append to the end of shell variable VAR, separated # by a space. func_append_quoted () { func_quote_for_eval "${2}" eval "${1}=\$${1}\\ \$func_quote_for_eval_result" } # func_append_quoted may be replaced by extended shell implementation # func_arith arithmetic-term... 
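# Illustrative usage (a sketch, not part of the upstream script):
#   func_arith 2 + 3
#   # $func_arith_result is now 5 (the terms are handed to expr)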
func_arith () { func_arith_result=`expr "${@}"` } # func_arith may be replaced by extended shell implementation # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` } # func_len may be replaced by extended shell implementation # func_lo2o object func_lo2o () { func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` } # func_lo2o may be replaced by extended shell implementation # func_xform libobj-or-source func_xform () { func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` } # func_xform may be replaced by extended shell implementation # func_fatal_configuration arg... # Echo program name prefixed message to standard error, followed by # a configuration failure hint, and exit. func_fatal_configuration () { func_error ${1+"$@"} func_error "See the $PACKAGE documentation for more information." func_fatal_error "Fatal configuration error." } # func_config # Display the configuration for all the tags in this script. func_config () { re_begincf='^# ### BEGIN LIBTOOL' re_endcf='^# ### END LIBTOOL' # Default configuration. $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" # Now print the configurations for the tags. for tagname in $taglist; do $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" done exit $? } # func_features # Display the features supported by this script. func_features () { echo "host: $host" if test "$build_libtool_libs" = yes; then echo "enable shared libraries" else echo "disable shared libraries" fi if test "$build_old_libs" = yes; then echo "enable static libraries" else echo "disable static libraries" fi exit $? } # func_enable_tag tagname # Verify that TAGNAME is valid, and either flag an error and exit, or # enable the TAGNAME tag. We also add TAGNAME to the global $taglist # variable here. func_enable_tag () { # Global variable: tagname="$1" re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" sed_extractcf="/$re_begincf/,/$re_endcf/p" # Validate tagname. case $tagname in *[!-_A-Za-z0-9,/]*) func_fatal_error "invalid tag name: $tagname" ;; esac # Don't test for the "default" C tag, as we know it's # there but not specially marked. case $tagname in CC) ;; *) if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then taglist="$taglist $tagname" # Evaluate the configuration. Be careful to quote the path # and the sed script, to avoid splitting on whitespace, but # also don't use non-portable quotes within backquotes within # quotes we have to do it in 2 steps: extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` eval "$extractedcf" else func_error "ignoring unknown tag $tagname" fi ;; esac } # func_check_version_match # Ensure that we are using m4 macros, and libtool script from the same # release of libtool. func_check_version_match () { if test "$package_revision" != "$macro_revision"; then if test "$VERSION" != "$macro_version"; then if test -z "$macro_version"; then cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from an older release. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from $PACKAGE $macro_version. 
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF fi else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, $progname: but the definition of this LT_INIT comes from revision $macro_revision. $progname: You should recreate aclocal.m4 with macros from revision $package_revision $progname: of $PACKAGE $VERSION and run autoconf again. _LT_EOF fi exit $EXIT_MISMATCH fi } # Shorthand for --mode=foo, only valid as the first argument case $1 in clean|clea|cle|cl) shift; set dummy --mode clean ${1+"$@"}; shift ;; compile|compil|compi|comp|com|co|c) shift; set dummy --mode compile ${1+"$@"}; shift ;; execute|execut|execu|exec|exe|ex|e) shift; set dummy --mode execute ${1+"$@"}; shift ;; finish|finis|fini|fin|fi|f) shift; set dummy --mode finish ${1+"$@"}; shift ;; install|instal|insta|inst|ins|in|i) shift; set dummy --mode install ${1+"$@"}; shift ;; link|lin|li|l) shift; set dummy --mode link ${1+"$@"}; shift ;; uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) shift; set dummy --mode uninstall ${1+"$@"}; shift ;; esac # Option defaults: opt_debug=: opt_dry_run=false opt_config=false opt_preserve_dup_deps=false opt_features=false opt_finish=false opt_help=false opt_help_all=false opt_silent=: opt_warning=: opt_verbose=: opt_silent=false opt_verbose=false # Parse options once, thoroughly. This comes as soon as possible in the # script to make things like `--version' happen as quickly as we can. { # this just eases exit handling while test $# -gt 0; do opt="$1" shift case $opt in --debug|-x) opt_debug='set -x' func_echo "enabling shell trace mode" $opt_debug ;; --dry-run|--dryrun|-n) opt_dry_run=: ;; --config) opt_config=: func_config ;; --dlopen|-dlopen) optarg="$1" opt_dlopen="${opt_dlopen+$opt_dlopen }$optarg" shift ;; --preserve-dup-deps) opt_preserve_dup_deps=: ;; --features) opt_features=: func_features ;; --finish) opt_finish=: set dummy --mode finish ${1+"$@"}; shift ;; --help) opt_help=: ;; --help-all) opt_help_all=: opt_help=': help-all' ;; --mode) test $# = 0 && func_missing_arg $opt && break optarg="$1" opt_mode="$optarg" case $optarg in # Valid mode arguments: clean|compile|execute|finish|install|link|relink|uninstall) ;; # Catch anything else as an error *) func_error "invalid argument for $opt" exit_cmd=exit break ;; esac shift ;; --no-silent|--no-quiet) opt_silent=false func_append preserve_args " $opt" ;; --no-warning|--no-warn) opt_warning=false func_append preserve_args " $opt" ;; --no-verbose) opt_verbose=false func_append preserve_args " $opt" ;; --silent|--quiet) opt_silent=: func_append preserve_args " $opt" opt_verbose=false ;; --verbose|-v) opt_verbose=: func_append preserve_args " $opt" opt_silent=false ;; --tag) test $# = 0 && func_missing_arg $opt && break optarg="$1" opt_tag="$optarg" func_append preserve_args " $opt $optarg" func_enable_tag "$optarg" shift ;; -\?|-h) func_usage ;; --help) func_help ;; --version) func_version ;; # Separate optargs to long options: --*=*) func_split_long_opt "$opt" set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} shift ;; # Separate non-argument short options: -\?*|-h*|-n*|-v*) func_split_short_opt "$opt" set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} shift ;; --) break ;; -*) func_fatal_help "unrecognized option \`$opt'" ;; *) set dummy "$opt" ${1+"$@"}; shift; break ;; esac done # Validate options: # save first non-option argument if test "$#" -gt 0; 
then nonopt="$opt" shift fi # preserve --debug test "$opt_debug" = : || func_append preserve_args " --debug" case $host in *cygwin* | *mingw* | *pw32* | *cegcc*) # don't eliminate duplications in $postdeps and $predeps opt_duplicate_compiler_generated_deps=: ;; *) opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps ;; esac $opt_help || { # Sanity checks first: func_check_version_match if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then func_fatal_configuration "not configured to build any kind of library" fi # Darwin sucks eval std_shrext=\"$shrext_cmds\" # Only execute mode is allowed to have -dlopen flags. if test -n "$opt_dlopen" && test "$opt_mode" != execute; then func_error "unrecognized option \`-dlopen'" $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help="$help" help="Try \`$progname --help --mode=$opt_mode' for more information." } # Bail if the options were screwed $exit_cmd $EXIT_FAILURE } ## ----------- ## ## Main. ## ## ----------- ## # func_lalib_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_lalib_p () { test -f "$1" && $SED -e 4q "$1" 2>/dev/null \ | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 } # func_lalib_unsafe_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function implements the same check as func_lalib_p without # resorting to external programs. To this end, it redirects stdin and # closes it afterwards, without saving the original file descriptor. # As a safety measure, use it only where a negative result would be # fatal anyway. Works if `file' does not exist. func_lalib_unsafe_p () { lalib_p=no if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then for lalib_p_l in 1 2 3 4 do read lalib_p_line case "$lalib_p_line" in \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; esac done exec 0<&5 5<&- fi test "$lalib_p" = yes } # func_ltwrapper_script_p file # True iff FILE is a libtool wrapper script # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_script_p () { func_lalib_p "$1" } # func_ltwrapper_executable_p file # True iff FILE is a libtool wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_executable_p () { func_ltwrapper_exec_suffix= case $1 in *.exe) ;; *) func_ltwrapper_exec_suffix=.exe ;; esac $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 } # func_ltwrapper_scriptname file # Assumes file is an ltwrapper_executable # uses $file to determine the appropriate filename for a # temporary ltwrapper_script. func_ltwrapper_scriptname () { func_dirname_and_basename "$1" "" "." func_stripname '' '.exe' "$func_basename_result" func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" } # func_ltwrapper_p file # True iff FILE is a libtool wrapper script or wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_p () { func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" } # func_execute_cmds commands fail_cmd # Execute tilde-delimited COMMANDS. # If FAIL_CMD is given, eval that upon failure. # FAIL_CMD may read-access the current command in variable CMD! 
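# Illustrative usage (a sketch, not part of the upstream script; the
# commands and the $lib variable are hypothetical):
#
#   func_execute_cmds 'chmod 644 $lib~ranlib $lib' 'exit $?'
#   # each tilde-separated command is expanded and run via func_show_eval;
#   # "exit $?" is evaluated if one of them fails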
func_execute_cmds () { $opt_debug save_ifs=$IFS; IFS='~' for cmd in $1; do IFS=$save_ifs eval cmd=\"$cmd\" func_show_eval "$cmd" "${2-:}" done IFS=$save_ifs } # func_source file # Source FILE, adding directory component if necessary. # Note that it is not necessary on cygwin/mingw to append a dot to # FILE even if both FILE and FILE.exe exist: automatic-append-.exe # behavior happens only for exec(3), not for open(2)! Also, sourcing # `FILE.' does not work on cygwin managed mounts. func_source () { $opt_debug case $1 in */* | *\\*) . "$1" ;; *) . "./$1" ;; esac } # func_resolve_sysroot PATH # Replace a leading = in PATH with a sysroot. Store the result into # func_resolve_sysroot_result func_resolve_sysroot () { func_resolve_sysroot_result=$1 case $func_resolve_sysroot_result in =*) func_stripname '=' '' "$func_resolve_sysroot_result" func_resolve_sysroot_result=$lt_sysroot$func_stripname_result ;; esac } # func_replace_sysroot PATH # If PATH begins with the sysroot, replace it with = and # store the result into func_replace_sysroot_result. func_replace_sysroot () { case "$lt_sysroot:$1" in ?*:"$lt_sysroot"*) func_stripname "$lt_sysroot" '' "$1" func_replace_sysroot_result="=$func_stripname_result" ;; *) # Including no sysroot. func_replace_sysroot_result=$1 ;; esac } # func_infer_tag arg # Infer tagged configuration to use if any are available and # if one wasn't chosen via the "--tag" command line option. # Only attempt this if the compiler in the base compile # command doesn't match the default compiler. # arg is usually of the form 'gcc ...' func_infer_tag () { $opt_debug if test -n "$available_tags" && test -z "$tagname"; then CC_quoted= for arg in $CC; do func_append_quoted CC_quoted "$arg" done CC_expanded=`func_echo_all $CC` CC_quoted_expanded=`func_echo_all $CC_quoted` case $@ in # Blanks in the command may have been stripped by the calling shell, # but not from the CC environment variable when configure was run. " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; # Blanks at the start of $base_compile will cause this to fail # if we don't check for them as well. *) for z in $available_tags; do if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then # Evaluate the configuration. eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" CC_quoted= for arg in $CC; do # Double-quote args containing other shell metacharacters. func_append_quoted CC_quoted "$arg" done CC_expanded=`func_echo_all $CC` CC_quoted_expanded=`func_echo_all $CC_quoted` case "$@ " in " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) # The compiler in the base compile command matches # the one in the tagged configuration. # Assume this is the tagged configuration we want. tagname=$z break ;; esac fi done # If $tagname still isn't set, then no tagged configuration # was found and let the user know that the "--tag" command # line option must be used. if test -z "$tagname"; then func_echo "unable to infer tagged configuration" func_fatal_error "specify a tag with \`--tag'" # else # func_verbose "using $tagname tagged configuration" fi ;; esac fi } # func_write_libtool_object output_name pic_name nonpic_name # Create a libtool object file (analogous to a ".la" file), # but don't create it if we're doing a dry run. 
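# Illustrative usage (a sketch, not part of the upstream script), in the
# same form compile mode uses further below, with ".libs" as the usual
# value of $objdir:
#
#   func_write_libtool_object "foo.lo" ".libs/foo.o" "foo.o"
#   # writes a small foo.lo control file recording the PIC and non-PIC
#   # object names; skipped entirely under --dry-run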
func_write_libtool_object () { write_libobj=${1} if test "$build_libtool_libs" = yes; then write_lobj=\'${2}\' else write_lobj=none fi if test "$build_old_libs" = yes; then write_oldobj=\'${3}\' else write_oldobj=none fi $opt_dry_run || { cat >${write_libobj}T </dev/null` if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | $SED -e "$lt_sed_naive_backslashify"` else func_convert_core_file_wine_to_w32_result= fi fi } # end: func_convert_core_file_wine_to_w32 # func_convert_core_path_wine_to_w32 ARG # Helper function used by path conversion functions when $build is *nix, and # $host is mingw, cygwin, or some other w32 environment. Relies on a correctly # configured wine environment available, with the winepath program in $build's # $PATH. Assumes ARG has no leading or trailing path separator characters. # # ARG is path to be converted from $build format to win32. # Result is available in $func_convert_core_path_wine_to_w32_result. # Unconvertible file (directory) names in ARG are skipped; if no directory names # are convertible, then the result may be empty. func_convert_core_path_wine_to_w32 () { $opt_debug # unfortunately, winepath doesn't convert paths, only file names func_convert_core_path_wine_to_w32_result="" if test -n "$1"; then oldIFS=$IFS IFS=: for func_convert_core_path_wine_to_w32_f in $1; do IFS=$oldIFS func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" if test -n "$func_convert_core_file_wine_to_w32_result" ; then if test -z "$func_convert_core_path_wine_to_w32_result"; then func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" else func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" fi fi done IFS=$oldIFS fi } # end: func_convert_core_path_wine_to_w32 # func_cygpath ARGS... # Wrapper around calling the cygpath program via LT_CYGPATH. This is used when # when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) # $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or # (2), returns the Cygwin file name or path in func_cygpath_result (input # file name or path is assumed to be in w32 format, as previously converted # from $build's *nix or MSYS format). In case (3), returns the w32 file name # or path in func_cygpath_result (input file name or path is assumed to be in # Cygwin format). Returns an empty string on error. # # ARGS are passed to cygpath, with the last one being the file name or path to # be converted. # # Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH # environment variable; do not put it in $PATH. func_cygpath () { $opt_debug if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` if test "$?" -ne 0; then # on failure, ensure result is empty func_cygpath_result= fi else func_cygpath_result= func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" fi } #end: func_cygpath # func_convert_core_msys_to_w32 ARG # Convert file name or path ARG from MSYS format to w32 format. Return # result in func_convert_core_msys_to_w32_result. 
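# Illustrative example (a sketch, not part of the upstream script; the path
# is hypothetical and the drive mapping depends on the MSYS installation):
#
#   func_convert_core_msys_to_w32 '/c/projects/foo.lo'
#   # func_convert_core_msys_to_w32_result becomes roughly 'c:\projects\foo.lo'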
func_convert_core_msys_to_w32 () { $opt_debug # awkward: cmd appends spaces to result func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` } #end: func_convert_core_msys_to_w32 # func_convert_file_check ARG1 ARG2 # Verify that ARG1 (a file name in $build format) was converted to $host # format in ARG2. Otherwise, emit an error message, but continue (resetting # func_to_host_file_result to ARG1). func_convert_file_check () { $opt_debug if test -z "$2" && test -n "$1" ; then func_error "Could not determine host file name corresponding to" func_error " \`$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback: func_to_host_file_result="$1" fi } # end func_convert_file_check # func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH # Verify that FROM_PATH (a path in $build format) was converted to $host # format in TO_PATH. Otherwise, emit an error message, but continue, resetting # func_to_host_file_result to a simplistic fallback value (see below). func_convert_path_check () { $opt_debug if test -z "$4" && test -n "$3"; then func_error "Could not determine the host path corresponding to" func_error " \`$3'" func_error "Continuing, but uninstalled executables may not work." # Fallback. This is a deliberately simplistic "conversion" and # should not be "improved". See libtool.info. if test "x$1" != "x$2"; then lt_replace_pathsep_chars="s|$1|$2|g" func_to_host_path_result=`echo "$3" | $SED -e "$lt_replace_pathsep_chars"` else func_to_host_path_result="$3" fi fi } # end func_convert_path_check # func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG # Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT # and appending REPL if ORIG matches BACKPAT. func_convert_path_front_back_pathsep () { $opt_debug case $4 in $1 ) func_to_host_path_result="$3$func_to_host_path_result" ;; esac case $4 in $2 ) func_append func_to_host_path_result "$3" ;; esac } # end func_convert_path_front_back_pathsep ################################################## # $build to $host FILE NAME CONVERSION FUNCTIONS # ################################################## # invoked via `$to_host_file_cmd ARG' # # In each case, ARG is the path to be converted from $build to $host format. # Result will be available in $func_to_host_file_result. # func_to_host_file ARG # Converts the file name ARG from $build format to $host format. Return result # in func_to_host_file_result. func_to_host_file () { $opt_debug $to_host_file_cmd "$1" } # end func_to_host_file # func_to_tool_file ARG LAZY # converts the file name ARG from $build format to toolchain format. Return # result in func_to_tool_file_result. If the conversion in use is listed # in (the comma separated) LAZY, no conversion takes place. func_to_tool_file () { $opt_debug case ,$2, in *,"$to_tool_file_cmd",*) func_to_tool_file_result=$1 ;; *) $to_tool_file_cmd "$1" func_to_tool_file_result=$func_to_host_file_result ;; esac } # end func_to_tool_file # func_convert_file_noop ARG # Copy ARG to func_to_host_file_result. func_convert_file_noop () { func_to_host_file_result="$1" } # end func_convert_file_noop # func_convert_file_msys_to_w32 ARG # Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic # conversion to w32 is not available inside the cwrapper. Returns result in # func_to_host_file_result. 
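# Illustrative usage (a sketch, not part of the upstream script; the file
# name is hypothetical):
#
#   func_convert_file_msys_to_w32 '/c/build/libfoo.la'
#   # on an MSYS $build this leaves something like 'c:\build\libfoo.la'
#   # in func_to_host_file_result; an empty argument passes through unchanged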
func_convert_file_msys_to_w32 () { $opt_debug func_to_host_file_result="$1" if test -n "$1"; then func_convert_core_msys_to_w32 "$1" func_to_host_file_result="$func_convert_core_msys_to_w32_result" fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_msys_to_w32 # func_convert_file_cygwin_to_w32 ARG # Convert file name ARG from Cygwin to w32 format. Returns result in # func_to_host_file_result. func_convert_file_cygwin_to_w32 () { $opt_debug func_to_host_file_result="$1" if test -n "$1"; then # because $build is cygwin, we call "the" cygpath in $PATH; no need to use # LT_CYGPATH in this case. func_to_host_file_result=`cygpath -m "$1"` fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_cygwin_to_w32 # func_convert_file_nix_to_w32 ARG # Convert file name ARG from *nix to w32 format. Requires a wine environment # and a working winepath. Returns result in func_to_host_file_result. func_convert_file_nix_to_w32 () { $opt_debug func_to_host_file_result="$1" if test -n "$1"; then func_convert_core_file_wine_to_w32 "$1" func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_nix_to_w32 # func_convert_file_msys_to_cygwin ARG # Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. # Returns result in func_to_host_file_result. func_convert_file_msys_to_cygwin () { $opt_debug func_to_host_file_result="$1" if test -n "$1"; then func_convert_core_msys_to_w32 "$1" func_cygpath -u "$func_convert_core_msys_to_w32_result" func_to_host_file_result="$func_cygpath_result" fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_msys_to_cygwin # func_convert_file_nix_to_cygwin ARG # Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed # in a wine environment, working winepath, and LT_CYGPATH set. Returns result # in func_to_host_file_result. func_convert_file_nix_to_cygwin () { $opt_debug func_to_host_file_result="$1" if test -n "$1"; then # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. func_convert_core_file_wine_to_w32 "$1" func_cygpath -u "$func_convert_core_file_wine_to_w32_result" func_to_host_file_result="$func_cygpath_result" fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_nix_to_cygwin ############################################# # $build to $host PATH CONVERSION FUNCTIONS # ############################################# # invoked via `$to_host_path_cmd ARG' # # In each case, ARG is the path to be converted from $build to $host format. # The result will be available in $func_to_host_path_result. # # Path separators are also converted from $build format to $host format. If # ARG begins or ends with a path separator character, it is preserved (but # converted to $host format) on output. # # All path conversion functions are named using the following convention: # file name conversion function : func_convert_file_X_to_Y () # path conversion function : func_convert_path_X_to_Y () # where, for any given $build/$host combination the 'X_to_Y' value is the # same. If conversion functions are added for new $build/$host combinations, # the two new functions must follow this pattern, or func_init_to_host_path_cmd # will break. # func_init_to_host_path_cmd # Ensures that function "pointer" variable $to_host_path_cmd is set to the # appropriate value, based on the value of $to_host_file_cmd. 
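# For example (illustrative): if $to_host_file_cmd is
# func_convert_file_cygwin_to_w32, the func_stripname call below extracts
# "cygwin_to_w32" and sets to_host_path_cmd=func_convert_path_cygwin_to_w32.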
to_host_path_cmd= func_init_to_host_path_cmd () { $opt_debug if test -z "$to_host_path_cmd"; then func_stripname 'func_convert_file_' '' "$to_host_file_cmd" to_host_path_cmd="func_convert_path_${func_stripname_result}" fi } # func_to_host_path ARG # Converts the path ARG from $build format to $host format. Return result # in func_to_host_path_result. func_to_host_path () { $opt_debug func_init_to_host_path_cmd $to_host_path_cmd "$1" } # end func_to_host_path # func_convert_path_noop ARG # Copy ARG to func_to_host_path_result. func_convert_path_noop () { func_to_host_path_result="$1" } # end func_convert_path_noop # func_convert_path_msys_to_w32 ARG # Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic # conversion to w32 is not available inside the cwrapper. Returns result in # func_to_host_path_result. func_convert_path_msys_to_w32 () { $opt_debug func_to_host_path_result="$1" if test -n "$1"; then # Remove leading and trailing path separator characters from ARG. MSYS # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; # and winepath ignores them completely. func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" func_to_host_path_result="$func_convert_core_msys_to_w32_result" func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_msys_to_w32 # func_convert_path_cygwin_to_w32 ARG # Convert path ARG from Cygwin to w32 format. Returns result in # func_to_host_file_result. func_convert_path_cygwin_to_w32 () { $opt_debug func_to_host_path_result="$1" if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_cygwin_to_w32 # func_convert_path_nix_to_w32 ARG # Convert path ARG from *nix to w32 format. Requires a wine environment and # a working winepath. Returns result in func_to_host_file_result. func_convert_path_nix_to_w32 () { $opt_debug func_to_host_path_result="$1" if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" func_to_host_path_result="$func_convert_core_path_wine_to_w32_result" func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_nix_to_w32 # func_convert_path_msys_to_cygwin ARG # Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. # Returns result in func_to_host_file_result. 
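# Illustrative usage (a sketch, not part of the upstream script; the paths
# are hypothetical and assume the default /cygdrive prefix):
#
#   func_convert_path_msys_to_cygwin '/c/foo:/c/bar'
#   # leaves something like '/cygdrive/c/foo:/cygdrive/c/bar'
#   # in func_to_host_path_result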
func_convert_path_msys_to_cygwin () { $opt_debug func_to_host_path_result="$1" if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" func_cygpath -u -p "$func_convert_core_msys_to_w32_result" func_to_host_path_result="$func_cygpath_result" func_convert_path_check : : \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" : "$1" fi } # end func_convert_path_msys_to_cygwin # func_convert_path_nix_to_cygwin ARG # Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a # a wine environment, working winepath, and LT_CYGPATH set. Returns result in # func_to_host_file_result. func_convert_path_nix_to_cygwin () { $opt_debug func_to_host_path_result="$1" if test -n "$1"; then # Remove leading and trailing path separator characters from # ARG. msys behavior is inconsistent here, cygpath turns them # into '.;' and ';.', and winepath ignores them completely. func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" func_to_host_path_result="$func_cygpath_result" func_convert_path_check : : \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" : "$1" fi } # end func_convert_path_nix_to_cygwin # func_mode_compile arg... func_mode_compile () { $opt_debug # Get the compilation command and the source file. base_compile= srcfile="$nonopt" # always keep a non-empty value in "srcfile" suppress_opt=yes suppress_output= arg_mode=normal libobj= later= pie_flag= for arg do case $arg_mode in arg ) # do not "continue". Instead, add this to base_compile lastarg="$arg" arg_mode=normal ;; target ) libobj="$arg" arg_mode=normal continue ;; normal ) # Accept any command-line options. case $arg in -o) test -n "$libobj" && \ func_fatal_error "you cannot specify \`-o' more than once" arg_mode=target continue ;; -pie | -fpie | -fPIE) func_append pie_flag " $arg" continue ;; -shared | -static | -prefer-pic | -prefer-non-pic) func_append later " $arg" continue ;; -no-suppress) suppress_opt=no continue ;; -Xcompiler) arg_mode=arg # the next one goes into the "base_compile" arg list continue # The current "srcfile" will either be retained or ;; # replaced later. I would guess that would be a bug. -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result lastarg= save_ifs="$IFS"; IFS=',' for arg in $args; do IFS="$save_ifs" func_append_quoted lastarg "$arg" done IFS="$save_ifs" func_stripname ' ' '' "$lastarg" lastarg=$func_stripname_result # Add the arguments to base_compile. func_append base_compile " $lastarg" continue ;; *) # Accept the current argument as the source file. # The previous "srcfile" becomes the current argument. # lastarg="$srcfile" srcfile="$arg" ;; esac # case $arg ;; esac # case $arg_mode # Aesthetically quote the previous argument. func_append_quoted base_compile "$lastarg" done # for arg case $arg_mode in arg) func_fatal_error "you must specify an argument for -Xcompile" ;; target) func_fatal_error "you must specify a target with \`-o'" ;; *) # Get the name of the library object. test -z "$libobj" && { func_basename "$srcfile" libobj="$func_basename_result" } ;; esac # Recognize several different file suffixes. 
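# For example (illustrative; the file name is hypothetical): invoked as
#   libtool --mode=compile gcc -c foo.cpp
# no -o was given, so $libobj defaults to the source basename "foo.cpp"
# and the case statement below rewrites it to "foo.lo".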
# If the user specifies -o file.o, it is replaced with file.lo case $libobj in *.[cCFSifmso] | \ *.ada | *.adb | *.ads | *.asm | \ *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) func_xform "$libobj" libobj=$func_xform_result ;; esac case $libobj in *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; *) func_fatal_error "cannot determine name of library object from \`$libobj'" ;; esac func_infer_tag $base_compile for arg in $later; do case $arg in -shared) test "$build_libtool_libs" != yes && \ func_fatal_configuration "can not build a shared library" build_old_libs=no continue ;; -static) build_libtool_libs=no build_old_libs=yes continue ;; -prefer-pic) pic_mode=yes continue ;; -prefer-non-pic) pic_mode=no continue ;; esac done func_quote_for_eval "$libobj" test "X$libobj" != "X$func_quote_for_eval_result" \ && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ && func_warning "libobj name \`$libobj' may not contain shell special characters." func_dirname_and_basename "$obj" "/" "" objname="$func_basename_result" xdir="$func_dirname_result" lobj=${xdir}$objdir/$objname test -z "$base_compile" && \ func_fatal_help "you must specify a compilation command" # Delete any leftover library objects. if test "$build_old_libs" = yes; then removelist="$obj $lobj $libobj ${libobj}T" else removelist="$lobj $libobj ${libobj}T" fi # On Cygwin there's no "real" PIC flag so we must build both object types case $host_os in cygwin* | mingw* | pw32* | os2* | cegcc*) pic_mode=default ;; esac if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then # non-PIC code in shared libraries is not supported pic_mode=default fi # Calculate the filename of the output object if compiler does # not support -o with -c if test "$compiler_c_o" = no; then output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext} lockfile="$output_obj.lock" else output_obj= need_locks=no lockfile= fi # Lock this critical section if it is needed # We use this script file to make the link, it avoids creating a new file if test "$need_locks" = yes; then until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done elif test "$need_locks" = warn; then if test -f "$lockfile"; then $ECHO "\ *** ERROR, $lockfile exists and contains: `cat $lockfile 2>/dev/null` This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi func_append removelist " $output_obj" $ECHO "$srcfile" > "$lockfile" fi $opt_dry_run || $RM $removelist func_append removelist " $lockfile" trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 srcfile=$func_to_tool_file_result func_quote_for_eval "$srcfile" qsrcfile=$func_quote_for_eval_result # Only build a PIC object if we are building libtool libraries. if test "$build_libtool_libs" = yes; then # Without this assignment, base_compile gets emptied. 
fbsd_hideous_sh_bug=$base_compile if test "$pic_mode" != no; then command="$base_compile $qsrcfile $pic_flag" else # Don't build PIC code command="$base_compile $qsrcfile" fi func_mkdir_p "$xdir$objdir" if test -z "$output_obj"; then # Place PIC objects in $objdir func_append command " -o $lobj" fi func_show_eval_locale "$command" \ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' if test "$need_locks" = warn && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then func_show_eval '$MV "$output_obj" "$lobj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi # Allow error messages only from the first compilation. if test "$suppress_opt" = yes; then suppress_output=' >/dev/null 2>&1' fi fi # Only build a position-dependent object if we build old libraries. if test "$build_old_libs" = yes; then if test "$pic_mode" != yes; then # Don't build PIC code command="$base_compile $qsrcfile$pie_flag" else command="$base_compile $qsrcfile $pic_flag" fi if test "$compiler_c_o" = yes; then func_append command " -o $obj" fi # Suppress compiler output if we already did a PIC compilation. func_append command "$suppress_output" func_show_eval_locale "$command" \ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' if test "$need_locks" = warn && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then func_show_eval '$MV "$output_obj" "$obj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi fi $opt_dry_run || { func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" # Unlock the critical section if it was locked if test "$need_locks" != no; then removelist=$lockfile $RM "$lockfile" fi } exit $EXIT_SUCCESS } $opt_help || { test "$opt_mode" = compile && func_mode_compile ${1+"$@"} } func_mode_help () { # We need to display help for each of the modes. case $opt_mode in "") # Generic help is extracted from the usage comments # at the start of this file. func_help ;; clean) $ECHO \ "Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... Remove files from the build directory. RM is the name of the program to use to delete files associated with each FILE (typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed to RM. If FILE is a libtool library, object or program, all the files associated with it are deleted. 
Otherwise, only FILE itself is deleted using RM." ;; compile) $ECHO \ "Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE Compile a source file into a libtool library object. This mode accepts the following additional options: -o OUTPUT-FILE set the output file name to OUTPUT-FILE -no-suppress do not suppress compiler output for multiple passes -prefer-pic try to build PIC objects only -prefer-non-pic try to build non-PIC objects only -shared do not build a \`.o' file suitable for static linking -static only build a \`.o' file suitable for static linking -Wc,FLAG pass FLAG directly to the compiler COMPILE-COMMAND is a command to be used in creating a \`standard' object file from the given SOURCEFILE. The output file name is determined by removing the directory component from SOURCEFILE, then substituting the C source code suffix \`.c' with the library object suffix, \`.lo'." ;; execute) $ECHO \ "Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... Automatically set library path, then run a program. This mode accepts the following additional options: -dlopen FILE add the directory containing FILE to the library path This mode sets the library path environment variable according to \`-dlopen' flags. If any of the ARGS are libtool executable wrappers, then they are translated into their corresponding uninstalled binary, and any of their required library directories are added to the library path. Then, COMMAND is executed, with ARGS as arguments." ;; finish) $ECHO \ "Usage: $progname [OPTION]... --mode=finish [LIBDIR]... Complete the installation of libtool libraries. Each LIBDIR is a directory that contains libtool libraries. The commands that this mode executes may require superuser privileges. Use the \`--dry-run' option if you just want to see what would be executed." ;; install) $ECHO \ "Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... Install executables or libraries. INSTALL-COMMAND is the installation command. The first component should be either the \`install' or \`cp' program. The following components of INSTALL-COMMAND are treated specially: -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation The rest of the components are interpreted as arguments to that command (only BSD-compatible install options are recognized)." ;; link) $ECHO \ "Usage: $progname [OPTION]... --mode=link LINK-COMMAND... Link object files or libraries together to form another library, or to create an executable program. LINK-COMMAND is a command using the C compiler that you would use to create a program from several object files. 
The following components of LINK-COMMAND are treated specially: -all-static do not do any dynamic linking at all -avoid-version do not add a version suffix if possible -bindir BINDIR specify path to binaries directory (for systems where libraries must be found in the PATH setting at runtime) -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) -export-symbols SYMFILE try to export only the symbols listed in SYMFILE -export-symbols-regex REGEX try to export only the symbols matching REGEX -LLIBDIR search LIBDIR for required installed libraries -lNAME OUTPUT-FILE requires the installed library libNAME -module build a library that can dlopened -no-fast-install disable the fast-install mode -no-install link a not-installable executable -no-undefined declare that a library does not refer to external symbols -o OUTPUT-FILE create OUTPUT-FILE from the specified objects -objectlist FILE Use a list of object files found in FILE to specify objects -precious-files-regex REGEX don't remove output files matching REGEX -release RELEASE specify package release information -rpath LIBDIR the created library will eventually be installed in LIBDIR -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries -shared only do dynamic linking of libtool libraries -shrext SUFFIX override the standard shared library file extension -static do not do any dynamic linking of uninstalled libtool libraries -static-libtool-libs do not do any dynamic linking of libtool libraries -version-info CURRENT[:REVISION[:AGE]] specify library version info [each variable defaults to 0] -weak LIBNAME declare that the target provides the LIBNAME interface -Wc,FLAG -Xcompiler FLAG pass linker-specific FLAG directly to the compiler -Wl,FLAG -Xlinker FLAG pass linker-specific FLAG directly to the linker -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) All other options (arguments beginning with \`-') are ignored. Every other argument is treated as a filename. Files ending in \`.la' are treated as uninstalled libtool libraries, other files are standard or library object files. If the OUTPUT-FILE ends in \`.la', then a libtool library is created, only library objects (\`.lo' files) may be specified, and \`-rpath' is required, except when creating a convenience library. If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created using \`ar' and \`ranlib', or on Windows using \`lib'. If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file is created, otherwise an executable program is created." ;; uninstall) $ECHO \ "Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... Remove libraries from an installation directory. RM is the name of the program to use to delete files associated with each FILE (typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed to RM. If FILE is a libtool library, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; *) func_fatal_help "invalid operation mode \`$opt_mode'" ;; esac echo $ECHO "Try \`$progname --help' for more information about other modes." 
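# A typical link-mode invocation, as an illustration of the options
# documented above (the names and the -rpath directory are hypothetical):
#
#   libtool --mode=link gcc -o libfoo.la foo.lo bar.lo \
#     -rpath /usr/local/lib -version-info 1:0:0
#
# builds libfoo.la from the listed .lo objects, with the real shared and
# static libraries placed under $objdir.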
} # Now that we've collected a possible --mode arg, show help if necessary if $opt_help; then if test "$opt_help" = :; then func_mode_help else { func_help noexit for opt_mode in compile link execute install finish uninstall clean; do func_mode_help done } | sed -n '1p; 2,$s/^Usage:/ or: /p' { func_help noexit for opt_mode in compile link execute install finish uninstall clean; do echo func_mode_help done } | sed '1d /^When reporting/,/^Report/{ H d } $x /information about other modes/d /more detailed .*MODE/d s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' fi exit $? fi # func_mode_execute arg... func_mode_execute () { $opt_debug # The first argument is the command name. cmd="$nonopt" test -z "$cmd" && \ func_fatal_help "you must specify a COMMAND" # Handle -dlopen flags immediately. for file in $opt_dlopen; do test -f "$file" \ || func_fatal_help "\`$file' is not a file" dir= case $file in *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$lib' is not a valid libtool archive" # Read the libtool library. dlname= library_names= func_source "$file" # Skip this library if it cannot be dlopened. if test -z "$dlname"; then # Warn if it was a shared library. test -n "$library_names" && \ func_warning "\`$file' was not linked with \`-export-dynamic'" continue fi func_dirname "$file" "" "." dir="$func_dirname_result" if test -f "$dir/$objdir/$dlname"; then func_append dir "/$objdir" else if test ! -f "$dir/$dlname"; then func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" fi fi ;; *.lo) # Just add the directory containing the .lo file. func_dirname "$file" "" "." dir="$func_dirname_result" ;; *) func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" continue ;; esac # Get the absolute pathname. absdir=`cd "$dir" && pwd` test -n "$absdir" && dir="$absdir" # Now add the directory to shlibpath_var. if eval "test -z \"\$$shlibpath_var\""; then eval "$shlibpath_var=\"\$dir\"" else eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" fi done # This variable tells wrapper scripts just to set shlibpath_var # rather than running their programs. libtool_execute_magic="$magic" # Check if any of the arguments is a wrapper script. args= for file do case $file in -* | *.la | *.lo ) ;; *) # Do a test to see if this is really a libtool program. if func_ltwrapper_script_p "$file"; then func_source "$file" # Transform arg to wrapped name. file="$progdir/$program" elif func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" func_source "$func_ltwrapper_scriptname_result" # Transform arg to wrapped name. file="$progdir/$program" fi ;; esac # Quote arguments (to preserve shell metacharacters). func_append_quoted args "$file" done if test "X$opt_dry_run" = Xfalse; then if test -n "$shlibpath_var"; then # Export the shlibpath_var. eval "export $shlibpath_var" fi # Restore saved environment variables for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${save_$lt_var+set}\" = set; then $lt_var=\$save_$lt_var; export $lt_var else $lt_unset $lt_var fi" done # Now prepare to actually exec the command. exec_cmd="\$cmd$args" else # Display what would be done. if test -n "$shlibpath_var"; then eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" echo "export $shlibpath_var" fi $ECHO "$cmd$args" exit $EXIT_SUCCESS fi } test "$opt_mode" = execute && func_mode_execute ${1+"$@"} # func_mode_finish arg... 
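# Finish mode completes the installation of libtool libraries in each LIBDIR
# named on the command line: it runs the platform's finish_cmds (often an
# ldconfig-style cache update, depending on the host) and prints the
# "Libraries have been installed in:" notice.  Example (illustrative path):
#
#   libtool --mode=finish /usr/local/lib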
func_mode_finish () { $opt_debug libs= libdirs= admincmds= for opt in "$nonopt" ${1+"$@"} do if test -d "$opt"; then func_append libdirs " $opt" elif test -f "$opt"; then if func_lalib_unsafe_p "$opt"; then func_append libs " $opt" else func_warning "\`$opt' is not a valid libtool archive" fi else func_fatal_error "invalid argument \`$opt'" fi done if test -n "$libs"; then if test -n "$lt_sysroot"; then sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" else sysroot_cmd= fi # Remove sysroot references if $opt_dry_run; then for lib in $libs; do echo "removing references to $lt_sysroot and \`=' prefixes from $lib" done else tmpdir=`func_mktempdir` for lib in $libs; do sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ > $tmpdir/tmp-la mv -f $tmpdir/tmp-la $lib done ${RM}r "$tmpdir" fi fi if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. func_execute_cmds "$finish_cmds" 'admincmds="$admincmds '"$cmd"'"' fi if test -n "$finish_eval"; then # Do the single finish_eval. eval cmds=\"$finish_eval\" $opt_dry_run || eval "$cmds" || func_append admincmds " $cmds" fi done fi # Exit here if they wanted silent mode. $opt_silent && exit $EXIT_SUCCESS if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then echo "----------------------------------------------------------------------" echo "Libraries have been installed in:" for libdir in $libdirs; do $ECHO " $libdir" done echo echo "If you ever happen to want to link against installed libraries" echo "in a given directory, LIBDIR, you must either use libtool, and" echo "specify the full pathname of the library, or use the \`-LLIBDIR'" echo "flag during linking and do at least one of the following:" if test -n "$shlibpath_var"; then echo " - add LIBDIR to the \`$shlibpath_var' environment variable" echo " during execution" fi if test -n "$runpath_var"; then echo " - add LIBDIR to the \`$runpath_var' environment variable" echo " during linking" fi if test -n "$hardcode_libdir_flag_spec"; then libdir=LIBDIR eval flag=\"$hardcode_libdir_flag_spec\" $ECHO " - use the \`$flag' linker flag" fi if test -n "$admincmds"; then $ECHO " - have your system administrator run these commands:$admincmds" fi if test -f /etc/ld.so.conf; then echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" fi echo echo "See any operating system documentation about shared libraries for" case $host in solaris2.[6789]|solaris2.1[0-9]) echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" echo "pages." ;; *) echo "more information, such as the ld(1) and ld.so(8) manual pages." ;; esac echo "----------------------------------------------------------------------" fi exit $EXIT_SUCCESS } test "$opt_mode" = finish && func_mode_finish ${1+"$@"} # func_mode_install arg... func_mode_install () { $opt_debug # There may be an optional sh(1) argument at the beginning of # install_prog (especially on Windows NT). if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || # Allow the use of GNU shtool's install command. case $nonopt in *shtool*) :;; *) false;; esac; then # Aesthetically quote it. func_quote_for_eval "$nonopt" install_prog="$func_quote_for_eval_result " arg=$1 shift else install_prog= arg=$nonopt fi # The real first argument should be the name of the installation program. # Aesthetically quote it. 
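# "Aesthetic" quoting here means func_quote_for_eval re-quotes ARG so that it
# survives a later eval and displays cleanly, typically by backslash-escaping
# shell metacharacters and wrapping the result in quotes when needed
# (illustratively, an argument containing a space is passed on as one word).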
func_quote_for_eval "$arg" func_append install_prog "$func_quote_for_eval_result" install_shared_prog=$install_prog case " $install_prog " in *[\\\ /]cp\ *) install_cp=: ;; *) install_cp=false ;; esac # We need to accept at least all the BSD install flags. dest= files= opts= prev= install_type= isdir=no stripme= no_mode=: for arg do arg2= if test -n "$dest"; then func_append files " $dest" dest=$arg continue fi case $arg in -d) isdir=yes ;; -f) if $install_cp; then :; else prev=$arg fi ;; -g | -m | -o) prev=$arg ;; -s) stripme=" -s" continue ;; -*) ;; *) # If the previous option needed an argument, then skip it. if test -n "$prev"; then if test "x$prev" = x-m && test -n "$install_override_mode"; then arg2=$install_override_mode no_mode=false fi prev= else dest=$arg continue fi ;; esac # Aesthetically quote the argument. func_quote_for_eval "$arg" func_append install_prog " $func_quote_for_eval_result" if test -n "$arg2"; then func_quote_for_eval "$arg2" fi func_append install_shared_prog " $func_quote_for_eval_result" done test -z "$install_prog" && \ func_fatal_help "you must specify an install program" test -n "$prev" && \ func_fatal_help "the \`$prev' option requires an argument" if test -n "$install_override_mode" && $no_mode; then if $install_cp; then :; else func_quote_for_eval "$install_override_mode" func_append install_shared_prog " -m $func_quote_for_eval_result" fi fi if test -z "$files"; then if test -z "$dest"; then func_fatal_help "no file or destination specified" else func_fatal_help "you must specify a destination" fi fi # Strip any trailing slash from the destination. func_stripname '' '/' "$dest" dest=$func_stripname_result # Check to see that the destination is a directory. test -d "$dest" && isdir=yes if test "$isdir" = yes; then destdir="$dest" destname= else func_dirname_and_basename "$dest" "" "." destdir="$func_dirname_result" destname="$func_basename_result" # Not a directory, so check to see that there is only one file specified. set dummy $files; shift test "$#" -gt 1 && \ func_fatal_help "\`$dest' is not a directory" fi case $destdir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) for file in $files; do case $file in *.lo) ;; *) func_fatal_help "\`$destdir' must be an absolute directory name" ;; esac done ;; esac # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" staticlibs= future_libdirs= current_libdirs= for file in $files; do # Do each installation. case $file in *.$libext) # Do the static libraries later. func_append staticlibs " $file" ;; *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$file' is not a valid libtool archive" library_names= old_library= relink_command= func_source "$file" # Add the libdir to current_libdirs if it is the destination. if test "X$destdir" = "X$libdir"; then case "$current_libdirs " in *" $libdir "*) ;; *) func_append current_libdirs " $libdir" ;; esac else # Note the libdir as a future libdir. case "$future_libdirs " in *" $libdir "*) ;; *) func_append future_libdirs " $libdir" ;; esac fi func_dirname "$file" "/" "" dir="$func_dirname_result" func_append dir "$objdir" if test -n "$relink_command"; then # Determine the prefix the user has applied to our future dir. 
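# Staged ("DESTDIR"-style) installs are detected here: the sed expression on
# the next line strips the recorded $libdir suffix from $destdir, so that, for
# example (illustrative values), destdir=/tmp/stage/usr/local/lib together
# with libdir=/usr/local/lib yields inst_prefix_dir=/tmp/stage, which is then
# fed back into the relink command through -inst-prefix-dir.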
inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` # Don't allow the user to place us outside of our expected # location b/c this prevents finding dependent libraries that # are installed to the same prefix. # At present, this check doesn't affect windows .dll's that # are installed into $libdir/../bin (currently, that works fine) # but it's something to keep an eye on. test "$inst_prefix_dir" = "$destdir" && \ func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" if test -n "$inst_prefix_dir"; then # Stick the inst_prefix_dir data into the link command. relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` else relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` fi func_warning "relinking \`$file'" func_show_eval "$relink_command" \ 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' fi # See the names of the shared library. set dummy $library_names; shift if test -n "$1"; then realname="$1" shift srcname="$realname" test -n "$relink_command" && srcname="$realname"T # Install the shared library and build the symlinks. func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ 'exit $?' tstripme="$stripme" case $host_os in cygwin* | mingw* | pw32* | cegcc*) case $realname in *.dll.a) tstripme="" ;; esac ;; esac if test -n "$tstripme" && test -n "$striplib"; then func_show_eval "$striplib $destdir/$realname" 'exit $?' fi if test "$#" -gt 0; then # Delete the old symlinks, and create new ones. # Try `ln -sf' first, because the `ln' binary might depend on # the symlink we replace! Solaris /bin/ln does not understand -f, # so we also need to try rm && ln -s. for linkname do test "$linkname" != "$realname" \ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done fi # Do each command in the postinstall commands. lib="$destdir/$realname" func_execute_cmds "$postinstall_cmds" 'exit $?' fi # Install the pseudo-library for information purposes. func_basename "$file" name="$func_basename_result" instname="$dir/$name"i func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' # Maybe install the static library, too. test -n "$old_library" && func_append staticlibs " $dir/$old_library" ;; *.lo) # Install (i.e. copy) a libtool object. # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # Deduce the name of the destination old-style object file. case $destfile in *.lo) func_lo2o "$destfile" staticdest=$func_lo2o_result ;; *.$objext) staticdest="$destfile" destfile= ;; *) func_fatal_help "cannot copy a libtool object to \`$destfile'" ;; esac # Install the libtool object if requested. test -n "$destfile" && \ func_show_eval "$install_prog $file $destfile" 'exit $?' # Install the old object if enabled. if test "$build_old_libs" = yes; then # Deduce the name of the old-style object file. func_lo2o "$file" staticobj=$func_lo2o_result func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' fi exit $EXIT_SUCCESS ;; *) # Figure out destination file name, if it wasn't already specified. 
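# For plain programs and other files the destination is derived just as for
# objects above: an explicitly given DESTNAME wins, otherwise the basename of
# FILE is appended to DESTDIR (illustratively, installing 'foo' into
# '/usr/local/bin' gives destfile=/usr/local/bin/foo).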
if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # If the file is missing, and there is a .exe on the end, strip it # because it is most likely a libtool script we actually want to # install stripped_ext="" case $file in *.exe) if test ! -f "$file"; then func_stripname '' '.exe' "$file" file=$func_stripname_result stripped_ext=".exe" fi ;; esac # Do a test to see if this is really a libtool program. case $host in *cygwin* | *mingw*) if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" wrapper=$func_ltwrapper_scriptname_result else func_stripname '' '.exe' "$file" wrapper=$func_stripname_result fi ;; *) wrapper=$file ;; esac if func_ltwrapper_script_p "$wrapper"; then notinst_deplibs= relink_command= func_source "$wrapper" # Check the variables that should have been set. test -z "$generated_by_libtool_version" && \ func_fatal_error "invalid libtool wrapper script \`$wrapper'" finalize=yes for lib in $notinst_deplibs; do # Check to see that each library is installed. libdir= if test -f "$lib"; then func_source "$lib" fi libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test if test -n "$libdir" && test ! -f "$libfile"; then func_warning "\`$lib' has not been installed in \`$libdir'" finalize=no fi done relink_command= func_source "$wrapper" outputname= if test "$fast_install" = no && test -n "$relink_command"; then $opt_dry_run || { if test "$finalize" = yes; then tmpdir=`func_mktempdir` func_basename "$file$stripped_ext" file="$func_basename_result" outputname="$tmpdir/$file" # Replace the output file specification. relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` $opt_silent || { func_quote_for_expand "$relink_command" eval "func_echo $func_quote_for_expand_result" } if eval "$relink_command"; then : else func_error "error: relink \`$file' with the above command before installing it" $opt_dry_run || ${RM}r "$tmpdir" continue fi file="$outputname" else func_warning "cannot relink \`$file'" fi } else # Install the binary that we compiled earlier. file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` fi fi # remove .exe since cygwin /usr/bin/install will append another # one anyway case $install_prog,$host in */usr/bin/install*,*cygwin*) case $file:$destfile in *.exe:*.exe) # this is ok ;; *.exe:*) destfile=$destfile.exe ;; *:*.exe) func_stripname '' '.exe' "$destfile" destfile=$func_stripname_result ;; esac ;; esac func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' $opt_dry_run || if test -n "$outputname"; then ${RM}r "$tmpdir" fi ;; esac done for file in $staticlibs; do func_basename "$file" name="$func_basename_result" # Set up the ranlib parameters. oldlib="$destdir/$name" func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result func_show_eval "$install_prog \$file \$oldlib" 'exit $?' if test -n "$stripme" && test -n "$old_striplib"; then func_show_eval "$old_striplib $tool_oldlib" 'exit $?' fi # Do each command in the postinstall commands. func_execute_cmds "$old_postinstall_cmds" 'exit $?' done test -n "$future_libdirs" && \ func_warning "remember to run \`$progname --finish$future_libdirs'" if test -n "$current_libdirs"; then # Maybe just do a dry run. 
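# When libraries were installed into their final libdir, libtool re-invokes
# itself in finish mode for those directories via exec_cmd below; under
# --dry-run a ' -n' is prepended first so that the nested finish run also
# behaves as a dry run.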
$opt_dry_run && current_libdirs=" -n$current_libdirs" exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' else exit $EXIT_SUCCESS fi } test "$opt_mode" = install && func_mode_install ${1+"$@"} # func_generate_dlsyms outputname originator pic_p # Extract symbols from dlprefiles and create ${outputname}S.o with # a dlpreopen symbol table. func_generate_dlsyms () { $opt_debug my_outputname="$1" my_originator="$2" my_pic_p="${3-no}" my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` my_dlsyms= if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then if test -n "$NM" && test -n "$global_symbol_pipe"; then my_dlsyms="${my_outputname}S.c" else func_error "not configured to extract global symbols from dlpreopened files" fi fi if test -n "$my_dlsyms"; then case $my_dlsyms in "") ;; *.c) # Discover the nlist of each of the dlfiles. nlist="$output_objdir/${my_outputname}.nm" func_show_eval "$RM $nlist ${nlist}S ${nlist}T" # Parse the name list into a source file. func_verbose "creating $output_objdir/$my_dlsyms" $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ /* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ /* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ #ifdef __cplusplus extern \"C\" { #endif #if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) #pragma GCC diagnostic ignored \"-Wstrict-prototypes\" #endif /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined(__osf__) /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif /* External symbol declarations for the compiler. */\ " if test "$dlself" = yes; then func_verbose "generating symbol list for \`$output'" $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" # Add our own program objects to the symbol list. 
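# Each object named in $objs/$old_deplibs is then scanned with $NM and
# filtered through $global_symbol_pipe (both determined at configure time),
# and the resulting symbol lines are appended to the .nm name list; the
# ': @PROGRAM@ ' marker written above tags the program's own entry so later
# sed passes can recognise it.  (Descriptive note; the exact pipe output
# format is configuration-dependent.)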
progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` for progfile in $progfiles; do func_to_tool_file "$progfile" func_convert_file_msys_to_w32 func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" done if test -n "$exclude_expsyms"; then $opt_dry_run || { eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi if test -n "$export_symbols_regex"; then $opt_dry_run || { eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi # Prepare the list of exported symbols if test -z "$export_symbols"; then export_symbols="$output_objdir/$outputname.exp" $opt_dry_run || { $RM $export_symbols eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' ;; esac } else $opt_dry_run || { eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' ;; esac } fi fi for dlprefile in $dlprefiles; do func_verbose "extracting global C symbols from \`$dlprefile'" func_basename "$dlprefile" name="$func_basename_result" case $host in *cygwin* | *mingw* | *cegcc* ) # if an import library, we need to obtain dlname if func_win32_import_lib_p "$dlprefile"; then func_tr_sh "$dlprefile" eval "curr_lafile=\$libfile_$func_tr_sh_result" dlprefile_dlbasename="" if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then # Use subshell, to avoid clobbering current variable values dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` if test -n "$dlprefile_dlname" ; then func_basename "$dlprefile_dlname" dlprefile_dlbasename="$func_basename_result" else # no lafile. user explicitly requested -dlpreopen . $sharedlib_from_linklib_cmd "$dlprefile" dlprefile_dlbasename=$sharedlib_from_linklib_result fi fi $opt_dry_run || { if test -n "$dlprefile_dlbasename" ; then eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' else func_warning "Could not compute DLL name from $name" eval '$ECHO ": $name " >> "$nlist"' fi func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" } else # not an import lib $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } fi ;; *) $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } ;; esac done $opt_dry_run || { # Make sure we have at least an empty file. test -f "$nlist" || : > "$nlist" if test -n "$exclude_expsyms"; then $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T $MV "$nlist"T "$nlist" fi # Try sorting and uniquifying the output. 
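# 'sort -k 3' is the POSIX field syntax; the 'sort +2' fallback below is the
# obsolete System V notation for the same third field, for sort programs that
# predate -k.  Either way the goal is a stable, duplicate-free symbol list;
# if neither form works, the unsorted list is used as-is.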
if $GREP -v "^: " < "$nlist" | if sort -k 3 /dev/null 2>&1; then sort -k 3 else sort +2 fi | uniq > "$nlist"S; then : else $GREP -v "^: " < "$nlist" > "$nlist"S fi if test -f "$nlist"S; then eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' else echo '/* NONE */' >> "$output_objdir/$my_dlsyms" fi echo >> "$output_objdir/$my_dlsyms" "\ /* The mapping between symbol names and symbols. */ typedef struct { const char *name; void *address; } lt_dlsymlist; extern LT_DLSYM_CONST lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[]; LT_DLSYM_CONST lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[] = {\ { \"$my_originator\", (void *) 0 }," case $need_lib_prefix in no) eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; *) eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; esac echo >> "$output_objdir/$my_dlsyms" "\ {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt_${my_prefix}_LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif\ " } # !$opt_dry_run pic_flag_for_symtable= case "$compile_command " in *" -static "*) ;; *) case $host in # compiling the symbol table file with pic_flag works around # a FreeBSD bug that causes programs to crash when -lm is # linked before any other PIC object. But we must not use # pic_flag when linking with -static. The problem exists in # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; *-*-hpux*) pic_flag_for_symtable=" $pic_flag" ;; *) if test "X$my_pic_p" != Xno; then pic_flag_for_symtable=" $pic_flag" fi ;; esac ;; esac symtab_cflags= for arg in $LTCFLAGS; do case $arg in -pie | -fpie | -fPIE) ;; *) func_append symtab_cflags " $arg" ;; esac done # Now compile the dynamic symbol file. func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' # Clean up the generated files. func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' # Transform the symbol file into the correct name. symfileobj="$output_objdir/${my_outputname}S.$objext" case $host in *cygwin* | *mingw* | *cegcc* ) if test -f "$output_objdir/$my_outputname.def"; then compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` else compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` fi ;; *) compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` ;; esac ;; *) func_fatal_error "unknown suffix for \`$my_dlsyms'" ;; esac else # We keep going just in case the user didn't refer to # lt_preloaded_symbols. The linker will fail if global_symbol_pipe # really was required. # Nullify the symbol file. 
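# With no symbol table to generate, the '@SYMFILE@' placeholder is simply
# deleted from both commands by the substitutions below, e.g. (illustrative)
#   'gcc ... @SYMFILE@ main.o'   becomes   'gcc ... main.o'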
compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` fi } # func_win32_libid arg # return the library type of file 'arg' # # Need a lot of goo to handle *both* DLLs and import libs # Has to be a shell function in order to 'eat' the argument # that is supplied when $file_magic_command is called. # Despite the name, also deal with 64 bit binaries. func_win32_libid () { $opt_debug win32_libid_type="unknown" win32_fileres=`file -L $1 2>/dev/null` case $win32_fileres in *ar\ archive\ import\ library*) # definitely import win32_libid_type="x86 archive import" ;; *ar\ archive*) # could be an import, or static # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then func_to_tool_file "$1" func_convert_file_msys_to_w32 win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | $SED -n -e ' 1,100{ / I /{ s,.*,import, p q } }'` case $win32_nmres in import*) win32_libid_type="x86 archive import";; *) win32_libid_type="x86 archive static";; esac fi ;; *DLL*) win32_libid_type="x86 DLL" ;; *executable*) # but shell scripts are "executable" too... case $win32_fileres in *MS\ Windows\ PE\ Intel*) win32_libid_type="x86 DLL" ;; esac ;; esac $ECHO "$win32_libid_type" } # func_cygming_dll_for_implib ARG # # Platform-specific function to extract the # name of the DLL associated with the specified # import library ARG. # Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib () { $opt_debug sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` } # func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs # # The is the core of a fallback implementation of a # platform-specific function to extract the name of the # DLL associated with the specified import library LIBNAME. # # SECTION_NAME is either .idata$6 or .idata$7, depending # on the platform and compiler that created the implib. # # Echos the name of the DLL associated with the # specified import library. func_cygming_dll_for_implib_fallback_core () { $opt_debug match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` $OBJDUMP -s --section "$1" "$2" 2>/dev/null | $SED '/^Contents of section '"$match_literal"':/{ # Place marker at beginning of archive member dllname section s/.*/====MARK====/ p d } # These lines can sometimes be longer than 43 characters, but # are always uninteresting /:[ ]*file format pe[i]\{,1\}-/d /^In archive [^:]*:/d # Ensure marker is printed /^====MARK====/p # Remove all lines with less than 43 characters /^.\{43\}/!d # From remaining lines, remove first 43 characters s/^.\{43\}//' | $SED -n ' # Join marker and all lines until next marker into a single line /^====MARK====/ b para H $ b para b :para x s/\n//g # Remove the marker s/^====MARK====// # Remove trailing dots and whitespace s/[\. \t]*$// # Print /./p' | # we now have a list, one entry per line, of the stringified # contents of the appropriate section of all members of the # archive which possess that section. Heuristic: eliminate # all those which have a first or second character that is # a '.' (that is, objdump's representation of an unprintable # character.) 
This should work for all archives with less than # 0x302f exports -- but will fail for DLLs whose name actually # begins with a literal '.' or a single character followed by # a '.'. # # Of those that remain, print the first one. $SED -e '/^\./d;/^.\./d;q' } # func_cygming_gnu_implib_p ARG # This predicate returns with zero status (TRUE) if # ARG is a GNU/binutils-style import library. Returns # with nonzero status (FALSE) otherwise. func_cygming_gnu_implib_p () { $opt_debug func_to_tool_file "$1" func_convert_file_msys_to_w32 func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` test -n "$func_cygming_gnu_implib_tmp" } # func_cygming_ms_implib_p ARG # This predicate returns with zero status (TRUE) if # ARG is an MS-style import library. Returns # with nonzero status (FALSE) otherwise. func_cygming_ms_implib_p () { $opt_debug func_to_tool_file "$1" func_convert_file_msys_to_w32 func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` test -n "$func_cygming_ms_implib_tmp" } # func_cygming_dll_for_implib_fallback ARG # Platform-specific function to extract the # name of the DLL associated with the specified # import library ARG. # # This fallback implementation is for use when $DLLTOOL # does not support the --identify-strict option. # Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib_fallback () { $opt_debug if func_cygming_gnu_implib_p "$1" ; then # binutils import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` elif func_cygming_ms_implib_p "$1" ; then # ms-generated import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` else # unknown sharedlib_from_linklib_result="" fi } # func_extract_an_archive dir oldlib func_extract_an_archive () { $opt_debug f_ex_an_ar_dir="$1"; shift f_ex_an_ar_oldlib="$1" if test "$lock_old_archive_extraction" = yes; then lockfile=$f_ex_an_ar_oldlib.lock until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done fi func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ 'stat=$?; rm -f "$lockfile"; exit $stat' if test "$lock_old_archive_extraction" = yes; then $opt_dry_run || rm -f "$lockfile" fi if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then : else func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" fi } # func_extract_archives gentop oldlib ... func_extract_archives () { $opt_debug my_gentop="$1"; shift my_oldlibs=${1+"$@"} my_oldobjs="" my_xlib="" my_xabs="" my_xdir="" for my_xlib in $my_oldlibs; do # Extract the objects. 
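# Each convenience archive is unpacked into its own subdirectory of
# $my_gentop (a serial number is appended when two archives share a
# basename), and the extracted object members are collected in
# func_extract_archives_result for the caller to link.  Illustrative layout
# (assumed names):
#   $my_gentop/libhelpers.a/helper1.o  $my_gentop/libhelpers.a/helper2.o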
case $my_xlib in [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; *) my_xabs=`pwd`"/$my_xlib" ;; esac func_basename "$my_xlib" my_xlib="$func_basename_result" my_xlib_u=$my_xlib while :; do case " $extracted_archives " in *" $my_xlib_u "*) func_arith $extracted_serial + 1 extracted_serial=$func_arith_result my_xlib_u=lt$extracted_serial-$my_xlib ;; *) break ;; esac done extracted_archives="$extracted_archives $my_xlib_u" my_xdir="$my_gentop/$my_xlib_u" func_mkdir_p "$my_xdir" case $host in *-darwin*) func_verbose "Extracting $my_xabs" # Do not bother doing anything if just a dry run $opt_dry_run || { darwin_orig_dir=`pwd` cd $my_xdir || exit $? darwin_archive=$my_xabs darwin_curdir=`pwd` darwin_base_archive=`basename "$darwin_archive"` darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` if test -n "$darwin_arches"; then darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` darwin_arch= func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" for darwin_arch in $darwin_arches ; do func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" func_extract_an_archive "`pwd`" "${darwin_base_archive}" cd "$darwin_curdir" $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" done # $darwin_arches ## Okay now we've a bunch of thin objects, gotta fatten them up :) darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` darwin_file= darwin_files= for darwin_file in $darwin_filelist; do darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` $LIPO -create -output "$darwin_file" $darwin_files done # $darwin_filelist $RM -rf unfat-$$ cd "$darwin_orig_dir" else cd $darwin_orig_dir func_extract_an_archive "$my_xdir" "$my_xabs" fi # $darwin_arches } # !$opt_dry_run ;; *) func_extract_an_archive "$my_xdir" "$my_xabs" ;; esac my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` done func_extract_archives_result="$my_oldobjs" } # func_emit_wrapper [arg=no] # # Emit a libtool wrapper script on stdout. # Don't directly open a file because we may want to # incorporate the script contents within a cygwin/mingw # wrapper executable. Must ONLY be called from within # func_mode_link because it depends on a number of variables # set therein. # # ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR # variable will take. If 'yes', then the emitted script # will assume that the directory in which it is stored is # the $objdir directory. This is a cygwin/mingw-specific # behavior. func_emit_wrapper () { func_emit_wrapper_arg1=${1-no} $ECHO "\ #! $SHELL # $output - temporary wrapper script for $objdir/$outputname # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # The $output program cannot be directly executed until all the libtool # libraries that it depends on are installed. # # This wrapper script should never be moved out of the build directory. # If it is, it will not operate correctly. # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. 
sed_quote_subst='$sed_quote_subst' # Be Bourne compatible if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" # This environment variable determines our operation mode. if test \"\$libtool_install_magic\" = \"$magic\"; then # install mode needs the following variables: generated_by_libtool_version='$macro_version' notinst_deplibs='$notinst_deplibs' else # When we are sourced in execute mode, \$file and \$ECHO are already set. if test \"\$libtool_execute_magic\" != \"$magic\"; then file=\"\$0\"" qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` $ECHO "\ # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } ECHO=\"$qECHO\" fi # Very basic option parsing. These options are (a) specific to # the libtool wrapper, (b) are identical between the wrapper # /script/ and the wrapper /executable/ which is used only on # windows platforms, and (c) all begin with the string "--lt-" # (application programs are unlikely to have options which match # this pattern). # # There are only two supported options: --lt-debug and # --lt-dump-script. There is, deliberately, no --lt-help. # # The first argument to this parsing function should be the # script's $0 value, followed by "$@". lt_option_debug= func_parse_lt_options () { lt_script_arg0=\$0 shift for lt_opt do case \"\$lt_opt\" in --lt-debug) lt_option_debug=1 ;; --lt-dump-script) lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` cat \"\$lt_dump_D/\$lt_dump_F\" exit 0 ;; --lt-*) \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 exit 1 ;; esac done # Print the debug banner immediately: if test -n \"\$lt_option_debug\"; then echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 fi } # Used when --lt-debug. 
Prints its arguments to stdout # (redirection is the responsibility of the caller) func_lt_dump_args () { lt_dump_args_N=1; for lt_arg do \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` done } # Core function for launching the target application func_exec_program_core () { " case $host in # Backslashes separate directories on plain windows *-*-mingw | *-*-os2* | *-cegcc*) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} " ;; *) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir/\$program\" \${1+\"\$@\"} " ;; esac $ECHO "\ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 exit 1 } # A function to encapsulate launching the target application # Strips options in the --lt-* namespace from \$@ and # launches target application with the remaining arguments. func_exec_program () { case \" \$* \" in *\\ --lt-*) for lt_wr_arg do case \$lt_wr_arg in --lt-*) ;; *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; esac shift done ;; esac func_exec_program_core \${1+\"\$@\"} } # Parse options func_parse_lt_options \"\$0\" \${1+\"\$@\"} # Find the directory that this script lives in. thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` test \"x\$thisdir\" = \"x\$file\" && thisdir=. # Follow symbolic links until we get to the real thisdir. file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` while test -n \"\$file\"; do destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` # If there was a directory component, then change thisdir. if test \"x\$destdir\" != \"x\$file\"; then case \"\$destdir\" in [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; *) thisdir=\"\$thisdir/\$destdir\" ;; esac fi file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` done # Usually 'no', except on cygwin/mingw when embedded into # the cwrapper. WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then # special case for '.' if test \"\$thisdir\" = \".\"; then thisdir=\`pwd\` fi # remove .libs from thisdir case \"\$thisdir\" in *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; $objdir ) thisdir=. ;; esac fi # Try to get the absolute directory name. absdir=\`cd \"\$thisdir\" && pwd\` test -n \"\$absdir\" && thisdir=\"\$absdir\" " if test "$fast_install" = yes; then $ECHO "\ program=lt-'$outputname'$exeext progdir=\"\$thisdir/$objdir\" if test ! -f \"\$progdir/\$program\" || { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ test \"X\$file\" != \"X\$progdir/\$program\"; }; then file=\"\$\$-\$program\" if test ! 
-d \"\$progdir\"; then $MKDIR \"\$progdir\" else $RM \"\$progdir/\$file\" fi" $ECHO "\ # relink executable if necessary if test -n \"\$relink_command\"; then if relink_command_output=\`eval \$relink_command 2>&1\`; then : else $ECHO \"\$relink_command_output\" >&2 $RM \"\$progdir/\$file\" exit 1 fi fi $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || { $RM \"\$progdir/\$program\"; $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } $RM \"\$progdir/\$file\" fi" else $ECHO "\ program='$outputname' progdir=\"\$thisdir/$objdir\" " fi $ECHO "\ if test -f \"\$progdir/\$program\"; then" # fixup the dll searchpath if we need to. # # Fix the DLL searchpath if we need to. Do this before prepending # to shlibpath, because on Windows, both are PATH and uninstalled # libraries must come first. if test -n "$dllsearchpath"; then $ECHO "\ # Add the dll search path components to the executable PATH PATH=$dllsearchpath:\$PATH " fi # Export our shlibpath_var if we have one. if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then $ECHO "\ # Add our own library path to $shlibpath_var $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" # Some systems cannot cope with colon-terminated $shlibpath_var # The second colon is a workaround for a bug in BeOS R4 sed $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` export $shlibpath_var " fi $ECHO "\ if test \"\$libtool_execute_magic\" != \"$magic\"; then # Run the actual program with our arguments. func_exec_program \${1+\"\$@\"} fi else # The program doesn't exist. \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 exit 1 fi fi\ " } # func_emit_cwrapperexe_src # emit the source code for a wrapper executable on stdout # Must ONLY be called from within func_mode_link because # it depends on a number of variable set therein. func_emit_cwrapperexe_src () { cat < #include #ifdef _MSC_VER # include # include # include #else # include # include # ifdef __CYGWIN__ # include # endif #endif #include #include #include #include #include #include #include #include /* declarations of non-ANSI functions */ #if defined(__MINGW32__) # ifdef __STRICT_ANSI__ int _putenv (const char *); # endif #elif defined(__CYGWIN__) # ifdef __STRICT_ANSI__ char *realpath (const char *, char *); int putenv (char *); int setenv (const char *, const char *, int); # endif /* #elif defined (other platforms) ... */ #endif /* portability defines, excluding path handling macros */ #if defined(_MSC_VER) # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv # define S_IXUSR _S_IEXEC # ifndef _INTPTR_T_DEFINED # define _INTPTR_T_DEFINED # define intptr_t int # endif #elif defined(__MINGW32__) # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv #elif defined(__CYGWIN__) # define HAVE_SETENV # define FOPEN_WB "wb" /* #elif defined (other platforms) ... 
*/ #endif #if defined(PATH_MAX) # define LT_PATHMAX PATH_MAX #elif defined(MAXPATHLEN) # define LT_PATHMAX MAXPATHLEN #else # define LT_PATHMAX 1024 #endif #ifndef S_IXOTH # define S_IXOTH 0 #endif #ifndef S_IXGRP # define S_IXGRP 0 #endif /* path handling portability macros */ #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # define PATH_SEPARATOR ':' #endif #if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ defined (__OS2__) # define HAVE_DOS_BASED_FILE_SYSTEM # define FOPEN_WB "wb" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # ifndef PATH_SEPARATOR_2 # define PATH_SEPARATOR_2 ';' # endif #endif #ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) #else /* DIR_SEPARATOR_2 */ # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) #endif /* DIR_SEPARATOR_2 */ #ifndef PATH_SEPARATOR_2 # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) #else /* PATH_SEPARATOR_2 */ # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) #endif /* PATH_SEPARATOR_2 */ #ifndef FOPEN_WB # define FOPEN_WB "w" #endif #ifndef _O_BINARY # define _O_BINARY 0 #endif #define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) #define XFREE(stale) do { \ if (stale) { free ((void *) stale); stale = 0; } \ } while (0) #if defined(LT_DEBUGWRAPPER) static int lt_debug = 1; #else static int lt_debug = 0; #endif const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ void *xmalloc (size_t num); char *xstrdup (const char *string); const char *base_name (const char *name); char *find_executable (const char *wrapper); char *chase_symlinks (const char *pathspec); int make_executable (const char *path); int check_executable (const char *path); char *strendzap (char *str, const char *pat); void lt_debugprintf (const char *file, int line, const char *fmt, ...); void lt_fatal (const char *file, int line, const char *message, ...); static const char *nonnull (const char *s); static const char *nonempty (const char *s); void lt_setenv (const char *name, const char *value); char *lt_extend_str (const char *orig_value, const char *add, int to_end); void lt_update_exe_path (const char *name, const char *value); void lt_update_lib_path (const char *name, const char *value); char **prepare_spawn (char **argv); void lt_dump_script (FILE *f); EOF cat <= 0) && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) return 1; else return 0; } int make_executable (const char *path) { int rval = 0; struct stat st; lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", nonempty (path)); if ((!path) || (!*path)) return 0; if (stat (path, &st) >= 0) { rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); } return rval; } /* Searches for the full path of the wrapper. Returns newly allocated full path name if found, NULL otherwise Does not chase symlinks, even on platforms that support them. */ char * find_executable (const char *wrapper) { int has_slash = 0; const char *p; const char *p_next; /* static buffer for getcwd */ char tmp[LT_PATHMAX + 1]; int tmp_len; char *concat_name; lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", nonempty (wrapper)); if ((wrapper == NULL) || (*wrapper == '\0')) return NULL; /* Absolute path? 
*/ #if defined (HAVE_DOS_BASED_FILE_SYSTEM) if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } else { #endif if (IS_DIR_SEPARATOR (wrapper[0])) { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } #if defined (HAVE_DOS_BASED_FILE_SYSTEM) } #endif for (p = wrapper; *p; p++) if (*p == '/') { has_slash = 1; break; } if (!has_slash) { /* no slashes; search PATH */ const char *path = getenv ("PATH"); if (path != NULL) { for (p = path; *p; p = p_next) { const char *q; size_t p_len; for (q = p; *q; q++) if (IS_PATH_SEPARATOR (*q)) break; p_len = q - p; p_next = (*q == '\0' ? q : q + 1); if (p_len == 0) { /* empty path: current directory */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); } else { concat_name = XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, p, p_len); concat_name[p_len] = '/'; strcpy (concat_name + p_len + 1, wrapper); } if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } } /* not found in PATH; assume curdir */ } /* Relative path | not found in path: prepend cwd */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); return NULL; } char * chase_symlinks (const char *pathspec) { #ifndef S_ISLNK return xstrdup (pathspec); #else char buf[LT_PATHMAX]; struct stat s; char *tmp_pathspec = xstrdup (pathspec); char *p; int has_symlinks = 0; while (strlen (tmp_pathspec) && !has_symlinks) { lt_debugprintf (__FILE__, __LINE__, "checking path component for symlinks: %s\n", tmp_pathspec); if (lstat (tmp_pathspec, &s) == 0) { if (S_ISLNK (s.st_mode) != 0) { has_symlinks = 1; break; } /* search backwards for last DIR_SEPARATOR */ p = tmp_pathspec + strlen (tmp_pathspec) - 1; while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) p--; if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) { /* no more DIR_SEPARATORS left */ break; } *p = '\0'; } else { lt_fatal (__FILE__, __LINE__, "error accessing file \"%s\": %s", tmp_pathspec, nonnull (strerror (errno))); } } XFREE (tmp_pathspec); if (!has_symlinks) { return xstrdup (pathspec); } tmp_pathspec = realpath (pathspec, buf); if (tmp_pathspec == 0) { lt_fatal (__FILE__, __LINE__, "could not follow symlinks for %s", pathspec); } return xstrdup (tmp_pathspec); #endif } char * strendzap (char *str, const char *pat) { size_t len, patlen; assert (str != NULL); assert (pat != NULL); len = strlen (str); patlen = strlen (pat); if (patlen <= len) { str += len - patlen; if (strcmp (str, pat) == 0) *str = '\0'; } return str; } void lt_debugprintf (const char *file, int line, const char *fmt, ...) 
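/* Descriptive note: diagnostics are written to stderr only while lt_debug is
   non-zero, i.e. when this wrapper was compiled with -DLT_DEBUGWRAPPER (see
   above) or debug output was enabled at run time.  */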
{ va_list args; if (lt_debug) { (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); va_start (args, fmt); (void) vfprintf (stderr, fmt, args); va_end (args); } } static void lt_error_core (int exit_status, const char *file, int line, const char *mode, const char *message, va_list ap) { fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); vfprintf (stderr, message, ap); fprintf (stderr, ".\n"); if (exit_status >= 0) exit (exit_status); } void lt_fatal (const char *file, int line, const char *message, ...) { va_list ap; va_start (ap, message); lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); va_end (ap); } static const char * nonnull (const char *s) { return s ? s : "(null)"; } static const char * nonempty (const char *s) { return (s && !*s) ? "(empty)" : nonnull (s); } void lt_setenv (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_setenv) setting '%s' to '%s'\n", nonnull (name), nonnull (value)); { #ifdef HAVE_SETENV /* always make a copy, for consistency with !HAVE_SETENV */ char *str = xstrdup (value); setenv (name, str, 1); #else int len = strlen (name) + 1 + strlen (value) + 1; char *str = XMALLOC (char, len); sprintf (str, "%s=%s", name, value); if (putenv (str) != EXIT_SUCCESS) { XFREE (str); } #endif } } char * lt_extend_str (const char *orig_value, const char *add, int to_end) { char *new_value; if (orig_value && *orig_value) { int orig_value_len = strlen (orig_value); int add_len = strlen (add); new_value = XMALLOC (char, add_len + orig_value_len + 1); if (to_end) { strcpy (new_value, orig_value); strcpy (new_value + orig_value_len, add); } else { strcpy (new_value, add); strcpy (new_value + add_len, orig_value); } } else { new_value = xstrdup (add); } return new_value; } void lt_update_exe_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); /* some systems can't cope with a ':'-terminated path #' */ int len = strlen (new_value); while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) { new_value[len-1] = '\0'; } lt_setenv (name, new_value); XFREE (new_value); } } void lt_update_lib_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); } } EOF case $host_os in mingw*) cat <<"EOF" /* Prepares an argument vector before calling spawn(). Note that spawn() does not by itself call the command interpreter (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); GetVersionEx(&v); v.dwPlatformId == VER_PLATFORM_WIN32_NT; }) ? "cmd.exe" : "command.com"). Instead it simply concatenates the arguments, separated by ' ', and calls CreateProcess(). We must quote the arguments since Win32 CreateProcess() interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a special way: - Space and tab are interpreted as delimiters. They are not treated as delimiters if they are surrounded by double quotes: "...". - Unescaped double quotes are removed from the input. 
Their only effect is that within double quotes, space and tab are treated like normal characters. - Backslashes not followed by double quotes are not special. - But 2*n+1 backslashes followed by a double quote become n backslashes followed by a double quote (n >= 0): \" -> " \\\" -> \" \\\\\" -> \\" */ #define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" #define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" char ** prepare_spawn (char **argv) { size_t argc; char **new_argv; size_t i; /* Count number of arguments. */ for (argc = 0; argv[argc] != NULL; argc++) ; /* Allocate new argument vector. */ new_argv = XMALLOC (char *, argc + 1); /* Put quoted arguments into the new argument vector. */ for (i = 0; i < argc; i++) { const char *string = argv[i]; if (string[0] == '\0') new_argv[i] = xstrdup ("\"\""); else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) { int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); size_t length; unsigned int backslashes; const char *s; char *quoted_string; char *p; length = 0; backslashes = 0; if (quote_around) length++; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') length += backslashes + 1; length++; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) length += backslashes + 1; quoted_string = XMALLOC (char, length + 1); p = quoted_string; backslashes = 0; if (quote_around) *p++ = '"'; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') { unsigned int j; for (j = backslashes + 1; j > 0; j--) *p++ = '\\'; } *p++ = c; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) { unsigned int j; for (j = backslashes; j > 0; j--) *p++ = '\\'; *p++ = '"'; } *p = '\0'; new_argv[i] = quoted_string; } else new_argv[i] = (char *) string; } new_argv[argc] = NULL; return new_argv; } EOF ;; esac cat <<"EOF" void lt_dump_script (FILE* f) { EOF func_emit_wrapper yes | $SED -n -e ' s/^\(.\{79\}\)\(..*\)/\1\ \2/ h s/\([\\"]\)/\\\1/g s/$/\\n/ s/\([^\n]*\).*/ fputs ("\1", f);/p g D' cat <<"EOF" } EOF } # end: func_emit_cwrapperexe_src # func_win32_import_lib_p ARG # True if ARG is an import lib, as indicated by $file_magic_cmd func_win32_import_lib_p () { $opt_debug case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in *import*) : ;; *) false ;; esac } # func_mode_link arg... func_mode_link () { $opt_debug case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) # It is impossible to link a dll without this setting, and # we shouldn't force the makefile maintainer to figure out # which system we are compiling for in order to pass an extra # flag for every libtool invocation. # allow_undefined=no # FIXME: Unfortunately, there are problems with the above when trying # to make a dll which has undefined symbols, in which case not # even a static library is built. For now, we need to specify # -no-undefined on the libtool link line when we can be certain # that all symbols are satisfied, otherwise we get a static library. 
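# In practice this means that on these hosts a DLL is only built when the
# link is known to be fully resolved, e.g. an assumed invocation:
#   libtool --mode=link gcc -no-undefined -o libfoo.la foo.lo -rpath /usr/local/lib
# Without -no-undefined, only the static archive is produced.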
allow_undefined=yes ;; *) allow_undefined=yes ;; esac libtool_args=$nonopt base_compile="$nonopt $@" compile_command=$nonopt finalize_command=$nonopt compile_rpath= finalize_rpath= compile_shlibpath= finalize_shlibpath= convenience= old_convenience= deplibs= old_deplibs= compiler_flags= linker_flags= dllsearchpath= lib_search_path=`pwd` inst_prefix_dir= new_inherited_linker_flags= avoid_version=no bindir= dlfiles= dlprefiles= dlself=no export_dynamic=no export_symbols= export_symbols_regex= generated= libobjs= ltlibs= module=no no_install=no objs= non_pic_objects= precious_files_regex= prefer_static_libs=no preload=no prev= prevarg= release= rpath= xrpath= perm_rpath= temp_rpath= thread_safe=no vinfo= vinfo_number=no weak_libs= single_module="${wl}-single_module" func_infer_tag $base_compile # We need to know -static, to get the right output filenames. for arg do case $arg in -shared) test "$build_libtool_libs" != yes && \ func_fatal_configuration "can not build a shared library" build_old_libs=no break ;; -all-static | -static | -static-libtool-libs) case $arg in -all-static) if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then func_warning "complete static linking is impossible in this configuration" fi if test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; -static) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=built ;; -static-libtool-libs) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; esac build_libtool_libs=no build_old_libs=yes break ;; esac done # See if our shared archives depend on static archives. test -n "$old_archive_from_new_cmds" && build_old_libs=yes # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg="$1" shift func_quote_for_eval "$arg" qarg=$func_quote_for_eval_unquoted_result func_append libtool_args " $func_quote_for_eval_result" # If the previous option needs an argument, assign it. if test -n "$prev"; then case $prev in output) func_append compile_command " @OUTPUT@" func_append finalize_command " @OUTPUT@" ;; esac case $prev in bindir) bindir="$arg" prev= continue ;; dlfiles|dlprefiles) if test "$preload" = no; then # Add the symbol object into the linking commands. func_append compile_command " @SYMFILE@" func_append finalize_command " @SYMFILE@" preload=yes fi case $arg in *.la | *.lo) ;; # We handle these cases below. force) if test "$dlself" = no; then dlself=needless export_dynamic=yes fi prev= continue ;; self) if test "$prev" = dlprefiles; then dlself=yes elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then dlself=yes else dlself=needless export_dynamic=yes fi prev= continue ;; *) if test "$prev" = dlfiles; then func_append dlfiles " $arg" else func_append dlprefiles " $arg" fi prev= continue ;; esac ;; expsyms) export_symbols="$arg" test -f "$arg" \ || func_fatal_error "symbol file \`$arg' does not exist" prev= continue ;; expsyms_regex) export_symbols_regex="$arg" prev= continue ;; framework) case $host in *-*-darwin*) case "$deplibs " in *" $qarg.ltframework "*) ;; *) func_append deplibs " $qarg.ltframework" # this is fixed later ;; esac ;; esac prev= continue ;; inst_prefix) inst_prefix_dir="$arg" prev= continue ;; objectlist) if test -f "$arg"; then save_arg=$arg moreargs= for fil in `cat "$save_arg"` do # func_append moreargs " $fil" arg=$fil # A libtool-controlled object. 
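# --- Editorial aside, not part of upstream ltmain.sh.  The objectlist and *.lo
# handling below sources each libtool object file and expects it to define
# pic_object and non_pic_object.  The function here is never called and every
# name in it is made up; it only sketches what such a file looks like and how
# func_source-style reading of it behaves.
lt_editorial_demo_lo_file ()
{
  cat > demo-foo.lo <<'DEMOEOF'
# demo-foo.lo - a libtool object file
pic_object='.libs/demo-foo.o'
non_pic_object='demo-foo.o'
DEMOEOF
  # func_source below reads the file as shell assignments, much like this:
  . ./demo-foo.lo
  printf 'pic=%s non_pic=%s\n' "$pic_object" "$non_pic_object"
  rm -f demo-foo.lo
}
# --- End of editorial aside.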
# Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi done else func_fatal_error "link input file \`$arg' does not exist" fi arg=$save_arg prev= continue ;; precious_regex) precious_files_regex="$arg" prev= continue ;; release) release="-$arg" prev= continue ;; rpath | xrpath) # We need an absolute path. case $arg in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac if test "$prev" = rpath; then case "$rpath " in *" $arg "*) ;; *) func_append rpath " $arg" ;; esac else case "$xrpath " in *" $arg "*) ;; *) func_append xrpath " $arg" ;; esac fi prev= continue ;; shrext) shrext_cmds="$arg" prev= continue ;; weak) func_append weak_libs " $arg" prev= continue ;; xcclinker) func_append linker_flags " $qarg" func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xcompiler) func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xlinker) func_append linker_flags " $qarg" func_append compiler_flags " $wl$qarg" prev= func_append compile_command " $wl$qarg" func_append finalize_command " $wl$qarg" continue ;; *) eval "$prev=\"\$arg\"" prev= continue ;; esac fi # test -n "$prev" prevarg="$arg" case $arg in -all-static) if test -n "$link_static_flag"; then # See comment for -static flag below, for more details. func_append compile_command " $link_static_flag" func_append finalize_command " $link_static_flag" fi continue ;; -allow-undefined) # FIXME: remove this flag sometime in the future. 
func_fatal_error "\`-allow-undefined' must not be used because it is the default" ;; -avoid-version) avoid_version=yes continue ;; -bindir) prev=bindir continue ;; -dlopen) prev=dlfiles continue ;; -dlpreopen) prev=dlprefiles continue ;; -export-dynamic) export_dynamic=yes continue ;; -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then func_fatal_error "more than one -exported-symbols argument is not allowed" fi if test "X$arg" = "X-export-symbols"; then prev=expsyms else prev=expsyms_regex fi continue ;; -framework) prev=framework continue ;; -inst-prefix-dir) prev=inst_prefix continue ;; # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* # so, if we see these flags be careful not to treat them like -L -L[A-Z][A-Z]*:*) case $with_gcc/$host in no/*-*-irix* | /*-*-irix*) func_append compile_command " $arg" func_append finalize_command " $arg" ;; esac continue ;; -L*) func_stripname "-L" '' "$arg" if test -z "$func_stripname_result"; then if test "$#" -gt 0; then func_fatal_error "require no space between \`-L' and \`$1'" else func_fatal_error "need path for \`-L' option" fi fi func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) absdir=`cd "$dir" && pwd` test -z "$absdir" && \ func_fatal_error "cannot determine absolute directory name of \`$dir'" dir="$absdir" ;; esac case "$deplibs " in *" -L$dir "* | *" $arg "*) # Will only happen for absolute or sysroot arguments ;; *) # Preserve sysroot, but never include relative directories case $dir in [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; *) func_append deplibs " -L$dir" ;; esac func_append lib_search_path " $dir" ;; esac case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` case :$dllsearchpath: in *":$dir:"*) ;; ::) dllsearchpath=$dir;; *) func_append dllsearchpath ":$dir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac continue ;; -l*) if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) # These systems don't actually have a C or math library (as such) continue ;; *-*-os2*) # These systems don't actually have a C library (as such) test "X$arg" = "X-lc" && continue ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. test "X$arg" = "X-lc" && continue ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C and math libraries are in the System framework func_append deplibs " System.ltframework" continue ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype test "X$arg" = "X-lc" && continue ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work test "X$arg" = "X-lc" && continue ;; esac elif test "X$arg" = "X-lc_r"; then case $host in *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc_r directly, use -pthread flag. continue ;; esac fi func_append deplibs " $arg" continue ;; -module) module=yes continue ;; # Tru64 UNIX uses -model [arg] to determine the layout of C++ # classes, name mangling, and exception handling. # Darwin uses the -arch flag to determine output architecture. 
-model|-arch|-isysroot|--sysroot) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" prev=xcompiler continue ;; -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case "$new_inherited_linker_flags " in *" $arg "*) ;; * ) func_append new_inherited_linker_flags " $arg" ;; esac continue ;; -multi_module) single_module="${wl}-multi_module" continue ;; -no-fast-install) fast_install=no continue ;; -no-install) case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) # The PATH hackery in wrapper scripts is required on Windows # and Darwin in order for the loader to find any dlls it needs. func_warning "\`-no-install' is ignored for $host" func_warning "assuming \`-no-fast-install' instead" fast_install=no ;; *) no_install=yes ;; esac continue ;; -no-undefined) allow_undefined=no continue ;; -objectlist) prev=objectlist continue ;; -o) prev=output ;; -precious-files-regex) prev=precious_regex continue ;; -release) prev=release continue ;; -rpath) prev=rpath continue ;; -R) prev=xrpath continue ;; -R*) func_stripname '-R' '' "$arg" dir=$func_stripname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; =*) func_stripname '=' '' "$dir" dir=$lt_sysroot$func_stripname_result ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac continue ;; -shared) # The effects of -shared are defined in a previous loop. continue ;; -shrext) prev=shrext continue ;; -static | -static-libtool-libs) # The effects of -static are defined in a previous loop. # We used to do the same as -all-static on platforms that # didn't have a PIC flag, but the assumption that the effects # would be equivalent was wrong. It would break on at least # Digital Unix and AIX. 
continue ;; -thread-safe) thread_safe=yes continue ;; -version-info) prev=vinfo continue ;; -version-number) prev=vinfo vinfo_number=yes continue ;; -weak) prev=weak continue ;; -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" func_append arg " $func_quote_for_eval_result" func_append compiler_flags " $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Wl,*) func_stripname '-Wl,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" func_append arg " $wl$func_quote_for_eval_result" func_append compiler_flags " $wl$func_quote_for_eval_result" func_append linker_flags " $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Xcompiler) prev=xcompiler continue ;; -Xlinker) prev=xlinker continue ;; -XCClinker) prev=xcclinker continue ;; # -msg_* for osf cc -msg_*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; # Flags to be passed through unchanged, with rationale: # -64, -mips[0-9] enable 64-bit mode for the SGI compiler # -r[0-9][0-9]* specify processor for the SGI compiler # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler # +DA*, +DD* enable 64-bit mode for the HP compiler # -q* compiler args for the IBM compiler # -m*, -t[45]*, -txscale* architecture-specific flags for GCC # -F/path path to uninstalled frameworks, gcc on darwin # -p, -pg, --coverage, -fprofile-* profiling flags for GCC # @file GCC response files # -tp=* Portland pgcc target processor selection # --sysroot=* for sysroot support # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ -O*|-flto*|-fwhopr*|-fuse-linker-plugin) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" func_append compile_command " $arg" func_append finalize_command " $arg" func_append compiler_flags " $arg" continue ;; # Some other compiler flag. -* | +*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; *.$objext) # A standard object. func_append objs " $arg" ;; *.lo) # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. 
non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi ;; *.$libext) # An archive. func_append deplibs " $arg" func_append old_deplibs " $arg" continue ;; *.la) # A libtool-controlled library. func_resolve_sysroot "$arg" if test "$prev" = dlfiles; then # This library was specified with -dlopen. func_append dlfiles " $func_resolve_sysroot_result" prev= elif test "$prev" = dlprefiles; then # The library was specified with -dlpreopen. func_append dlprefiles " $func_resolve_sysroot_result" prev= else func_append deplibs " $func_resolve_sysroot_result" fi continue ;; # Some other compiler argument. *) # Unknown arguments in both finalize_command and compile_command need # to be aesthetically quoted because they are evaled later. func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; esac # arg # Now actually substitute the argument into the commands. if test -n "$arg"; then func_append compile_command " $arg" func_append finalize_command " $arg" fi done # argument parsing loop test -n "$prev" && \ func_fatal_help "the \`$prevarg' option requires an argument" if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then eval arg=\"$export_dynamic_flag_spec\" func_append compile_command " $arg" func_append finalize_command " $arg" fi oldlibs= # calculate the name of the file, without its directory func_basename "$output" outputname="$func_basename_result" libobjs_save="$libobjs" if test -n "$shlibpath_var"; then # get the directories listed in $shlibpath_var eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` else shlib_search_path= fi eval sys_lib_search_path=\"$sys_lib_search_path_spec\" eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" func_dirname "$output" "/" "" output_objdir="$func_dirname_result$objdir" func_to_tool_file "$output_objdir/" tool_output_objdir=$func_to_tool_file_result # Create the object directory. func_mkdir_p "$output_objdir" # Determine the type of output case $output in "") func_fatal_help "you must specify an output file" ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; *.la) linkmode=lib ;; *) linkmode=prog ;; # Anything else should be a program. esac specialdeplibs= libs= # Find all interdependent deplibs by searching for libraries # that are linked more than once (e.g. -la -lb -la) for deplib in $deplibs; do if $opt_preserve_dup_deps ; then case "$libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append libs " $deplib" done if test "$linkmode" = lib; then libs="$predeps $libs $compiler_lib_search_path $postdeps" # Compute libraries that are listed more than once in $predeps # $postdeps and mark them as special (i.e., whose duplicates are # not to be eliminated). 
pre_post_deps= if $opt_duplicate_compiler_generated_deps; then for pre_post_dep in $predeps $postdeps; do case "$pre_post_deps " in *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; esac func_append pre_post_deps " $pre_post_dep" done fi pre_post_deps= fi deplibs= newdependency_libs= newlib_search_path= need_relink=no # whether we're linking any uninstalled libtool libraries notinst_deplibs= # not-installed libtool libraries notinst_path= # paths that contain not-installed libtool libraries case $linkmode in lib) passes="conv dlpreopen link" for file in $dlfiles $dlprefiles; do case $file in *.la) ;; *) func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" ;; esac done ;; prog) compile_deplibs= finalize_deplibs= alldeplibs=no newdlfiles= newdlprefiles= passes="conv scan dlopen dlpreopen link" ;; *) passes="conv" ;; esac for pass in $passes; do # The preopen pass in lib mode reverses $deplibs; put it back here # so that -L comes before libs that need it for instance... if test "$linkmode,$pass" = "lib,link"; then ## FIXME: Find the place where the list is rebuilt in the wrong ## order, and fix it there properly tmp_deplibs= for deplib in $deplibs; do tmp_deplibs="$deplib $tmp_deplibs" done deplibs="$tmp_deplibs" fi if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan"; then libs="$deplibs" deplibs= fi if test "$linkmode" = prog; then case $pass in dlopen) libs="$dlfiles" ;; dlpreopen) libs="$dlprefiles" ;; link) libs="$deplibs %DEPLIBS%" test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" ;; esac fi if test "$linkmode,$pass" = "lib,dlpreopen"; then # Collect and forward deplibs of preopened libtool libs for lib in $dlprefiles; do # Ignore non-libtool-libs dependency_libs= func_resolve_sysroot "$lib" case $lib in *.la) func_source "$func_resolve_sysroot_result" ;; esac # Collect preopened libtool deplibs, except any this library # has declared as weak libs for deplib in $dependency_libs; do func_basename "$deplib" deplib_base=$func_basename_result case " $weak_libs " in *" $deplib_base "*) ;; *) func_append deplibs " $deplib" ;; esac done done libs="$dlprefiles" fi if test "$pass" = dlopen; then # Collect dlpreopened libraries save_deplibs="$deplibs" deplibs= fi for deplib in $libs; do lib= found=no case $deplib in -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append compiler_flags " $deplib" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -l*) if test "$linkmode" != lib && test "$linkmode" != prog; then func_warning "\`-l' is ignored for archives/objects" continue fi func_stripname '-l' '' "$deplib" name=$func_stripname_result if test "$linkmode" = lib; then searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" else searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" fi for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib="$searchdir/lib${name}${search_ext}" if test -f "$lib"; then if test "$search_ext" = ".la"; then found=yes else found=no fi break 2 fi done done if test "$found" != yes; then # deplib doesn't seem to be a 
libtool library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue else # deplib is a libtool library # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, # We need to do some special things here, and not later. if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $deplib "*) if func_lalib_p "$lib"; then library_names= old_library= func_source "$lib" for l in $old_library $library_names; do ll="$l" done if test "X$ll" = "X$old_library" ; then # only static version available found=no func_dirname "$lib" "" "." ladir="$func_dirname_result" lib=$ladir/$old_library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue fi fi ;; *) ;; esac fi fi ;; # -l *.ltframework) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -L*) case $linkmode in lib) deplibs="$deplib $deplibs" test "$pass" = conv && continue newdependency_libs="$deplib $newdependency_libs" func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; prog) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi if test "$pass" = scan; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; *) func_warning "\`-L' is ignored for archives/objects" ;; esac # linkmode continue ;; # -L -R*) if test "$pass" = link; then func_stripname '-R' '' "$deplib" func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # Make sure the xrpath contains only unique directories. case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac fi deplibs="$deplib $deplibs" continue ;; *.la) func_resolve_sysroot "$deplib" lib=$func_resolve_sysroot_result ;; *.$libext) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi case $linkmode in lib) # Linking convenience modules into shared libraries is allowed, # but linking other static libraries is non-portable. case " $dlpreconveniencelibs " in *" $deplib "*) ;; *) valid_a_lib=no case $deplibs_check_method in match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ | $EGREP "$match_pattern_regex" > /dev/null; then valid_a_lib=yes fi ;; pass_all) valid_a_lib=yes ;; esac if test "$valid_a_lib" != yes; then echo $ECHO "*** Warning: Trying to link with static lib archive $deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. 
But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because the file extensions .$libext of this argument makes me believe" echo "*** that it is just a static archive that I should not use here." else echo $ECHO "*** Warning: Linking the shared library $output against the" $ECHO "*** static library $deplib is not portable!" deplibs="$deplib $deplibs" fi ;; esac continue ;; prog) if test "$pass" != link; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi continue ;; esac # linkmode ;; # *.$libext *.lo | *.$objext) if test "$pass" = conv; then deplibs="$deplib $deplibs" elif test "$linkmode" = prog; then if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlopen support or we're linking statically, # we need to preload. func_append newdlprefiles " $deplib" compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append newdlfiles " $deplib" fi fi continue ;; %DEPLIBS%) alldeplibs=yes continue ;; esac # case $deplib if test "$found" = yes || test -f "$lib"; then : else func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" fi # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$lib" \ || func_fatal_error "\`$lib' is not a valid libtool archive" func_dirname "$lib" "" "." ladir="$func_dirname_result" dlname= dlopen= dlpreopen= libdir= library_names= old_library= inherited_linker_flags= # If the library was installed with an old release of libtool, # it will not redefine variables installed, or shouldnotlink installed=yes shouldnotlink=no avoidtemprpath= # Read the .la file func_source "$lib" # Convert "-framework foo" to "foo.ltframework" if test -n "$inherited_linker_flags"; then tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do case " $new_inherited_linker_flags " in *" $tmp_inherited_linker_flag "*) ;; *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; esac done fi dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan" || { test "$linkmode" != prog && test "$linkmode" != lib; }; then test -n "$dlopen" && func_append dlfiles " $dlopen" test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" fi if test "$pass" = conv; then # Only check for convenience libraries deplibs="$lib $deplibs" if test -z "$libdir"; then if test -z "$old_library"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # It is a libtool convenience library, so add in its objects. func_append convenience " $ladir/$objdir/$old_library" func_append old_convenience " $ladir/$objdir/$old_library" tmp_libs= for deplib in $dependency_libs; do deplibs="$deplib $deplibs" if $opt_preserve_dup_deps ; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done elif test "$linkmode" != prog && test "$linkmode" != lib; then func_fatal_error "\`$lib' is not a convenience library" fi continue fi # $pass = conv # Get the name of the library we link against. 
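# --- Editorial aside, not part of upstream ltmain.sh.  The two sed commands
# above translate between the linker spelling "-framework Foo" and the
# "Foo.ltframework" form that libtool stores in .la files.  A round trip with
# an example framework name; the function is never called.
lt_editorial_demo_ltframework ()
{
  flags='-framework Cocoa'
  stored=`printf '%s\n' "$flags" | sed 's/-framework \([^ $]*\)/\1.ltframework/g'`
  printf '%s\n' "$stored"   # Cocoa.ltframework
  printf '%s\n' " $stored" | sed 's% \([^ $]*\).ltframework% -framework \1%g'   # " -framework Cocoa"
}
# --- End of editorial aside.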
linklib= if test -n "$old_library" && { test "$prefer_static_libs" = yes || test "$prefer_static_libs,$installed" = "built,no"; }; then linklib=$old_library else for l in $old_library $library_names; do linklib="$l" done fi if test -z "$linklib"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # This library was specified with -dlopen. if test "$pass" = dlopen; then if test -z "$libdir"; then func_fatal_error "cannot -dlopen a convenience library: \`$lib'" fi if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't # bomb out in the load deplibs phase. func_append dlprefiles " $lib $dependency_libs" else func_append newdlfiles " $lib" fi continue fi # $pass = dlopen # We need an absolute path. case $ladir in [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; *) abs_ladir=`cd "$ladir" && pwd` if test -z "$abs_ladir"; then func_warning "cannot determine absolute directory name of \`$ladir'" func_warning "passing it literally to the linker, although it might fail" abs_ladir="$ladir" fi ;; esac func_basename "$lib" laname="$func_basename_result" # Find the relevant object directory and library name. if test "X$installed" = Xyes; then if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then func_warning "library \`$lib' was moved." dir="$ladir" absdir="$abs_ladir" libdir="$abs_ladir" else dir="$lt_sysroot$libdir" absdir="$lt_sysroot$libdir" fi test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes else if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then dir="$ladir" absdir="$abs_ladir" # Remove this search path later func_append notinst_path " $abs_ladir" else dir="$ladir/$objdir" absdir="$abs_ladir/$objdir" # Remove this search path later func_append notinst_path " $abs_ladir" fi fi # $installed = yes func_stripname 'lib' '.la' "$laname" name=$func_stripname_result # This library was specified with -dlpreopen. if test "$pass" = dlpreopen; then if test -z "$libdir" && test "$linkmode" = prog; then func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" fi case "$host" in # special handling for platforms with PE-DLLs. *cygwin* | *mingw* | *cegcc* ) # Linker will automatically link against shared library if both # static and shared are present. Therefore, ensure we extract # symbols from the import library if a shared library is present # (otherwise, the dlopen module name will be incorrect). We do # this by putting the import library name into $newdlprefiles. # We recover the dlopen module name by 'saving' the la file # name in a special purpose variable, and (later) extracting the # dlname from the la file. if test -n "$dlname"; then func_tr_sh "$dir/$linklib" eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" func_append newdlprefiles " $dir/$linklib" else func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" fi ;; * ) # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). if test -n "$old_library"; then func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. 
test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" # Otherwise, use the dlname, so that lt_dlopen finds it. elif test -n "$dlname"; then func_append newdlprefiles " $dir/$dlname" else func_append newdlprefiles " $dir/$linklib" fi ;; esac fi # $pass = dlpreopen if test -z "$libdir"; then # Link the convenience library if test "$linkmode" = lib; then deplibs="$dir/$old_library $deplibs" elif test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$dir/$old_library $compile_deplibs" finalize_deplibs="$dir/$old_library $finalize_deplibs" else deplibs="$lib $deplibs" # used for prog,scan pass fi continue fi if test "$linkmode" = prog && test "$pass" != link; then func_append newlib_search_path " $ladir" deplibs="$lib $deplibs" linkalldeplibs=no if test "$link_all_deplibs" != no || test -z "$library_names" || test "$build_libtool_libs" = no; then linkalldeplibs=yes fi tmp_libs= for deplib in $dependency_libs; do case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; esac # Need to link against all dependency_libs? if test "$linkalldeplibs" = yes; then deplibs="$deplib $deplibs" else # Need to hardcode shared library paths # or/and link against static libraries newdependency_libs="$deplib $newdependency_libs" fi if $opt_preserve_dup_deps ; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done # for deplib continue fi # $linkmode = prog... if test "$linkmode,$pass" = "prog,link"; then if test -n "$library_names" && { { test "$prefer_static_libs" = no || test "$prefer_static_libs,$installed" = "built,yes"; } || test -z "$old_library"; }; then # We need to hardcode the library path if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then # Make sure the rpath contains only unique directories. case "$temp_rpath:" in *"$absdir:"*) ;; *) func_append temp_rpath "$absdir:" ;; esac fi # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi # $linkmode,$pass = prog,link... if test "$alldeplibs" = yes && { test "$deplibs_check_method" = pass_all || { test "$build_libtool_libs" = yes && test -n "$library_names"; }; }; then # We only need to search for static libraries continue fi fi link_static=no # Whether the deplib will be linked statically use_static_libs=$prefer_static_libs if test "$use_static_libs" = built && test "$installed" = yes; then use_static_libs=no fi if test -n "$library_names" && { test "$use_static_libs" = no || test -z "$old_library"; }; then case $host in *cygwin* | *mingw* | *cegcc*) # No point in relinking DLLs because paths are not encoded func_append notinst_deplibs " $lib" need_relink=no ;; *) if test "$installed" = no; then func_append notinst_deplibs " $lib" need_relink=yes fi ;; esac # This is a shared library # Warn about portability, can't link against -module's on some # systems (darwin). Don't bleat about dlopened modules though! 
dlopenmodule="" for dlpremoduletest in $dlprefiles; do if test "X$dlpremoduletest" = "X$lib"; then dlopenmodule="$dlpremoduletest" break fi done if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then echo if test "$linkmode" = prog; then $ECHO "*** Warning: Linking the executable $output against the loadable module" else $ECHO "*** Warning: Linking the shared library $output against the loadable module" fi $ECHO "*** $linklib is not portable!" fi if test "$linkmode" = lib && test "$hardcode_into_libs" = yes; then # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi if test -n "$old_archive_from_expsyms_cmds"; then # figure out the soname set dummy $library_names shift realname="$1" shift libname=`eval "\\$ECHO \"$libname_spec\""` # use dlname if we got it. it's perfectly good, no? if test -n "$dlname"; then soname="$dlname" elif test -n "$soname_spec"; then # bleh windows case $host in *cygwin* | mingw* | *cegcc*) func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; esac eval soname=\"$soname_spec\" else soname="$realname" fi # Make a new name for the extract_expsyms_cmds to use soroot="$soname" func_basename "$soroot" soname="$func_basename_result" func_stripname 'lib' '.dll' "$soname" newlib=libimp-$func_stripname_result.a # If the library has no export list, then create one now if test -f "$output_objdir/$soname-def"; then : else func_verbose "extracting exported symbol list from \`$soname'" func_execute_cmds "$extract_expsyms_cmds" 'exit $?' fi # Create $newlib if test -f "$output_objdir/$newlib"; then :; else func_verbose "generating import library for \`$soname'" func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' 
fi # make sure the library variables are pointing to the new library dir=$output_objdir linklib=$newlib fi # test -n "$old_archive_from_expsyms_cmds" if test "$linkmode" = prog || test "$opt_mode" != relink; then add_shlibpath= add_dir= add= lib_linked=yes case $hardcode_action in immediate | unsupported) if test "$hardcode_direct" = no; then add="$dir/$linklib" case $host in *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; *-*-sysv4*uw2*) add_dir="-L$dir" ;; *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ *-*-unixware7*) add_dir="-L$dir" ;; *-*-darwin* ) # if the lib is a (non-dlopened) module then we can not # link against it, someone is ignoring the earlier warnings if /usr/bin/file -L $add 2> /dev/null | $GREP ": [^:]* bundle" >/dev/null ; then if test "X$dlopenmodule" != "X$lib"; then $ECHO "*** Warning: lib $linklib is a module, not a shared library" if test -z "$old_library" ; then echo echo "*** And there doesn't seem to be a static archive available" echo "*** The link will probably fail, sorry" else add="$dir/$old_library" fi elif test -n "$old_library"; then add="$dir/$old_library" fi fi esac elif test "$hardcode_minus_L" = no; then case $host in *-*-sunos*) add_shlibpath="$dir" ;; esac add_dir="-L$dir" add="-l$name" elif test "$hardcode_shlibpath_var" = no; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; relink) if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$dir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$absdir" # Try looking first in the location we're being installed to. if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; *) lib_linked=no ;; esac if test "$lib_linked" != yes; then func_fatal_configuration "unsupported hardcode properties" fi if test -n "$add_shlibpath"; then case :$compile_shlibpath: in *":$add_shlibpath:"*) ;; *) func_append compile_shlibpath "$add_shlibpath:" ;; esac fi if test "$linkmode" = prog; then test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" test -n "$add" && compile_deplibs="$add $compile_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" if test "$hardcode_direct" != yes && test "$hardcode_minus_L" != yes && test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac fi fi fi if test "$linkmode" = prog || test "$opt_mode" = relink; then add_shlibpath= add_dir= add= # Finalize command for both is simple: just hardcode it. if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$libdir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$libdir" add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac add="-l$name" elif test "$hardcode_automatic" = yes; then if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib" ; then add="$inst_prefix_dir$libdir/$linklib" else add="$libdir/$linklib" fi else # We cannot seem to hardcode it, guess we'll fake it. add_dir="-L$libdir" # Try looking first in the location we're being installed to. 
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" fi if test "$linkmode" = prog; then test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" test -n "$add" && finalize_deplibs="$add $finalize_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" fi fi elif test "$linkmode" = prog; then # Here we assume that one of hardcode_direct or hardcode_minus_L # is not unsupported. This is valid on all known static and # shared platforms. if test "$hardcode_direct" != unsupported; then test -n "$old_library" && linklib="$old_library" compile_deplibs="$dir/$linklib $compile_deplibs" finalize_deplibs="$dir/$linklib $finalize_deplibs" else compile_deplibs="-l$name -L$dir $compile_deplibs" finalize_deplibs="-l$name -L$dir $finalize_deplibs" fi elif test "$build_libtool_libs" = yes; then # Not a shared library if test "$deplibs_check_method" != pass_all; then # We're trying link a shared library against a static one # but the system doesn't support it. # Just print a warning and add the library to dependency_libs so # that the program can be linked against the static library. echo $ECHO "*** Warning: This system can not link to static lib archive $lib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have." if test "$module" = yes; then echo "*** But as you try to build a module library, libtool will still create " echo "*** a static module, that should work as long as the dlopening application" echo "*** is linked with the -dlopen flag to resolve symbols at runtime." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using \`nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** \`nm' from GNU binutils and a full rebuild may help." fi if test "$build_old_libs" = no; then build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi else deplibs="$dir/$old_library $deplibs" link_static=yes fi fi # link shared/static library? if test "$linkmode" = lib; then if test -n "$dependency_libs" && { test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes || test "$link_static" = yes; }; then # Extract -R from dependency_libs temp_deplibs= for libdir in $dependency_libs; do case $libdir in -R*) func_stripname '-R' '' "$libdir" temp_xrpath=$func_stripname_result case " $xrpath " in *" $temp_xrpath "*) ;; *) func_append xrpath " $temp_xrpath";; esac;; *) func_append temp_deplibs " $libdir";; esac done dependency_libs="$temp_deplibs" fi func_append newlib_search_path " $absdir" # Link against this library test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" # ... 
and its dependency_libs tmp_libs= for deplib in $dependency_libs; do newdependency_libs="$deplib $newdependency_libs" case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result";; *) func_resolve_sysroot "$deplib" ;; esac if $opt_preserve_dup_deps ; then case "$tmp_libs " in *" $func_resolve_sysroot_result "*) func_append specialdeplibs " $func_resolve_sysroot_result" ;; esac fi func_append tmp_libs " $func_resolve_sysroot_result" done if test "$link_all_deplibs" != no; then # Add the search paths of all dependency libraries for deplib in $dependency_libs; do path= case $deplib in -L*) path="$deplib" ;; *.la) func_resolve_sysroot "$deplib" deplib=$func_resolve_sysroot_result func_dirname "$deplib" "" "." dir=$func_dirname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; *) absdir=`cd "$dir" && pwd` if test -z "$absdir"; then func_warning "cannot determine absolute directory name of \`$dir'" absdir="$dir" fi ;; esac if $GREP "^installed=no" $deplib > /dev/null; then case $host in *-*-darwin*) depdepl= eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names" ; then for tmp in $deplibrary_names ; do depdepl=$tmp done if test -f "$absdir/$objdir/$depdepl" ; then depdepl="$absdir/$objdir/$depdepl" darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` if test -z "$darwin_install_name"; then darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` fi func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" path= fi fi ;; *) path="-L$absdir/$objdir" ;; esac else eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" test "$absdir" != "$libdir" && \ func_warning "\`$deplib' seems to be moved" path="-L$absdir" fi ;; esac case " $deplibs " in *" $path "*) ;; *) deplibs="$path $deplibs" ;; esac done fi # link_all_deplibs != no fi # linkmode = lib done # for deplib in $libs if test "$pass" = link; then if test "$linkmode" = "prog"; then compile_deplibs="$new_inherited_linker_flags $compile_deplibs" finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" else compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` fi fi dependency_libs="$newdependency_libs" if test "$pass" = dlpreopen; then # Link the dlpreopened libraries before other libraries for deplib in $save_deplibs; do deplibs="$deplib $deplibs" done fi if test "$pass" != dlopen; then if test "$pass" != conv; then # Make sure lib_search_path contains only unique directories. 
lib_search_path= for dir in $newlib_search_path; do case "$lib_search_path " in *" $dir "*) ;; *) func_append lib_search_path " $dir" ;; esac done newlib_search_path= fi if test "$linkmode,$pass" != "prog,link"; then vars="deplibs" else vars="compile_deplibs finalize_deplibs" fi for var in $vars dependency_libs; do # Add libraries to $var in reverse order eval tmp_libs=\"\$$var\" new_libs= for deplib in $tmp_libs; do # FIXME: Pedantically, this is the right thing to do, so # that some nasty dependency loop isn't accidentally # broken: #new_libs="$deplib $new_libs" # Pragmatically, this seems to cause very few problems in # practice: case $deplib in -L*) new_libs="$deplib $new_libs" ;; -R*) ;; *) # And here is the reason: when a library appears more # than once as an explicit dependence of a library, or # is implicitly linked in more than once by the # compiler, it is considered special, and multiple # occurrences thereof are not removed. Compare this # with having the same library being listed as a # dependency of multiple other libraries: in this case, # we know (pedantically, we assume) the library does not # need to be listed more than once, so we keep only the # last copy. This is not always right, but it is rare # enough that we require users that really mean to play # such unportable linking tricks to link the library # using -Wl,-lname, so that libtool does not consider it # for duplicate removal. case " $specialdeplibs " in *" $deplib "*) new_libs="$deplib $new_libs" ;; *) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$deplib $new_libs" ;; esac ;; esac ;; esac done tmp_libs= for deplib in $new_libs; do case $deplib in -L*) case " $tmp_libs " in *" $deplib "*) ;; *) func_append tmp_libs " $deplib" ;; esac ;; *) func_append tmp_libs " $deplib" ;; esac done eval $var=\"$tmp_libs\" done # for var fi # Last step: remove runtime libs from dependency_libs # (they stay in deplibs) tmp_libs= for i in $dependency_libs ; do case " $predeps $postdeps $compiler_lib_search_path " in *" $i "*) i="" ;; esac if test -n "$i" ; then func_append tmp_libs " $i" fi done dependency_libs=$tmp_libs done # for pass if test "$linkmode" = prog; then dlfiles="$newdlfiles" fi if test "$linkmode" = prog || test "$linkmode" = lib; then dlprefiles="$newdlprefiles" fi case $linkmode in oldlib) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for archives" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for archives" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for archives" test -n "$xrpath" && \ func_warning "\`-R' is ignored for archives" test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for archives" test -n "$release" && \ func_warning "\`-release' is ignored for archives" test -n "$export_symbols$export_symbols_regex" && \ func_warning "\`-export-symbols' is ignored for archives" # Now set the variables for building old libraries. build_libtool_libs=no oldlibs="$output" func_append objs "$old_deplibs" ;; lib) # Make sure we only generate libraries of the form `libNAME.la'. 
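# --- Editorial aside, not part of upstream ltmain.sh.  The long comment above
# explains the duplicate-removal policy: libraries recorded in $specialdeplibs
# keep every occurrence, everything else is reduced to a single copy while the
# list is reversed.  A simplified run of that loop (plain-library case only,
# names made up, function never called):
lt_editorial_demo_dedup ()
{
  specialdeplibs=" -la "
  tmp_libs="-la -lb -la -lb"
  new_libs=
  for deplib in $tmp_libs; do
    case " $specialdeplibs " in
    *" $deplib "*) new_libs="$deplib $new_libs" ;;
    *)
      case " $new_libs " in
      *" $deplib "*) ;;
      *) new_libs="$deplib $new_libs" ;;
      esac ;;
    esac
  done
  printf '%s\n' "$new_libs"   # "-la -lb -la ": both -la copies kept, -lb deduplicated
}
# --- End of editorial aside.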
case $outputname in lib*) func_stripname 'lib' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) test "$module" = no && \ func_fatal_help "libtool library \`$output' must begin with \`lib'" if test "$need_lib_prefix" != no; then # Add the "lib" prefix for modules if required func_stripname '' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else func_stripname '' '.la' "$outputname" libname=$func_stripname_result fi ;; esac if test -n "$objs"; then if test "$deplibs_check_method" != pass_all; then func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" else echo $ECHO "*** Warning: Linking the shared library $output against the non-libtool" $ECHO "*** objects $objs is not portable!" func_append libobjs " $objs" fi fi test "$dlself" != no && \ func_warning "\`-dlopen self' is ignored for libtool libraries" set dummy $rpath shift test "$#" -gt 1 && \ func_warning "ignoring multiple \`-rpath's for a libtool library" install_libdir="$1" oldlibs= if test -z "$rpath"; then if test "$build_libtool_libs" = yes; then # Building a libtool convenience library. # Some compilers have problems with a `.al' extension so # convenience libraries should have the same extension an # archive normally would. oldlibs="$output_objdir/$libname.$libext $oldlibs" build_libtool_libs=convenience build_old_libs=yes fi test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for convenience libraries" test -n "$release" && \ func_warning "\`-release' is ignored for convenience libraries" else # Parse the version information argument. save_ifs="$IFS"; IFS=':' set dummy $vinfo 0 0 0 shift IFS="$save_ifs" test -n "$7" && \ func_fatal_help "too many parameters to \`-version-info'" # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible case $vinfo_number in yes) number_major="$1" number_minor="$2" number_revision="$3" # # There are really only two kinds -- those that # use the current revision as the major version # and those that subtract age and use age as # a minor version. But, then there is irix # which has an extra 1 added just for fun # case $version_type in # correct linux to gnu/linux during the next big refactor darwin|linux|osf|windows|none) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_revision" ;; freebsd-aout|freebsd-elf|qnx|sunos) current="$number_major" revision="$number_minor" age="0" ;; irix|nonstopux) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_minor" lt_irix_increment=no ;; *) func_fatal_configuration "$modename: unknown library version type \`$version_type'" ;; esac ;; no) current="$1" revision="$2" age="$3" ;; esac # Check that each of the things are valid numbers. 
case $current in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "CURRENT \`$current' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $revision in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "REVISION \`$revision' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $age in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "AGE \`$age' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac if test "$age" -gt "$current"; then func_error "AGE \`$age' is greater than the current interface number \`$current'" func_fatal_error "\`$vinfo' is not valid version information" fi # Calculate the version variables. major= versuffix= verstring= case $version_type in none) ;; darwin) # Like Linux, but with the current version available in # verstring for coding it into the library header func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" # Darwin ld doesn't like 0 for these options... func_arith $current + 1 minor_current=$func_arith_result xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" ;; freebsd-aout) major=".$current" versuffix=".$current.$revision"; ;; freebsd-elf) major=".$current" versuffix=".$current" ;; irix | nonstopux) if test "X$lt_irix_increment" = "Xno"; then func_arith $current - $age else func_arith $current - $age + 1 fi major=$func_arith_result case $version_type in nonstopux) verstring_prefix=nonstopux ;; *) verstring_prefix=sgi ;; esac verstring="$verstring_prefix$major.$revision" # Add in all the interfaces that we are compatible with. loop=$revision while test "$loop" -ne 0; do func_arith $revision - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring_prefix$major.$iface:$verstring" done # Before this point, $major must not contain `.'. major=.$major versuffix="$major.$revision" ;; linux) # correct to gnu/linux during the next big refactor func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" ;; osf) func_arith $current - $age major=.$func_arith_result versuffix=".$current.$age.$revision" verstring="$current.$age.$revision" # Add in all the interfaces that we are compatible with. loop=$age while test "$loop" -ne 0; do func_arith $current - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring:${iface}.0" done # Make executables depend on our current version. func_append verstring ":${current}.0" ;; qnx) major=".$current" versuffix=".$current" ;; sunos) major=".$current" versuffix=".$current.$revision" ;; windows) # Use '-' rather than '.', since we only want one # extension on DOS 8.3 filesystems. func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; *) func_fatal_configuration "unknown library version type \`$version_type'" ;; esac # Clear the version info if we defaulted, and they specified a release. 
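# --- Editorial aside, not part of upstream ltmain.sh.  A worked example of the
# version arithmetic above for the GNU/Linux case: `-version-info 5:3:2' is
# parsed as current=5, revision=3, age=2 (vinfo_number=no), so the branch above
# yields major=.3 and versuffix=.3.2.3, i.e. a shared object such as
# libfoo.so.3.2.3 ("libfoo" is only an example name).  The function is never
# called.
lt_editorial_demo_version_info ()
{
  current=5 revision=3 age=2
  major=.`expr $current - $age`
  versuffix="$major.$age.$revision"
  printf 'major=%s versuffix=%s\n' "$major" "$versuffix"   # major=.3 versuffix=.3.2.3
}
# --- End of editorial aside.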
if test -z "$vinfo" && test -n "$release"; then major= case $version_type in darwin) # we can't check for "0.0" in archive_cmds due to quoting # problems, so we reset it completely verstring= ;; *) verstring="0.0" ;; esac if test "$need_version" = no; then versuffix= else versuffix=".0.0" fi fi # Remove version info from name if versioning should be avoided if test "$avoid_version" = yes && test "$need_version" = no; then major= versuffix= verstring="" fi # Check to see if the archive will have undefined symbols. if test "$allow_undefined" = yes; then if test "$allow_undefined_flag" = unsupported; then func_warning "undefined symbols not allowed in $host shared libraries" build_libtool_libs=no build_old_libs=yes fi else # Don't allow undefined symbols. allow_undefined_flag="$no_undefined_flag" fi fi func_generate_dlsyms "$libname" "$libname" "yes" func_append libobjs " $symfileobj" test "X$libobjs" = "X " && libobjs= if test "$opt_mode" != relink; then # Remove our outputs, but don't remove object files since they # may have been created when compiling PIC objects. removelist= tempremovelist=`$ECHO "$output_objdir/*"` for p in $tempremovelist; do case $p in *.$objext | *.gcno) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) if test "X$precious_files_regex" != "X"; then if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 then continue fi fi func_append removelist " $p" ;; *) ;; esac done test -n "$removelist" && \ func_show_eval "${RM}r \$removelist" fi # Now set the variables for building old libraries. if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then func_append oldlibs " $output_objdir/$libname.$libext" # Transform .lo files to .o files. oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` fi # Eliminate all temporary directories. #for path in $notinst_path; do # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` #done if test -n "$xrpath"; then # If the user specified any rpath flags, then add them. temp_xrpath= for libdir in $xrpath; do func_replace_sysroot "$libdir" func_append temp_xrpath " -R$func_replace_sysroot_result" case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then dependency_libs="$temp_xrpath $dependency_libs" fi fi # Make sure dlfiles contains only unique files that won't be dlpreopened old_dlfiles="$dlfiles" dlfiles= for lib in $old_dlfiles; do case " $dlprefiles $dlfiles " in *" $lib "*) ;; *) func_append dlfiles " $lib" ;; esac done # Make sure dlprefiles contains only unique files old_dlprefiles="$dlprefiles" dlprefiles= for lib in $old_dlprefiles; do case "$dlprefiles " in *" $lib "*) ;; *) func_append dlprefiles " $lib" ;; esac done if test "$build_libtool_libs" = yes; then if test -n "$rpath"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) # these systems don't actually have a c library (as such)! ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C library is in the System framework func_append deplibs " System.ltframework" ;; *-*-netbsd*) # Don't link with libc until the a.out ld.so is fixed. ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. 
;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work ;; *) # Add libc to deplibs on all other systems if necessary. if test "$build_libtool_need_lc" = "yes"; then func_append deplibs " -lc" fi ;; esac fi # Transform deplibs into only deplibs that can be linked in shared. name_save=$name libname_save=$libname release_save=$release versuffix_save=$versuffix major_save=$major # I'm not sure if I'm treating the release correctly. I think # release should show up in the -l (ie -lgmp5) so we don't want to # add it in twice. Is that correct? release="" versuffix="" major="" newdeplibs= droppeddeps=no case $deplibs_check_method in pass_all) # Don't check for shared/static. Everything works. # This might be a little naive. We might want to check # whether the library exists or not. But this is on # osf3 & osf4 and I'm not really sure... Just # implementing what was already the behavior. newdeplibs=$deplibs ;; test_compile) # This code stresses the "libraries are programs" paradigm to its # limits. Maybe even breaks it. We compile a program, linking it # against the deplibs as a proxy for the library. Then we can check # whether they linked in statically or dynamically with ldd. $opt_dry_run || $RM conftest.c cat > conftest.c </dev/null` $nocaseglob else potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` fi for potent_lib in $potential_libs; do # Follow soft links. if ls -lLd "$potent_lib" 2>/dev/null | $GREP " -> " >/dev/null; then continue fi # The statement above tries to avoid entering an # endless loop below, in case of cyclic links. # We might still enter an endless loop, since a link # loop can be closed while we follow links, # but so what? potlib="$potent_lib" while test -h "$potlib" 2>/dev/null; do potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` case $potliblink in [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; esac done if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | $SED -e 10q | $EGREP "$file_magic_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for file magic test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a file magic. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. 
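# A rough sketch (not libtool code) of the file_magic test above:
# resolve the candidate through its symlinks, then match the first few
# lines of `file` output against the configured regular expression.
# The path and the ELF regex below are assumptions for this example.
demo_potlib=/usr/lib/libdemo.so
while test -h "$demo_potlib" 2>/dev/null; do
  demo_link=`ls -ld "$demo_potlib" | sed 's/.* -> //'`
  case $demo_link in
    /*) demo_potlib=$demo_link ;;
    *)  demo_potlib=`echo "$demo_potlib" | sed 's,[^/]*$,,'`$demo_link ;;
  esac
done
file "$demo_potlib" 2>/dev/null | sed -e 10q |
  grep -E 'ELF.*shared object' >/dev/null && echo "accepted as a shared deplib"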
;; match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` for a_deplib in $deplibs; do case $a_deplib in -l*) func_stripname -l '' "$a_deplib" name=$func_stripname_result if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $a_deplib "*) func_append newdeplibs " $a_deplib" a_deplib="" ;; esac fi if test -n "$a_deplib" ; then libname=`eval "\\$ECHO \"$libname_spec\""` for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do potential_libs=`ls $i/$libname[.-]* 2>/dev/null` for potent_lib in $potential_libs; do potlib="$potent_lib" # see symlink-check above in file_magic test if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ $EGREP "$match_pattern_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a regex pattern. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. ;; none | unknown | *) newdeplibs="" tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then for i in $predeps $postdeps ; do # can't use Xsed below, because $i might contain '/' tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` done fi case $tmp_deplibs in *[!\ \ ]*) echo if test "X$deplibs_check_method" = "Xnone"; then echo "*** Warning: inter-library dependencies are not supported in this platform." else echo "*** Warning: inter-library dependencies are not known to be supported." fi echo "*** All declared inter-library dependencies are being dropped." droppeddeps=yes ;; esac ;; esac versuffix=$versuffix_save major=$major_save release=$release_save libname=$libname_save name=$name_save case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac if test "$droppeddeps" = yes; then if test "$module" = yes; then echo echo "*** Warning: libtool could not satisfy all declared inter-library" $ECHO "*** dependencies of module $libname. Therefore, libtool will create" echo "*** a static module, that should work as long as the dlopening" echo "*** application is linked with the -dlopen flag." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using \`nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** \`nm' from GNU binutils and a full rebuild may help." 
fi if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi else echo "*** The inter-library dependencies that have been dropped here will be" echo "*** automatically added whenever a program is linked with this library" echo "*** or is declared to -dlopen it." if test "$allow_undefined" = no; then echo echo "*** Since this library must not contain undefined symbols," echo "*** because either the platform does not support them or" echo "*** it was explicitly requested with -no-undefined," echo "*** libtool will only create a static version of it." if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi fi fi # Done checking deplibs! deplibs=$newdeplibs fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" case $host in *-*-darwin*) newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done deplibs="$new_libs" # All the library-specific variables (install_libdir is set above). library_names= old_library= dlname= # Test again, we may have decided not to build it any more if test "$build_libtool_libs" = yes; then # Remove ${wl} instances when linking with ld. # FIXME: should test the right _cmds variable. case $archive_cmds in *\$LD\ *) wl= ;; esac if test "$hardcode_into_libs" = yes; then # Hardcode the library paths hardcode_libdirs= dep_rpath= rpath="$finalize_rpath" test "$opt_mode" != relink && rpath="$compile_rpath$rpath" for libdir in $rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then func_replace_sysroot "$libdir" libdir=$func_replace_sysroot_result if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append dep_rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" fi if test -n "$runpath_var" && test -n "$perm_rpath"; then # We should set the runpath_var. 
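# Purely as an illustration (the demo_ values are assumptions, not taken
# from this configuration): on a typical ELF system the
# hardcode_libdir_flag_spec expansion above yields one -rpath flag
# carrying every accumulated, separator-joined directory.
demo_wl=-Wl,
demo_hardcode_libdirs=/usr/local/lib:/opt/demo/lib
demo_flag="${demo_wl}-rpath ${demo_wl}$demo_hardcode_libdirs"
echo "$demo_flag"   # -> -Wl,-rpath -Wl,/usr/local/lib:/opt/demo/lib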
rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" fi test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" fi shlibpath="$finalize_shlibpath" test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" if test -n "$shlibpath"; then eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" fi # Get the real and link names of the library. eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names shift realname="$1" shift if test -n "$soname_spec"; then eval soname=\"$soname_spec\" else soname="$realname" fi if test -z "$dlname"; then dlname=$soname fi lib="$output_objdir/$realname" linknames= for link do func_append linknames " $link" done # Use standard objects if they are pic test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` test "X$libobjs" = "X " && libobjs= delfiles= if test -n "$export_symbols" && test -n "$include_expsyms"; then $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" export_symbols="$output_objdir/$libname.uexp" func_append delfiles " $export_symbols" fi orig_export_symbols= case $host_os in cygwin* | mingw* | cegcc*) if test -n "$export_symbols" && test -z "$export_symbols_regex"; then # exporting using user supplied symfile if test "x`$SED 1q $export_symbols`" != xEXPORTS; then # and it's NOT already a .def file. Must figure out # which of the given symbols are data symbols and tag # them as such. So, trigger use of export_symbols_cmds. # export_symbols gets reassigned inside the "prepare # the list of exported symbols" if statement, so the # include_expsyms logic still works. orig_export_symbols="$export_symbols" export_symbols= always_export_symbols=yes fi fi ;; esac # Prepare the list of exported symbols if test -z "$export_symbols"; then if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols cmds=$export_symbols_cmds save_ifs="$IFS"; IFS='~' for cmd1 in $cmds; do IFS="$save_ifs" # Take the normal branch if the nm_file_list_spec branch # doesn't work or if tool conversion is not needed. case $nm_file_list_spec~$to_tool_file_cmd in *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) try_normal_branch=yes eval cmd=\"$cmd1\" func_len " $cmd" len=$func_len_result ;; *) try_normal_branch=no ;; esac if test "$try_normal_branch" = yes \ && { test "$len" -lt "$max_cmd_len" \ || test "$max_cmd_len" -le -1; } then func_show_eval "$cmd" 'exit $?' skipped_export=false elif test -n "$nm_file_list_spec"; then func_basename "$output" output_la=$func_basename_result save_libobjs=$libobjs save_output=$output output=${output_objdir}/${output_la}.nm func_to_tool_file "$output" libobjs=$nm_file_list_spec$func_to_tool_file_result func_append delfiles " $output" func_verbose "creating $NM input file list: $output" for obj in $save_libobjs; do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > "$output" eval cmd=\"$cmd1\" func_show_eval "$cmd" 'exit $?' output=$save_output libobjs=$save_libobjs skipped_export=false else # The command line is too long to execute in one step. func_verbose "using reloadable object file for export list..." skipped_export=: # Break out early, otherwise skipped_export may be # set to false by a later but shorter cmd. 
break fi done IFS="$save_ifs" if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi fi if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi tmp_deplibs= for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; *) func_append tmp_deplibs " $test_deplib" ;; esac done deplibs="$tmp_deplibs" if test -n "$convenience"; then if test -n "$whole_archive_flag_spec" && test "$compiler_needs_object" = yes && test -z "$libobjs"; then # extract the archives, so we have objects to list. # TODO: could optimize this to just extract one archive. whole_archive_flag_spec= fi if test -n "$whole_archive_flag_spec"; then save_libobjs=$libobjs eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= else gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $convenience func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi fi if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" func_append linker_flags " $flag" fi # Make a backup of the uninstalled library when relinking if test "$opt_mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then eval test_cmds=\"$module_expsym_cmds\" cmds=$module_expsym_cmds else eval test_cmds=\"$module_cmds\" cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then eval test_cmds=\"$archive_expsym_cmds\" cmds=$archive_expsym_cmds else eval test_cmds=\"$archive_cmds\" cmds=$archive_cmds fi fi if test "X$skipped_export" != "X:" && func_len " $test_cmds" && len=$func_len_result && test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else # The command line is too long to link in one step, link piecewise # or, if using GNU ld and skipped_export is not :, use a linker # script. # Save the value of $output and $libobjs because we want to # use them later. 
If we have whole_archive_flag_spec, we # want to use save_libobjs as it was before # whole_archive_flag_spec was expanded, because we can't # assume the linker understands whole_archive_flag_spec. # This may have to be revisited, in case too many # convenience libraries get linked in and end up exceeding # the spec. if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then save_libobjs=$libobjs fi save_output=$output func_basename "$output" output_la=$func_basename_result # Clear the reloadable object creation command queue and # initialize k to one. test_cmds= concat_cmds= objlist= last_robj= k=1 if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then output=${output_objdir}/${output_la}.lnkscript func_verbose "creating GNU ld script: $output" echo 'INPUT (' > $output for obj in $save_libobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done echo ')' >> $output func_append delfiles " $output" func_to_tool_file "$output" output=$func_to_tool_file_result elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then output=${output_objdir}/${output_la}.lnk func_verbose "creating linker input file list: $output" : > $output set x $save_libobjs shift firstobj= if test "$compiler_needs_object" = yes; then firstobj="$1 " shift fi for obj do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done func_append delfiles " $output" func_to_tool_file "$output" output=$firstobj\"$file_list_spec$func_to_tool_file_result\" else if test -n "$save_libobjs"; then func_verbose "creating reloadable object files..." output=$output_objdir/$output_la-${k}.$objext eval test_cmds=\"$reload_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 # Loop over the list of objects to be linked. for obj in $save_libobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result if test "X$objlist" = X || test "$len" -lt "$max_cmd_len"; then func_append objlist " $obj" else # The command $test_cmds is almost too long, add a # command to the queue. if test "$k" -eq 1 ; then # The first file doesn't have a previous command to add. reload_objs=$objlist eval concat_cmds=\"$reload_cmds\" else # All subsequent reloadable object files will link in # the last one created. reload_objs="$objlist $last_robj" eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" fi last_robj=$output_objdir/$output_la-${k}.$objext func_arith $k + 1 k=$func_arith_result output=$output_objdir/$output_la-${k}.$objext objlist=" $obj" func_len " $last_robj" func_arith $len0 + $func_len_result len=$func_arith_result fi done # Handle the remaining objects by creating one last # reloadable object file. All subsequent reloadable object # files will link in the last one created. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ reload_objs="$objlist $last_robj" eval concat_cmds=\"\${concat_cmds}$reload_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" fi func_append delfiles " $output" else output= fi if ${skipped_export-false}; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols libobjs=$output # Append the command to create the export file. 
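# A small sketch (not part of libtool; file names are examples) of the
# GNU ld workaround used above when the object list is too long for a
# single command line: the objects are listed in an INPUT() linker
# script and that one file is passed to the link instead.
cat > demo-objs.lnkscript <<'EOF'
INPUT (
alpha.o
beta.o
gamma.o
)
EOF
# e.g.:  gcc -shared -o libdemo.so.1 demo-objs.lnkscript   (GNU ld only)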
test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi fi test -n "$save_libobjs" && func_verbose "creating a temporary reloadable object file: $output" # Loop through the commands generated above and execute them. save_ifs="$IFS"; IFS='~' for cmd in $concat_cmds; do IFS="$save_ifs" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test "$opt_mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" if test -n "$export_symbols_regex" && ${skipped_export-false}; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi if ${skipped_export-false}; then if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi fi libobjs=$output # Restore the value of output. output=$save_output if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= fi # Expand the library linking commands again to reset the # value of $libobjs for piecewise linking. # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then cmds=$module_expsym_cmds else cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then cmds=$archive_expsym_cmds else cmds=$archive_cmds fi fi fi if test -n "$delfiles"; then # Append the command to remove temporary files to $cmds. eval cmds=\"\$cmds~\$RM $delfiles\" fi # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" eval cmd=\"$cmd\" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? 
# Restore the uninstalled library and exit if test "$opt_mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" # Restore the uninstalled library and exit if test "$opt_mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? if test -n "$convenience"; then if test -z "$whole_archive_flag_spec"; then func_show_eval '${RM}r "$gentop"' fi fi exit $EXIT_SUCCESS fi # Create links to the real library. for linkname in $linknames; do if test "$realname" != "$linkname"; then func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done # If -module or -export-dynamic was specified, set the dlname. if test "$module" = yes || test "$export_dynamic" = yes; then # On all known operating systems, these are identical. dlname="$soname" fi fi ;; obj) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for objects" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for objects" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for objects" test -n "$xrpath" && \ func_warning "\`-R' is ignored for objects" test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for objects" test -n "$release" && \ func_warning "\`-release' is ignored for objects" case $output in *.lo) test -n "$objs$old_deplibs" && \ func_fatal_error "cannot build library object \`$output' from non-libtool objects" libobj=$output func_lo2o "$libobj" obj=$func_lo2o_result ;; *) libobj= obj="$output" ;; esac # Delete the old objects. $opt_dry_run || $RM $obj $libobj # Objects from convenience libraries. This assumes # single-version convenience libraries. Whenever we create # different ones for PIC/non-PIC, this we'll have to duplicate # the extraction. reload_conv_objs= gentop= # reload_cmds runs $LD directly, so let us get rid of # -Wl from whole_archive_flag_spec and hope we can get by with # turning comma into space.. wl= if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` else gentop="$output_objdir/${obj}x" func_append generated " $gentop" func_extract_archives $gentop $convenience reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi # If we're not building shared, we need to use non_pic_objs test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" # Create the old-style object. reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test output="$obj" func_execute_cmds "$reload_cmds" 'exit $?' # Exit if we aren't doing a library object file. if test -z "$libobj"; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS fi if test "$build_libtool_libs" != yes; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi # Create an invalid libtool object if no PIC, so that we don't # accidentally link it into a program. # $show "echo timestamp > $libobj" # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? exit $EXIT_SUCCESS fi if test -n "$pic_flag" || test "$pic_mode" != default; then # Only do commands if we really have different PIC objects. 
reload_objs="$libobjs $reload_conv_objs" output="$libobj" func_execute_cmds "$reload_cmds" 'exit $?' fi if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS ;; prog) case $host in *cygwin*) func_stripname '' '.exe' "$output" output=$func_stripname_result.exe;; esac test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for programs" test -n "$release" && \ func_warning "\`-release' is ignored for programs" test "$preload" = yes \ && test "$dlopen_support" = unknown \ && test "$dlopen_self" = unknown \ && test "$dlopen_self_static" = unknown && \ func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library is the System framework compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac case $host in *-*-darwin*) # Don't allow lazy linking, it breaks C++ global constructors # But is supposedly fixed on 10.4 or later (yay!). if test "$tagname" = CXX ; then case ${MACOSX_DEPLOYMENT_TARGET-10.0} in 10.[0123]) func_append compile_command " ${wl}-bind_at_load" func_append finalize_command " ${wl}-bind_at_load" ;; esac fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $compile_deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $compile_deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done compile_deplibs="$new_libs" func_append compile_command " $compile_deplibs" func_append finalize_command " $finalize_deplibs" if test -n "$rpath$xrpath"; then # If the user specified any rpath flags, then add them. for libdir in $rpath $xrpath; do # This is the magic to use -rpath. case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done fi # Now hardcode the library paths rpath= hardcode_libdirs= for libdir in $compile_rpath $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$libdir:"*) ;; ::) dllsearchpath=$libdir;; *) func_append dllsearchpath ":$libdir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi compile_rpath="$rpath" rpath= hardcode_libdirs= for libdir in $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in *" $libdir "*) ;; *) func_append finalize_perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi finalize_rpath="$rpath" if test -n "$libobjs" && test "$build_old_libs" = yes; then # Transform all the library objects into standard objects. compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` fi func_generate_dlsyms "$outputname" "@PROGRAM@" "no" # template prelinking step if test -n "$prelink_cmds"; then func_execute_cmds "$prelink_cmds" 'exit $?' fi wrappers_required=yes case $host in *cegcc* | *mingw32ce*) # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. wrappers_required=no ;; *cygwin* | *mingw* ) if test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; *) if test "$need_relink" = no || test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; esac if test "$wrappers_required" = no; then # Replace the output file specification. compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` link_command="$compile_command$compile_rpath" # We have no uninstalled library dependencies, so finalize right now. exit_status=0 func_show_eval "$link_command" 'exit_status=$?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Delete the generated files. 
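# An illustrative sketch (not libtool code; the directory is made up) of
# the lib-to-bin substitution used above when extending the DLL search
# path on Windows-style hosts.
demo_libdir=/usr/local/lib
demo_testbindir=`echo "$demo_libdir" | sed -e 's*/lib$*/bin*'`
echo "$demo_testbindir"   # -> /usr/local/bin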
if test -f "$output_objdir/${outputname}S.${objext}"; then func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' fi exit $exit_status fi if test -n "$compile_shlibpath$finalize_shlibpath"; then compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" fi if test -n "$finalize_shlibpath"; then finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" fi compile_var= finalize_var= if test -n "$runpath_var"; then if test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi if test -n "$finalize_perm_rpath"; then # We should set the runpath_var. rpath= for dir in $finalize_perm_rpath; do func_append rpath "$dir:" done finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi fi if test "$no_install" = yes; then # We don't need to create a wrapper script. link_command="$compile_var$compile_command$compile_rpath" # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` # Delete the old output file. $opt_dry_run || $RM $output # Link the executable and exit func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi exit $EXIT_SUCCESS fi if test "$hardcode_action" = relink; then # Fast installation is not supported link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" func_warning "this platform does not like uninstalled shared libraries" func_warning "\`$output' will be relinked during installation" else if test "$fast_install" != no; then link_command="$finalize_var$compile_command$finalize_rpath" if test "$fast_install" = yes; then relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` else # fast_install is set to needless relink_command= fi else link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" fi fi # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` # Delete the old output files. $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output_objdir/$outputname" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Now create the wrapper script. func_verbose "creating $output" # Quote the relink command for shipping. 
if test -n "$relink_command"; then # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done relink_command="(cd `pwd`; $relink_command)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` fi # Only actually do things if not in dry run mode. $opt_dry_run || { # win32 will think the script is a binary if it has # a .exe suffix, so we strip it off here. case $output in *.exe) func_stripname '' '.exe' "$output" output=$func_stripname_result ;; esac # test for cygwin because mv fails w/o .exe extensions case $host in *cygwin*) exeext=.exe func_stripname '' '.exe' "$outputname" outputname=$func_stripname_result ;; *) exeext= ;; esac case $host in *cygwin* | *mingw* ) func_dirname_and_basename "$output" "" "." output_name=$func_basename_result output_path=$func_dirname_result cwrappersource="$output_path/$objdir/lt-$output_name.c" cwrapper="$output_path/$output_name.exe" $RM $cwrappersource $cwrapper trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 func_emit_cwrapperexe_src > $cwrappersource # The wrapper executable is built using the $host compiler, # because it contains $host paths and files. If cross- # compiling, it, like the target executable, must be # executed on the $host or under an emulation environment. $opt_dry_run || { $LTCC $LTCFLAGS -o $cwrapper $cwrappersource $STRIP $cwrapper } # Now, create the wrapper script for func_source use: func_ltwrapper_scriptname $cwrapper $RM $func_ltwrapper_scriptname_result trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 $opt_dry_run || { # note: this script will not be executed, so do not chmod. if test "x$build" = "x$host" ; then $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else func_emit_wrapper no > $func_ltwrapper_scriptname_result fi } ;; * ) $RM $output trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 func_emit_wrapper no > $output chmod +x $output ;; esac } exit $EXIT_SUCCESS ;; esac # See if we need to build an old-fashioned archive. for oldlib in $oldlibs; do if test "$build_libtool_libs" = convenience; then oldobjs="$libobjs_save $symfileobj" addlibs="$convenience" build_libtool_libs=no else if test "$build_libtool_libs" = module; then oldobjs="$libobjs_save" build_libtool_libs=no else oldobjs="$old_deplibs $non_pic_objects" if test "$preload" = yes && test -f "$symfileobj"; then func_append oldobjs " $symfileobj" fi fi addlibs="$old_convenience" fi if test -n "$addlibs"; then gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $addlibs func_append oldobjs " $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then cmds=$old_archive_from_new_cmds else # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append oldobjs " $func_extract_archives_result" fi # POSIX demands no paths to be encoded in archives. 
We have # to avoid creating archives with duplicate basenames if we # might have to extract them afterwards, e.g., when creating a # static archive out of a convenience library, or when linking # the entirety of a libtool archive into another (currently # not supported by libtool). if (for obj in $oldobjs do func_basename "$obj" $ECHO "$func_basename_result" done | sort | sort -uc >/dev/null 2>&1); then : else echo "copying selected object files to avoid basename conflicts..." gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_mkdir_p "$gentop" save_oldobjs=$oldobjs oldobjs= counter=1 for obj in $save_oldobjs do func_basename "$obj" objbase="$func_basename_result" case " $oldobjs " in " ") oldobjs=$obj ;; *[\ /]"$objbase "*) while :; do # Make sure we don't pick an alternate name that also # overlaps. newobj=lt$counter-$objbase func_arith $counter + 1 counter=$func_arith_result case " $oldobjs " in *[\ /]"$newobj "*) ;; *) if test ! -f "$gentop/$newobj"; then break; fi ;; esac done func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" func_append oldobjs " $gentop/$newobj" ;; *) func_append oldobjs " $obj" ;; esac done fi func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result eval cmds=\"$old_archive_cmds\" func_len " $cmds" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then cmds=$old_archive_cmds elif test -n "$archiver_list_spec"; then func_verbose "using command file archive linking..." for obj in $oldobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > $output_objdir/$libname.libcmd func_to_tool_file "$output_objdir/$libname.libcmd" oldobjs=" $archiver_list_spec$func_to_tool_file_result" cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts func_verbose "using piecewise archive linking..." save_RANLIB=$RANLIB RANLIB=: objlist= concat_cmds= save_oldobjs=$oldobjs oldobjs= # Is there a better way of finding the last object in the list? for obj in $save_oldobjs do last_oldobj=$obj done eval test_cmds=\"$old_archive_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 for obj in $save_oldobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result func_append objlist " $obj" if test "$len" -lt "$max_cmd_len"; then : else # the above command should be used before it gets too long oldobjs=$objlist if test "$obj" = "$last_oldobj" ; then RANLIB=$save_RANLIB fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" objlist= len=$len0 fi done RANLIB=$save_RANLIB oldobjs=$objlist if test "X$oldobjs" = "X" ; then eval cmds=\"\$concat_cmds\" else eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi func_execute_cmds "$cmds" 'exit $?' done test -n "$generated" && \ func_show_eval "${RM}r$generated" # Now create the libtool archive. 
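# A simplified sketch (not libtool code; archive and object names are
# examples) of the piecewise archive build above: when the object list
# will not fit on one command line, the archive is grown in batches and
# indexed only once, after the last batch.
ar cru libdemo.a chunk1-*.o    # first batch creates the archive
ar cru libdemo.a chunk2-*.o    # later batches append to it
ranlib libdemo.a               # build the symbol index at the end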
case $output in *.la) old_library= test "$build_old_libs" = yes && old_library="$libname.$libext" func_verbose "creating $output" # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done # Quote the link command for shipping. relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` if test "$hardcode_automatic" = yes ; then relink_command= fi # Only create the output if not a dry run. $opt_dry_run || { for installed in no yes; do if test "$installed" = yes; then if test -z "$install_libdir"; then break fi output="$output_objdir/$outputname"i # Replace all uninstalled libtool libraries with the installed ones newdependency_libs= for deplib in $dependency_libs; do case $deplib in *.la) func_basename "$deplib" name="$func_basename_result" func_resolve_sysroot "$deplib" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" ;; -L*) func_stripname -L '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -L$func_replace_sysroot_result" ;; -R*) func_stripname -R '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -R$func_replace_sysroot_result" ;; *) func_append newdependency_libs " $deplib" ;; esac done dependency_libs="$newdependency_libs" newdlfiles= for lib in $dlfiles; do case $lib in *.la) func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" ;; *) func_append newdlfiles " $lib" ;; esac done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in *.la) # Only pass preopened files to the pseudo-archive (for # eventual linking with the app. 
that links it) if we # didn't already link the preopened objects directly into # the library: func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" ;; esac done dlprefiles="$newdlprefiles" else newdlfiles= for lib in $dlfiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlfiles " $abs" done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlprefiles " $abs" done dlprefiles="$newdlprefiles" fi $RM $output # place dlname in correct position for cygwin # In fact, it would be nice if we could use this code for all target # systems that can't hard-code library paths into their executables # and that have no shared library path variable independent of PATH, # but it turns out we can't easily determine that from inspecting # libtool variables, so we have to hard-code the OSs to which it # applies here; at the moment, that means platforms that use the PE # object format with DLL files. See the long comment at the top of # tests/bindir.at for full details. tdlname=$dlname case $host,$output,$installed,$module,$dlname in *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) # If a -bindir argument was supplied, place the dll there. if test "x$bindir" != x ; then func_relative_path "$install_libdir" "$bindir" tdlname=$func_relative_path_result$dlname else # Otherwise fall back on heuristic. tdlname=../bin/$dlname fi ;; esac $ECHO > $output "\ # $outputname - a libtool library file # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='$tdlname' # Names of this library. library_names='$library_names' # The name of the static archive. old_library='$old_library' # Linker flags that can not go in dependency_libs. inherited_linker_flags='$new_inherited_linker_flags' # Libraries that this one depends upon. dependency_libs='$dependency_libs' # Names of additional weak libraries provided by this library weak_library_names='$weak_libs' # Version information for $libname. current=$current age=$age revision=$revision # Is this an already installed library? installed=$installed # Should we warn about portability when linking against -modules? shouldnotlink=$module # Files to dlopen/dlpreopen dlopen='$dlfiles' dlpreopen='$dlprefiles' # Directory that this library needs to be installed in: libdir='$install_libdir'" if test "$installed" = no && test "$need_relink" = yes; then $ECHO >> $output "\ relink_command=\"$relink_command\"" fi done } # Do a symbolic link so that the libtool archive can be found in # LD_LIBRARY_PATH before the program is installed. func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' ;; esac exit $EXIT_SUCCESS } { test "$opt_mode" = link || test "$opt_mode" = relink; } && func_mode_link ${1+"$@"} # func_mode_uninstall arg... func_mode_uninstall () { $opt_debug RM="$nonopt" files= rmforce= exit_status=0 # This variable tells wrapper scripts just to set variables rather # than running their programs. 
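# For illustration (not libtool code; the path is an example): the sed
# idiom used above to read a single assignment, such as libdir, back out
# of a generated *.la file; the quotes stored in the file are stripped
# by the eval.
eval demo_libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' /usr/local/lib/libdemo.la`
echo "$demo_libdir"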
libtool_install_magic="$magic" for arg do case $arg in -f) func_append RM " $arg"; rmforce=yes ;; -*) func_append RM " $arg" ;; *) func_append files " $arg" ;; esac done test -z "$RM" && \ func_fatal_help "you must specify an RM program" rmdirs= for file in $files; do func_dirname "$file" "" "." dir="$func_dirname_result" if test "X$dir" = X.; then odir="$objdir" else odir="$dir/$objdir" fi func_basename "$file" name="$func_basename_result" test "$opt_mode" = uninstall && odir="$dir" # Remember odir for removal later, being careful to avoid duplicates if test "$opt_mode" = clean; then case " $rmdirs " in *" $odir "*) ;; *) func_append rmdirs " $odir" ;; esac fi # Don't error if the file doesn't exist and rm -f was used. if { test -L "$file"; } >/dev/null 2>&1 || { test -h "$file"; } >/dev/null 2>&1 || test -f "$file"; then : elif test -d "$file"; then exit_status=1 continue elif test "$rmforce" = yes; then continue fi rmfiles="$file" case $name in *.la) # Possibly a libtool archive, so verify it. if func_lalib_p "$file"; then func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do func_append rmfiles " $odir/$n" done test -n "$old_library" && func_append rmfiles " $odir/$old_library" case "$opt_mode" in clean) case " $library_names " in *" $dlname "*) ;; *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; esac test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; esac fi ;; *.lo) # Possibly a libtool object, so verify it. if func_lalib_p "$file"; then # Read the .lo file func_source $dir/$name # Add PIC object to the list of files to remove. if test -n "$pic_object" && test "$pic_object" != none; then func_append rmfiles " $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. if test -n "$non_pic_object" && test "$non_pic_object" != none; then func_append rmfiles " $dir/$non_pic_object" fi fi ;; *) if test "$opt_mode" = clean ; then noexename=$name case $file in *.exe) func_stripname '' '.exe' "$file" file=$func_stripname_result func_stripname '' '.exe' "$name" noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe func_append rmfiles " $file" ;; esac # Do a test to see if this is a libtool program. 
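# For illustration only (not part of this script; file names are
# examples): the clean and uninstall modes handled here are normally
# invoked like this, with the real remove command passed through.
libtool --mode=clean rm -f libdemo.la demo
libtool --mode=uninstall rm -f /usr/local/lib/libdemo.la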
if func_ltwrapper_p "$file"; then if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" relink_command= func_source $func_ltwrapper_scriptname_result func_append rmfiles " $func_ltwrapper_scriptname_result" else relink_command= func_source $dir/$noexename fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles func_append rmfiles " $odir/$name $odir/${name}S.${objext}" if test "$fast_install" = yes && test -n "$relink_command"; then func_append rmfiles " $odir/lt-$name" fi if test "X$noexename" != "X$name" ; then func_append rmfiles " $odir/lt-${noexename}.c" fi fi fi ;; esac func_show_eval "$RM $rmfiles" 'exit_status=1' done # Try to remove the ${objdir}s in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status } { test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && func_mode_uninstall ${1+"$@"} test -z "$opt_mode" && { help="$generic_help" func_fatal_help "you must specify a MODE" } test -z "$exec_cmd" && \ func_fatal_help "invalid operation mode \`$opt_mode'" if test -n "$exec_cmd"; then eval exec "$exec_cmd" exit $EXIT_FAILURE fi exit $exit_status # The TAGs below are defined such that we never get into a situation # in which we disable both kinds of libraries. Given conflicting # choices, we go for a static library, that is the most portable, # since we can't tell whether shared libraries were disabled because # the user asked for that or because the platform doesn't support # them. This is particularly important on AIX, because we don't # support having both static and shared libraries enabled at the same # time on that platform, so we default to a shared-only configuration. # If a disable-shared tag is given, we'll fallback to a static-only # configuration. But we'll never go from static-only to shared-only. # ### BEGIN LIBTOOL TAG CONFIG: disable-shared build_libtool_libs=no build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: # vi:sw=2 dovecot-2.2.9/README0000644000175000017500000000557112244400443010775 00000000000000Installation ------------ See INSTALL file. Configuration ------------- See doc/documentation.txt or http://wiki2.dovecot.org/ RFCs conformed -------------- email: 822 - Standard for ARPA Internet Text Messages 2822 - Internet Message Format (updated rfc822) 2045..2049 - Multipurpose Internet Mail Extensions (MIME) auth: 2245 - Anonymous SASL Mechanism. 
2595 - Using TLS with IMAP, POP3 and ACAP 2831 - Using Digest Authentication as a SASL Mechanism (DIGEST-MD5) 5802 - Salted Challenge Response Authentication Mechanism (SCRAM) SASL and GSS-API Mechanisms POP3: 1939 - Post Office Protocol - Version 3 2449 - POP3 Extension Mechanism 3206 - The SYS and AUTH POP Response Codes IMAP base: 3501 - IMAP4rev1 2180 - IMAP4 Multi-Accessed Mailbox Practice 2683 - IMAP4 Implementation Recommendations IMAP extensions: 2087 - IMAP4 QUOTA extension 2088 - IMAP4 non-synchronizing literals (LITERAL+) 2177 - IMAP4 IDLE command 2221 - IMAP4 Login Referrals 2342 - IMAP4 Namespace 2971 - IMAP4 ID extension 3348 - IMAP4 Child Mailbox Extension 3502 - IMAP4 MULTIAPPEND Extension 3691 - IMAP4 UNSELECT command 4314 - IMAP4 Access Control List (ACL) Extension 4315 - IMAP UIDPLUS extension 4467 - IMAP URLAUTH Extension 4469 - IMAP CATENATE Extension 4551 - IMAP Extension for Conditional STORE Operation or Quick Flag Changes Resynchronization 4731 - IMAP4 Extension to SEARCH Command for Controlling What Kind of Information Is Returned 4959 - IMAP Extension for Simple Authentication and Security Layer (SASL) Initial Client Response 4978 - The IMAP COMPRESS Extension 5032 - WITHIN Search Extension to the IMAP Protocol 5162 - IMAP4 Extensions for Quick Mailbox Resynchronization 5182 - IMAP Extension for Referencing the Last SEARCH Result 5255 - IMAP Internationalization 5256 - IMAP SORT and THREAD Extensions 5258 - IMAP4 - LIST Command Extensions 5267 - Contexts for IMAP4 5524 - Extended URLFETCH for Binary and Converted Parts 5530 - IMAP Response Codes 5819 - IMAP4 Extension for Returning STATUS Information in Extended LIST 5957 - Display-Based Address Sorting for the IMAP4 SORT Extension 6154 - IMAP LIST Extension for Special-Use Mailboxes 6203 - IMAP4 Extension for Fuzzy Search 6851 - Internet Message Access Protocol (IMAP) - MOVE Extension Contact info ------------ Timo Sirainen , http://www.dovecot.org/ Please use the Dovecot mailing list for questions about Dovecot. You can post to the list without subscribing, the mail then waits in a moderator queue for a while. See http://dovecot.org/mailinglists.html dovecot-2.2.9/dovecot.socket0000644000175000017500000000044612244400443012766 00000000000000[Unit] Description=Dovecot IMAP/POP3 email server activation socket [Socket] #dovecot expects separate IPv4 and IPv6 sockets BindIPv6Only=ipv6-only ListenStream=0.0.0.0:143 ListenStream=[::]:143 ListenStream=0.0.0.0:993 ListenStream=[::]:993 KeepAlive=true [Install] WantedBy=sockets.target dovecot-2.2.9/dovecot.m40000644000175000017500000000644012244400443012016 00000000000000# dovecot.m4 - Check presence of dovecot -*-Autoconf-*- # # Copyright (C) 2010 Dennis Schridde # # This file is free software; the authors give # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. 
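dnl
dnl Example: an out-of-tree Dovecot plugin can use these macros from its own
dnl configure.ac roughly as sketched below (the plugin name is hypothetical,
dnl and a real build also needs automake/libtool initialization):
dnl
dnl   AC_INIT([dovecot-example-plugin], [0.1])
dnl   AC_PROG_CC
dnl   DC_DOVECOT                 dnl locate dovecot-config, AC_SUBST the LIBDOVECOT_* flags
dnl   DC_DOVECOT_MODULEDIR       dnl add --with-moduledir, AC_SUBST moduledir
dnl   AC_CONFIG_FILES([Makefile])
dnl   AC_OUTPUT
dnl
dnl The plugin's Makefile.am can then build against $(LIBDOVECOT_INCLUDE)
dnl and install its module into $(moduledir).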
# serial 11 AC_DEFUN([DC_DOVECOT_MODULEDIR],[ AC_ARG_WITH(moduledir, [ --with-moduledir=DIR Base directory for dynamically loadable modules], moduledir="$withval", moduledir=$libdir/dovecot ) AC_SUBST(moduledir) ]) AC_DEFUN([DC_PLUGIN_DEPS],[ _plugin_deps=yes AC_MSG_CHECKING([whether OS supports plugin dependencies]) case "$host_os" in darwin*) # OSX loads the plugins twice, which breaks stuff _plugin_deps=no ;; esac AC_MSG_RESULT([$_plugin_deps]) AM_CONDITIONAL([DOVECOT_PLUGIN_DEPS], [test "x$_plugin_deps" = "xyes"]) unset _plugin_deps ]) # Substitute every var in the given comma seperated list AC_DEFUN([AX_SUBST_L],[ m4_foreach([__var__], [$@], [AC_SUBST(__var__)]) ]) AC_DEFUN([DC_DOVECOT],[ AC_ARG_WITH(dovecot, [ --with-dovecot=DIR Dovecot base directory], [ dovecotdir="$withval" ], [ dc_prefix=$prefix test "x$dc_prefix" = xNONE && dc_prefix=$ac_default_prefix dovecotdir="$dc_prefix/lib/dovecot" ] ) AC_ARG_WITH(dovecot-install-dirs, [AC_HELP_STRING([--with-dovecot-install-dirs], [Use install directories configured for Dovecot (default)])], if test x$withval = xno; then use_install_dirs=no else use_install_dirs=yes fi, use_install_dirs=yes) AC_MSG_CHECKING([for dovecot-config in "$dovecotdir"]) if test -f "$dovecotdir/dovecot-config"; then AC_MSG_RESULT([$dovecotdir/dovecot-config]) else AC_MSG_RESULT([not found]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([Use --with-dovecot=DIR to provide the path to the dovecot-config file.]) AC_MSG_ERROR([dovecot-config not found]) fi old=`pwd` cd $dovecotdir abs_dovecotdir=`pwd` cd $old DISTCHECK_CONFIGURE_FLAGS="--with-dovecot=$abs_dovecotdir --without-dovecot-install-dirs" eval `grep -i '^dovecot_[[a-z_]]*=' "$dovecotdir"/dovecot-config` eval `grep '^LIBDOVECOT[[A-Z_]]*=' "$dovecotdir"/dovecot-config` if test "$use_install_dirs" = "no"; then # the main purpose of these is to fix make distcheck for plugins # other than that, they don't really make much sense dovecot_pkgincludedir='$(pkgincludedir)' dovecot_pkglibdir='$(pkglibdir)' dovecot_pkglibexecdir='$(libexecdir)/dovecot' dovecot_docdir='$(docdir)' dovecot_moduledir='$(moduledir)' fi AX_SUBST_L([DISTCHECK_CONFIGURE_FLAGS], [dovecotdir], [dovecot_moduledir], [dovecot_pkgincludedir], [dovecot_pkglibexecdir], [dovecot_pkglibdir], [dovecot_docdir]) AX_SUBST_L([DOVECOT_CFLAGS], [DOVECOT_LIBS], [DOVECOT_SSL_LIBS], [DOVECOT_SQL_LIBS], [DOVECOT_COMPRESS_LIBS]) AX_SUBST_L([LIBDOVECOT], [LIBDOVECOT_LOGIN], [LIBDOVECOT_SQL], [LIBDOVECOT_SSL], [LIBDOVECOT_COMPRESS], [LIBDOVECOT_LDA], [LIBDOVECOT_STORAGE]) AX_SUBST_L([LIBDOVECOT_DEPS], [LIBDOVECOT_LOGIN_DEPS], [LIBDOVECOT_SQL_DEPS], [LIBDOVECOT_SSL_DEPS], [LIBDOVECOT_COMPRESS_DEPS], [LIBDOVECOT_LDA_DEPS], [LIBDOVECOT_STORAGE_DEPS]) AX_SUBST_L([LIBDOVECOT_INCLUDE], [LIBDOVECOT_LDA_INCLUDE], [LIBDOVECOT_DOVEADM_INCLUDE], [LIBDOVECOT_SERVICE_INCLUDE], [LIBDOVECOT_STORAGE_INCLUDE], [LIBDOVECOT_LOGIN_INCLUDE], [LIBDOVECOT_CONFIG_INCLUDE], [LIBDOVECOT_IMAP_INCLUDE]) DC_PLUGIN_DEPS ]) dovecot-2.2.9/config.h.in0000644000175000017500000004430512244477302012146 00000000000000/* config.h.in. Generated from configure.ac by autoheader. 
*/ /* Define if building universal (internal helper macro) */ #undef AC_APPLE_UNIVERSAL_BUILD /* Define if you have buggy CMSG macros */ #undef BUGGY_CMSG_MACROS /* Build with CDB support */ #undef BUILD_CDB /* Build with Berkeley DB support */ #undef BUILD_DB /* Built-in MySQL support */ #undef BUILD_MYSQL /* Built-in PostgreSQL support */ #undef BUILD_PGSQL /* Built-in SQLite support */ #undef BUILD_SQLITE /* GSSAPI support is built in */ #undef BUILTIN_GSSAPI /* LDAP support is built in */ #undef BUILTIN_LDAP /* IMAP capabilities advertised in banner */ #undef CAPABILITY_BANNER_STRING /* IMAP capabilities */ #undef CAPABILITY_STRING /* Define if _XPG6 macro is needed for crypt() */ #undef CRYPT_USE_XPG6 /* Build with extra debugging checks */ #undef DEBUG /* Define if your dev_t is a structure instead of integer type */ #undef DEV_T_STRUCT /* Path to /dev/urandom */ #undef DEV_URANDOM_PATH /* Disable asserts */ #undef DISABLE_ASSERTS /* Dovecot ABI version */ #undef DOVECOT_ABI_VERSION /* Dovecot name */ #undef DOVECOT_NAME /* Dovecot string */ #undef DOVECOT_STRING /* Dovecot version */ #undef DOVECOT_VERSION /* Dovecot major version */ #undef DOVECOT_VERSION_MAJOR /* Dovecot minor version */ #undef DOVECOT_VERSION_MINOR /* How to define flexible array members in structs */ #undef FLEXIBLE_ARRAY_MEMBER /* Define to 1 if you have the `backtrace_symbols' function. */ #undef HAVE_BACKTRACE_SYMBOLS /* Define if you have bzlib library */ #undef HAVE_BZLIB /* Define to 1 if you have the `clearenv' function. */ #undef HAVE_CLEARENV /* Define if you have the clock_gettime function */ #undef HAVE_CLOCK_GETTIME /* Define if you have struct dirent->d_type */ #undef HAVE_DIRENT_D_TYPE /* Define to 1 if you have the header file. */ #undef HAVE_DIRENT_H /* Define to 1 if you have the `dirfd' function. */ #undef HAVE_DIRFD /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H /* Define to 1 if you have the header file. */ #undef HAVE_EXECINFO_H /* Define to 1 if you have the `fallocate' function. */ #undef HAVE_FALLOCATE /* Define to 1 if you have the `fcntl' function. */ #undef HAVE_FCNTL /* Define if you have fdatasync() */ #undef HAVE_FDATASYNC /* Define to 1 if you have the `flock' function. */ #undef HAVE_FLOCK /* Define if you have FreeBSD-compatible sendfile() */ #undef HAVE_FREEBSD_SENDFILE /* Define to 1 if you have the header file. */ #undef HAVE_GC_GC_H /* Define to 1 if you have the header file. */ #undef HAVE_GC_H /* Define to 1 if you have the `getmntent' function. */ #undef HAVE_GETMNTENT /* Define to 1 if you have the `getmntinfo' function. */ #undef HAVE_GETMNTINFO /* Define to 1 if you have the `getpagesize' function. */ #undef HAVE_GETPAGESIZE /* Define to 1 if you have the `getpeereid' function. */ #undef HAVE_GETPEEREID /* Define to 1 if you have the `getpeerucred' function. */ #undef HAVE_GETPEERUCRED /* Define to 1 if you have the `glob' function. */ #undef HAVE_GLOB /* Define to 1 if you have the header file. */ #undef HAVE_GLOB_H /* Build with GNUTLS support */ #undef HAVE_GNUTLS /* Build with GSSAPI support */ #undef HAVE_GSSAPI /* Define to 1 if you have the header file. */ #undef HAVE_GSSAPI_GSSAPI_EXT_H /* GSSAPI headers in gssapi/gssapi.h */ #undef HAVE_GSSAPI_GSSAPI_H /* Define to 1 if you have the header file. */ #undef HAVE_GSSAPI_GSSAPI_KRB5_H /* GSSAPI headers in gssapi.h */ #undef HAVE_GSSAPI_H /* Define to 1 if you have the header file. 
*/ #undef HAVE_GSSAPI_KRB5_H /* GSSAPI supports SPNEGO */ #undef HAVE_GSSAPI_SPNEGO /* Define to 1 if you have the `gsskrb5_register_acceptor_identity' function. */ #undef HAVE_GSSKRB5_REGISTER_ACCEPTOR_IDENTITY /* Define if you have the iconv() function and it works. */ #undef HAVE_ICONV /* Define to 1 if you have the `inet_aton' function. */ #undef HAVE_INET_ATON /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* Build with IPv6 support */ #undef HAVE_IPV6 /* Define to 1 if you have the header file. */ #undef HAVE_JFS_QUOTA_H /* Define to 1 if you have the `kevent' function. */ #undef HAVE_KEVENT /* Define to 1 if you have the `kqueue' function. */ #undef HAVE_KQUEUE /* Define to 1 if you have the `krb5_gss_register_acceptor_identity' function. */ #undef HAVE_KRB5_GSS_REGISTER_ACCEPTOR_IDENTITY /* libcap is installed for cap_init() */ #undef HAVE_LIBCAP /* Define to 1 if you have the header file. */ #undef HAVE_LIBGEN_H /* Define if you have libwrap */ #undef HAVE_LIBWRAP /* Define to 1 if you have the header file. */ #undef HAVE_LINUX_DQBLK_XFS_H /* Define to 1 if you have the header file. */ #undef HAVE_LINUX_FALLOC_H /* Define if you have Linux-compatible mremap() */ #undef HAVE_LINUX_MREMAP /* Define if you have Linux-compatible sendfile() */ #undef HAVE_LINUX_SENDFILE /* Define to 1 if you have the `lockf' function. */ #undef HAVE_LOCKF /* Define if you want textcat (Debian version) support for CLucene */ #undef HAVE_LUCENE_EXTTEXTCAT /* Define if you want stemming support for CLucene */ #undef HAVE_LUCENE_STEMMER /* Define if you want textcat support for CLucene */ #undef HAVE_LUCENE_TEXTCAT /* Define if you have lzma library */ #undef HAVE_LZMA /* Define to 1 if you have the `madvise' function. */ #undef HAVE_MADVISE /* Define to 1 if you have the header file. */ #undef HAVE_MALLOC_H /* Define to 1 if you have the header file. */ #undef HAVE_MALLOC_NP_H /* Define to 1 if you have the `malloc_usable_size' function. */ #undef HAVE_MALLOC_USABLE_SIZE /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the header file. */ #undef HAVE_MNTENT_H /* Define if you have dynamic module support */ #undef HAVE_MODULES /* Build with MySQL support */ #undef HAVE_MYSQL /* Define if your MySQL library has SSL functions */ #undef HAVE_MYSQL_SSL /* Define if your MySQL library supports setting cipher */ #undef HAVE_MYSQL_SSL_CIPHER /* Define if you don't have C99 compatible vsnprintf() call */ #undef HAVE_OLD_VSNPRINTF /* Build with OpenSSL support */ #undef HAVE_OPENSSL /* Define to 1 if you have the header file. */ #undef HAVE_OPENSSL_ERR_H /* Define if you have openssl/rand.h */ #undef HAVE_OPENSSL_RAND_H /* Define to 1 if you have the header file. */ #undef HAVE_OPENSSL_SSL_H /* Define if you have pam/pam_appl.h */ #undef HAVE_PAM_PAM_APPL_H /* Define if you have pam_setcred() */ #undef HAVE_PAM_SETCRED /* Build with PostgreSQL support */ #undef HAVE_PGSQL /* Define to 1 if you have the `posix_fadvise' function. */ #undef HAVE_POSIX_FADVISE /* Define if you have a working posix_fallocate() */ #undef HAVE_POSIX_FALLOCATE /* Define if libpq has PQescapeStringConn function */ #undef HAVE_PQESCAPE_STRING_CONN /* Define to 1 if you have the `pread' function. */ #undef HAVE_PREAD /* Define if you have prctl(PR_SET_DUMPABLE) */ #undef HAVE_PR_SET_DUMPABLE /* Define to 1 if you have the `quotactl' function. */ #undef HAVE_QUOTACTL /* Define to 1 if you have the header file. 
*/ #undef HAVE_QUOTA_H /* Define if you have quota_open() */ #undef HAVE_QUOTA_OPEN /* Define if Q_QUOTACTL exists */ #undef HAVE_Q_QUOTACTL /* Define if you have RLIMIT_AS for setrlimit() */ #undef HAVE_RLIMIT_AS /* Define if you have RLIMIT_CORE for getrlimit() */ #undef HAVE_RLIMIT_CORE /* Define if you have RLIMIT_NPROC for setrlimit() */ #undef HAVE_RLIMIT_NPROC /* Define if you wish to retrieve quota of NFS mounted mailboxes */ #undef HAVE_RQUOTA /* Define to 1 if you have the header file. */ #undef HAVE_SASL_H /* Define to 1 if you have the header file. */ #undef HAVE_SASL_SASL_H /* Define if you have security/pam_appl.h */ #undef HAVE_SECURITY_PAM_APPL_H /* Define to 1 if you have the `setegid' function. */ #undef HAVE_SETEGID /* Define to 1 if you have the `seteuid' function. */ #undef HAVE_SETEUID /* Define to 1 if you have the `setpriority' function. */ #undef HAVE_SETPRIORITY /* Define to 1 if you have the `setproctitle' function. */ #undef HAVE_SETPROCTITLE /* Define to 1 if you have the `setresgid' function. */ #undef HAVE_SETRESGID /* Define to 1 if you have the `setreuid' function. */ #undef HAVE_SETREUID /* Define to 1 if you have the `setrlimit' function. */ #undef HAVE_SETRLIMIT /* Define to 1 if you have the `sigaction' function. */ #undef HAVE_SIGACTION /* Define to 'int' if you don't have socklen_t */ #undef HAVE_SOCKLEN_T /* Define if you have Solaris-compatible sendfile() */ #undef HAVE_SOLARIS_SENDFILE /* Build with SQLite3 support */ #undef HAVE_SQLITE /* Build with SSL/TLS support */ #undef HAVE_SSL /* Build with OpenSSL compression */ #undef HAVE_SSL_COMPRESSION /* Build with TLS hostname support */ #undef HAVE_SSL_GET_SERVERNAME /* Define if you have statfs.f_mntfromname */ #undef HAVE_STATFS_MNTFROMNAME /* Define if you have statvfs.f_mntfromname */ #undef HAVE_STATVFS_MNTFROMNAME /* Define if you have st_?tim timespec fields in struct stat */ #undef HAVE_STAT_XTIM /* Define if you have st_?timespec fields in struct stat */ #undef HAVE_STAT_XTIMESPEC /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the `strcasecmp' function. */ #undef HAVE_STRCASECMP /* Define to 1 if you have the `stricmp' function. */ #undef HAVE_STRICMP /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define if you have strtoimax function */ #undef HAVE_STRTOIMAX /* Define to 1 if you have the `strtoll' function. */ #undef HAVE_STRTOLL /* Define to 1 if you have the `strtoq' function. */ #undef HAVE_STRTOQ /* Define to 1 if you have the `strtoull' function. */ #undef HAVE_STRTOULL /* Define if you have strtoumax function */ #undef HAVE_STRTOUMAX /* Define to 1 if you have the `strtouq' function. */ #undef HAVE_STRTOUQ /* Define if struct sqblk.dqb_curblocks exists */ #undef HAVE_STRUCT_DQBLK_CURBLOCKS /* Define if struct sqblk.dqb_curspace exists */ #undef HAVE_STRUCT_DQBLK_CURSPACE /* Define if you have struct iovec */ #undef HAVE_STRUCT_IOVEC /* Define to 1 if the system has the type `struct sockpeercred'. */ #undef HAVE_STRUCT_SOCKPEERCRED /* Define if you want to use systemd socket activation */ #undef HAVE_SYSTEMD /* Define to 1 if you have the header file. */ #undef HAVE_SYS_EVENT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_FS_QUOTA_COMMON_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_FS_UFS_QUOTA_H /* Define to 1 if you have the header file. 
*/ #undef HAVE_SYS_MKDEV_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_MNTTAB_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_QUOTA_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_RESOURCE_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_SELECT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_SYSMACROS_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TIME_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_UCRED_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_UIO_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_UTSNAME_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_VMOUNT_H /* Define if you have struct tm->tm_gmtoff */ #undef HAVE_TM_GMTOFF /* Define if you have typeof() */ #undef HAVE_TYPEOF /* Define to 1 if you have the header file. */ #undef HAVE_UCONTEXT_H /* Define to 1 if you have the header file. */ #undef HAVE_UCRED_H /* Define to 1 if you have the header file. */ #undef HAVE_UFS_UFS_QUOTA_H /* Define if you have uintmax_t (C99 type) */ #undef HAVE_UINTMAX_T /* Define if you have uint_fast32_t (C99 type) */ #undef HAVE_UINT_FAST32_T /* Define to 1 if you have the `uname' function. */ #undef HAVE_UNAME /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the `unsetenv' function. */ #undef HAVE_UNSETENV /* Define if you have a native uoff_t type */ #undef HAVE_UOFF_T /* Define to 1 if you have the `vsyslog' function. */ #undef HAVE_VSYSLOG /* Define to 1 if you have the `walkcontext' function. */ #undef HAVE_WALKCONTEXT /* Define to 1 if you have the `writev' function. */ #undef HAVE_WRITEV /* Define to 1 if you have the header file. */ #undef HAVE_XFS_XQM_H /* Define if you have zlib library */ #undef HAVE_ZLIB /* Define to 1 if the system has the type `_Bool'. */ #undef HAVE__BOOL /* Define if you have __gss_userok() */ #undef HAVE___GSS_USEROK /* Define as const if the declaration of iconv() needs const. */ #undef ICONV_CONST /* Implement I/O loop with Linux 2.6 epoll() */ #undef IOLOOP_EPOLL /* Implement I/O loop with BSD kqueue() */ #undef IOLOOP_KQUEUE /* Use Linux dnotify */ #undef IOLOOP_NOTIFY_DNOTIFY /* Use Linux inotify */ #undef IOLOOP_NOTIFY_INOTIFY /* Use BSD kqueue directory changes notification */ #undef IOLOOP_NOTIFY_KQUEUE /* No special notify support */ #undef IOLOOP_NOTIFY_NONE /* Implement I/O loop with poll() */ #undef IOLOOP_POLL /* Implement I/O loop with select() */ #undef IOLOOP_SELECT /* Define if you have ldap_initialize */ #undef LDAP_HAVE_INITIALIZE /* Define if you have ldap_start_tls_s */ #undef LDAP_HAVE_START_TLS_S /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* List of compiled in mail storages */ #undef MAIL_STORAGES /* Required memory alignment */ #undef MEM_ALIGN_SIZE /* Define if shared mmaps don't get updated by write()s */ #undef MMAP_CONFLICTS_WRITE /* Dynamic module suffix */ #undef MODULE_SUFFIX /* Maximum value of off_t */ #undef OFF_T_MAX /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package.
*/ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Support URL */ #undef PACKAGE_WEBPAGE /* Build with BSD authentication support */ #undef PASSDB_BSDAUTH /* Build with checkpassword passdb support */ #undef PASSDB_CHECKPASSWORD /* Build with LDAP support */ #undef PASSDB_LDAP /* Build with PAM support */ #undef PASSDB_PAM /* Build with passwd support */ #undef PASSDB_PASSWD /* Build with passwd-file support */ #undef PASSDB_PASSWD_FILE /* Build with shadow support */ #undef PASSDB_SHADOW /* Build with Tru64 SIA support */ #undef PASSDB_SIA /* Build with SQL support */ #undef PASSDB_SQL /* Build with vpopmail support */ #undef PASSDB_VPOPMAIL /* Define if pread/pwrite implementation is broken */ #undef PREAD_BROKEN /* Define if pread/pwrite needs _XOPEN_SOURCE 500 */ #undef PREAD_WRAPPERS /* printf() format for size_t */ #undef PRIuSIZE_T /* printf() format for uoff_t */ #undef PRIuUOFF_T /* Define if process title can be changed by modifying argv */ #undef PROCTITLE_HACK /* The size of `int', as computed by sizeof. */ #undef SIZEOF_INT /* The size of `long', as computed by sizeof. */ #undef SIZEOF_LONG /* The size of `long long', as computed by sizeof. */ #undef SIZEOF_LONG_LONG /* The size of `void *', as computed by sizeof. */ #undef SIZEOF_VOID_P /* Build SQL drivers as plugins */ #undef SQL_DRIVER_PLUGINS /* Maximum value of ssize_t */ #undef SSIZE_T_MAX /* reasonable mntctl buffer size */ #undef STATIC_MTAB_SIZE /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* max. time_t bits gmtime() can handle */ #undef TIME_T_MAX_BITS /* Define if your time_t is signed */ #undef TIME_T_SIGNED /* Define if unsetenv() returns int */ #undef UNSETENV_RET_INT /* Define if off_t is int */ #undef UOFF_T_INT /* Define if off_t is long */ #undef UOFF_T_LONG /* Define if off_t is long long */ #undef UOFF_T_LONG_LONG /* Build with checkpassword userdb support */ #undef USERDB_CHECKPASSWORD /* Build with LDAP support */ #undef USERDB_LDAP /* Build with NSS module support */ #undef USERDB_NSS /* Build with passwd support */ #undef USERDB_PASSWD /* Build with passwd-file support */ #undef USERDB_PASSWD_FILE /* Build with prefetch userdb support */ #undef USERDB_PREFETCH /* Build with SQL support */ #undef USERDB_SQL /* Build with vpopmail support */ #undef USERDB_VPOPMAIL /* Define if you want to use Boehm GC */ #undef USE_GC /* A 'va_copy' style function */ #undef VA_COPY /* 'va_lists' cannot be copied as values */ #undef VA_COPY_AS_ARRAY /* Version number of package */ #undef VERSION /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN # undef WORDS_BIGENDIAN # endif #endif /* Enable large inode numbers on Mac OS X 10.5. */ #ifndef _DARWIN_USE_64_BIT_INODE # define _DARWIN_USE_64_BIT_INODE 1 #endif /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS /* Define for large files, on AIX-style hosts. */ #undef _LARGE_FILES /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name.
*/ #ifndef __cplusplus #undef inline #endif /* Define to 'unsigned int' if you don't have it */ #undef size_t /* Define to 'int' if you don't have it */ #undef ssize_t dovecot-2.2.9/Makefile.am0000644000175000017500000000534012244400443012143 00000000000000aclocaldir = $(datadir)/aclocal SUBDIRS = \ . \ src \ doc dist_pkginclude_HEADERS = \ dovecot-version.h EXTRA_DIST = \ COPYING.LGPL \ COPYING.MIT \ ChangeLog \ is-tagged.py \ run-test.sh \ cc-wrapper.sh.in \ update-version.sh \ $(conf_DATA) noinst_DATA = dovecot-config nodist_pkginclude_HEADERS = config.h if MAINTAINER_MODE ChangeLog: .hg/dirstate hg log -r tip:fef8259e7277 --style=changelog > ChangeLog endif aclocal_DATA = dovecot.m4 dovecot-version.h: noop $(SHELL) $(top_srcdir)/update-version.sh $(top_srcdir) $(top_builddir) noop: dovecot-config: dovecot-config.in Makefile old=`pwd` && cd $(top_builddir) && abs_builddir=`pwd` && cd $$old && \ cd $(top_srcdir) && abs_srcdir=`pwd` && cd $$old && \ cat dovecot-config.in | sed \ -e "s|\$$(top_builddir)|$$abs_builddir|g" \ -e "s|\$$(incdir)|$$abs_srcdir|g" \ -e "s|\$$(LIBICONV)|$(LIBICONV)|g" \ -e "s|\$$(MODULE_LIBS)|$(MODULE_LIBS)|g" \ -e "s|^\(dovecot_pkgincludedir\)=|\1=$(pkgincludedir)|" \ -e "s|^\(dovecot_pkglibdir\)=|\1=$(pkglibdir)|" \ -e "s|^\(dovecot_pkglibexecdir\)=|\1=$(libexecdir)/dovecot|" \ -e "s|^\(dovecot_docdir\)=|\1=$(docdir)|" \ -e "s|^\(dovecot_moduledir\)=|\1=$(moduledir)|" \ > dovecot-config if HAVE_SYSTEMD %.service: %.service.in $(AM_V_GEN)sed -e 's,@sbindir\@,$(sbindir),g' $< > $@ systemdsystemunit_DATA = \ dovecot.socket \ dovecot.service else EXTRA_DIST += dovecot.socket dovecot.service.in endif install-exec-hook: $(mkdir_p) $(DESTDIR)$(pkglibdir); \ grep -v '^LIBDOVECOT_.*_INCLUDE' dovecot-config | \ grep -v '^LIBDOVECOT.*_DEPS' | sed \ -e "s|^\(LIBDOVECOT\)=.*$$|\1='-L$(pkglibdir) -ldovecot'|" \ -e "s|^\(LIBDOVECOT_LOGIN\)=.*$$|\1='-ldovecot-login $(SSL_LIBS)'|" \ -e "s|^\(LIBDOVECOT_SQL\)=.*$$|\1=-ldovecot-sql|" \ -e "s|^\(LIBDOVECOT_COMPRESS\)=.*$$|\1=-ldovecot-compression|" \ -e "s|^\(LIBDOVECOT_LDA\)=.*$$|\1=-ldovecot-lda|" \ -e "s|^\(LIBDOVECOT_STORAGE\)=.*$$|\1='-ldovecot-storage $(LINKED_STORAGE_LDADD)'|" \ -e "s|^\(LIBDOVECOT_INCLUDE\)=.*$$|\1=-I$(pkgincludedir)|" \ > $(DESTDIR)$(pkglibdir)/dovecot-config uninstall-hook: rm $(DESTDIR)$(pkglibdir)/dovecot-config CLEANFILES = $(datafiles) if HAVE_SYSTEMD CLEANFILES += $(systemdsystemunit_DATA) endif DISTCLEANFILES = \ $(top_builddir)/dovecot-version.h \ $(top_builddir)/dovecot-config distcheck-hook: if which scan-build > /dev/null; then \ cd $(distdir)/_build; \ scan-build -o scan-reports ../configure --with-ldap=auto --with-pgsql=auto --with-mysql=auto --with-sqlite=auto --with-solr=auto --with-gssapi=auto --with-libwrap=auto; \ rm -rf scan-reports; \ scan-build -o scan-reports make 2>&1 || exit 1; \ if ! rmdir scan-reports 2>/dev/null; then \ exit 1; \ fi; \ cd ../..; rm -rf $(distdir)/_build/*; \ fi dovecot-2.2.9/missing0000755000175000017500000001533112225740561011516 00000000000000#! /bin/sh # Common wrapper for a few potentially missing GNU programs. scriptversion=2012-06-26.16; # UTC # Copyright (C) 1996-2013 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try '$0 --help' for more information" exit 1 fi case $1 in --is-lightweight) # Used by our autoconf macros to check whether the available missing # script is modern enough. exit 0 ;; --run) # Back-compat with the calling convention used by older automake. shift ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due to PROGRAM being missing or too old. Options: -h, --help display this help and exit -v, --version output version information and exit Supported PROGRAM values: aclocal autoconf autoheader autom4te automake makeinfo bison yacc flex lex help2man Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and 'g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: unknown '$1' option" echo 1>&2 "Try '$0 --help' for more information" exit 1 ;; esac # Run the given program, remember its exit status. "$@"; st=$? # If it succeeded, we are done. test $st -eq 0 && exit 0 # Also exit now if we it failed (or wasn't found), and '--version' was # passed; such an option is passed most likely to detect whether the # program is present and works. case $2 in --version|--help) exit $st;; esac # Exit code 63 means version mismatch. This often happens when the user # tries to use an ancient version of a tool on a file that requires a # minimum version. if test $st -eq 63; then msg="probably too old" elif test $st -eq 127; then # Program was missing. msg="missing on your system" else # Program was found and executed, but failed. Give up. exit $st fi perl_URL=http://www.perl.org/ flex_URL=http://flex.sourceforge.net/ gnu_software_URL=http://www.gnu.org/software program_details () { case $1 in aclocal|automake) echo "The '$1' program is part of the GNU Automake package:" echo "<$gnu_software_URL/automake>" echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/autoconf>" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; autoconf|autom4te|autoheader) echo "The '$1' program is part of the GNU Autoconf package:" echo "<$gnu_software_URL/autoconf/>" echo "It also requires GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; esac } give_advice () { # Normalize program name to check for. normalized_program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` printf '%s\n' "'$1' is $msg." configure_deps="'configure.ac' or m4 files included by 'configure.ac'" case $normalized_program in autoconf*) echo "You should only need it if you modified 'configure.ac'," echo "or m4 files included by it." program_details 'autoconf' ;; autoheader*) echo "You should only need it if you modified 'acconfig.h' or" echo "$configure_deps." 
program_details 'autoheader' ;; automake*) echo "You should only need it if you modified 'Makefile.am' or" echo "$configure_deps." program_details 'automake' ;; aclocal*) echo "You should only need it if you modified 'acinclude.m4' or" echo "$configure_deps." program_details 'aclocal' ;; autom4te*) echo "You might have modified some maintainer files that require" echo "the 'automa4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) echo "You should only need it if you modified a '.y' file." echo "You may want to install the GNU Bison package:" echo "<$gnu_software_URL/bison/>" ;; lex*|flex*) echo "You should only need it if you modified a '.l' file." echo "You may want to install the Fast Lexical Analyzer package:" echo "<$flex_URL>" ;; help2man*) echo "You should only need it if you modified a dependency" \ "of a man page." echo "You may want to install the GNU Help2man package:" echo "<$gnu_software_URL/help2man/>" ;; makeinfo*) echo "You should only need it if you modified a '.texi' file, or" echo "any other file indirectly affecting the aspect of the manual." echo "You might want to install the Texinfo package:" echo "<$gnu_software_URL/texinfo/>" echo "The spurious makeinfo call might also be the consequence of" echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" echo "want to install GNU make:" echo "<$gnu_software_URL/make/>" ;; *) echo "You might have modified some files without having the proper" echo "tools for further handling them. Check the 'README' file, it" echo "often tells you about the needed prerequisites for installing" echo "this package. You may also peek at any GNU archive site, in" echo "case some other package contains this missing '$1' program." ;; esac } give_advice "$1" | sed -e '1s/^/WARNING: /' \ -e '2,$s/^/ /' >&2 # Propagate the correct exit status (expected to be 127 for a program # not found, 63 for a program that failed due to version mismatch). exit $st # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: dovecot-2.2.9/aclocal.m40000644000175000017500000144567712244477265012015 00000000000000# generated automatically by aclocal 1.14 -*- Autoconf -*- # Copyright (C) 1996-2013 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, [m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) # iconv.m4 serial 18 (gettext-0.18.2) dnl Copyright (C) 2000-2002, 2007-2013 Free Software Foundation, Inc. 
dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. AC_DEFUN([AM_ICONV_LINKFLAGS_BODY], [ dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV dnl accordingly. AC_LIB_LINKFLAGS_BODY([iconv]) ]) AC_DEFUN([AM_ICONV_LINK], [ dnl Some systems have iconv in libc, some have it in libiconv (OSF/1 and dnl those with the standalone portable GNU libiconv installed). AC_REQUIRE([AC_CANONICAL_HOST]) dnl for cross-compiles dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV dnl accordingly. AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) dnl Add $INCICONV to CPPFLAGS before performing the following checks, dnl because if the user has installed libiconv and not disabled its use dnl via --without-libiconv-prefix, he wants to use it. The first dnl AC_LINK_IFELSE will then fail, the second AC_LINK_IFELSE will succeed. am_save_CPPFLAGS="$CPPFLAGS" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCICONV]) AC_CACHE_CHECK([for iconv], [am_cv_func_iconv], [ am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include #include ]], [[iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);]])], [am_cv_func_iconv=yes]) if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include #include ]], [[iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);]])], [am_cv_lib_iconv=yes] [am_cv_func_iconv=yes]) LIBS="$am_save_LIBS" fi ]) if test "$am_cv_func_iconv" = yes; then AC_CACHE_CHECK([for working iconv], [am_cv_func_iconv_works], [ dnl This tests against bugs in AIX 5.1, AIX 6.1..7.1, HP-UX 11.11, dnl Solaris 10. am_save_LIBS="$LIBS" if test $am_cv_lib_iconv = yes; then LIBS="$LIBS $LIBICONV" fi AC_RUN_IFELSE( [AC_LANG_SOURCE([[ #include #include int main () { int result = 0; /* Test against AIX 5.1 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); if (cd_utf8_to_88591 != (iconv_t)(-1)) { static const char input[] = "\342\202\254"; /* EURO SIGN */ char buf[10]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_utf8_to_88591, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 1; iconv_close (cd_utf8_to_88591); } } /* Test against Solaris 10 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); if (cd_ascii_to_88591 != (iconv_t)(-1)) { static const char input[] = "\263"; char buf[10]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_ascii_to_88591, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 2; iconv_close (cd_ascii_to_88591); } } /* Test against AIX 6.1..7.1 bug: Buffer overrun. 
*/ { iconv_t cd_88591_to_utf8 = iconv_open ("UTF-8", "ISO-8859-1"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static const char input[] = "\304"; static char buf[2] = { (char)0xDE, (char)0xAD }; const char *inptr = input; size_t inbytesleft = 1; char *outptr = buf; size_t outbytesleft = 1; size_t res = iconv (cd_88591_to_utf8, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if (res != (size_t)(-1) || outptr - buf > 1 || buf[1] != (char)0xAD) result |= 4; iconv_close (cd_88591_to_utf8); } } #if 0 /* This bug could be worked around by the caller. */ /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static const char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) result |= 8; iconv_close (cd_88591_to_utf8); } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ if (/* Try standardized names. */ iconv_open ("UTF-8", "EUC-JP") == (iconv_t)(-1) /* Try IRIX, OSF/1 names. */ && iconv_open ("UTF-8", "eucJP") == (iconv_t)(-1) /* Try AIX names. */ && iconv_open ("UTF-8", "IBM-eucJP") == (iconv_t)(-1) /* Try HP-UX names. */ && iconv_open ("utf8", "eucJP") == (iconv_t)(-1)) result |= 16; return result; }]])], [am_cv_func_iconv_works=yes], [am_cv_func_iconv_works=no], [ changequote(,)dnl case "$host_os" in aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; *) am_cv_func_iconv_works="guessing yes" ;; esac changequote([,])dnl ]) LIBS="$am_save_LIBS" ]) case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then AC_DEFINE([HAVE_ICONV], [1], [Define if you have the iconv() function and it works.]) fi if test "$am_cv_lib_iconv" = yes; then AC_MSG_CHECKING([how to link with libiconv]) AC_MSG_RESULT([$LIBICONV]) else dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV dnl either. CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi AC_SUBST([LIBICONV]) AC_SUBST([LTLIBICONV]) ]) dnl Define AM_ICONV using AC_DEFUN_ONCE for Autoconf >= 2.64, in order to dnl avoid warnings like dnl "warning: AC_REQUIRE: `AM_ICONV' was expanded before it was required". dnl This is tricky because of the way 'aclocal' is implemented: dnl - It requires defining an auxiliary macro whose name ends in AC_DEFUN. dnl Otherwise aclocal's initial scan pass would miss the macro definition. dnl - It requires a line break inside the AC_DEFUN_ONCE and AC_DEFUN expansions. dnl Otherwise aclocal would emit many "Use of uninitialized value $1" dnl warnings. 
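dnl
dnl Typical use, sketched for illustration only (the project name below is
dnl hypothetical, and the package must also ship the gettext helper files
dnl such as config.rpath for the macros above to work):
dnl
dnl   configure.ac:
dnl     AC_INIT([example], [0.1])
dnl     AC_PROG_CC
dnl     AM_ICONV
dnl     AC_OUTPUT
dnl
dnl On success AM_ICONV AC_DEFINEs HAVE_ICONV, AC_SUBSTs LIBICONV and
dnl LTLIBICONV for linking, and defines ICONV_CONST (to "const" or empty)
dnl so C callers can declare "ICONV_CONST char *inptr" and stay compatible
dnl with either iconv() prototype.
dnl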
m4_define([gl_iconv_AC_DEFUN], m4_version_prereq([2.64], [[AC_DEFUN_ONCE( [$1], [$2])]], [m4_ifdef([gl_00GNULIB], [[AC_DEFUN_ONCE( [$1], [$2])]], [[AC_DEFUN( [$1], [$2])]])])) gl_iconv_AC_DEFUN([AM_ICONV], [ AM_ICONV_LINK if test "$am_cv_func_iconv" = yes; then AC_MSG_CHECKING([for iconv declaration]) AC_CACHE_VAL([am_cv_proto_iconv], [ AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [[ #include #include extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif ]], [[]])], [am_cv_proto_iconv_arg1=""], [am_cv_proto_iconv_arg1="const"]) am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` AC_MSG_RESULT([ $am_cv_proto_iconv]) AC_DEFINE_UNQUOTED([ICONV_CONST], [$am_cv_proto_iconv_arg1], [Define as const if the declaration of iconv() needs const.]) dnl Also substitute ICONV_CONST in the gnulib generated . m4_ifdef([gl_ICONV_H_DEFAULTS], [AC_REQUIRE([gl_ICONV_H_DEFAULTS]) if test -n "$am_cv_proto_iconv_arg1"; then ICONV_CONST="const" fi ]) fi ]) # lib-ld.m4 serial 6 dnl Copyright (C) 1996-2003, 2009-2013 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl Subroutines of libtool.m4, dnl with replacements s/_*LT_PATH/AC_LIB_PROG/ and s/lt_/acl_/ to avoid dnl collision with libtool.m4. dnl From libtool-2.4. Sets the variable with_gnu_ld to yes or no. AC_DEFUN([AC_LIB_PROG_LD_GNU], [AC_CACHE_CHECK([if the linker ($LD) is GNU ld], [acl_cv_prog_gnu_ld], [# I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 /dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`echo "$ac_prog"| sed 's%\\\\%/%g'` while echo "$ac_prog" | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL([acl_cv_path_LD], [if test -z "$LD"; then acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$acl_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 = 1.10 to complain if config.rpath is missing. 
m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([config.rpath])]) AC_REQUIRE([AC_PROG_CC]) dnl we use $CC, $GCC, $LDFLAGS AC_REQUIRE([AC_LIB_PROG_LD]) dnl we use $LD, $with_gnu_ld AC_REQUIRE([AC_CANONICAL_HOST]) dnl we use $host AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT]) dnl we use $ac_aux_dir AC_CACHE_CHECK([for shared library run path origin], [acl_cv_rpath], [ CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done ]) wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" acl_libname_spec="$acl_cv_libname_spec" acl_library_names_spec="$acl_cv_library_names_spec" acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" dnl Determine whether the user wants rpath handling at all. AC_ARG_ENABLE([rpath], [ --disable-rpath do not hardcode runtime library paths], :, enable_rpath=yes) ]) dnl AC_LIB_FROMPACKAGE(name, package) dnl declares that libname comes from the given package. The configure file dnl will then not have a --with-libname-prefix option but a dnl --with-package-prefix option. Several libraries can come from the same dnl package. This declaration must occur before an AC_LIB_LINKFLAGS or similar dnl macro call that searches for libname. AC_DEFUN([AC_LIB_FROMPACKAGE], [ pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) define([acl_frompackage_]NAME, [$2]) popdef([NAME]) pushdef([PACK],[$2]) pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) define([acl_libsinpackage_]PACKUP, m4_ifdef([acl_libsinpackage_]PACKUP, [m4_defn([acl_libsinpackage_]PACKUP)[, ]],)[lib$1]) popdef([PACKUP]) popdef([PACK]) ]) dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. dnl Also, sets the LIB${NAME}_PREFIX variable to nonempty if libname was found dnl in ${LIB${NAME}_PREFIX}/$acl_libdirstem. AC_DEFUN([AC_LIB_LINKFLAGS_BODY], [ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) pushdef([PACK],[m4_ifdef([acl_frompackage_]NAME, [acl_frompackage_]NAME, lib[$1])]) pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) pushdef([PACKLIBS],[m4_ifdef([acl_frompackage_]NAME, [acl_libsinpackage_]PACKUP, lib[$1])]) dnl Autoconf >= 2.61 supports dots in --with options. pushdef([P_A_C_K],[m4_if(m4_version_compare(m4_defn([m4_PACKAGE_VERSION]),[2.61]),[-1],[m4_translit(PACK,[.],[_])],PACK)]) dnl By default, look in $includedir and $libdir. 
use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_ARG_WITH(P_A_C_K[-prefix], [[ --with-]]P_A_C_K[[-prefix[=DIR] search for ]PACKLIBS[ in DIR/include and DIR/lib --without-]]P_A_C_K[[-prefix don't search for ]PACKLIBS[ in includedir and libdir]], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" if test "$acl_libdirstem2" != "$acl_libdirstem" \ && ! test -d "$withval/$acl_libdirstem"; then additional_libdir="$withval/$acl_libdirstem2" fi fi fi ]) dnl Search the library and its dependencies in $additional_libdir and dnl $LDFLAGS. Using breadth-first-seach. LIB[]NAME= LTLIB[]NAME= INC[]NAME= LIB[]NAME[]_PREFIX= dnl HAVE_LIB${NAME} is an indicator that LIB${NAME}, LTLIB${NAME} have been dnl computed. So it has to be reset here. HAVE_LIB[]NAME= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='$1 $2' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" dnl See if it was already located by an earlier AC_LIB_LINKFLAGS dnl or AC_LIB_HAVE_LINKFLAGS call. uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" else dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined dnl that this library doesn't exist. So just drop it. : fi else dnl Search the library lib$name in $additional_libdir and $LDFLAGS dnl and the already constructed $LIBNAME/$LTLIBNAME. found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then dir="$additional_libdir" dnl The same code as in the loop below: dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. 
if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then dnl Found the library. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then dnl Linking with a shared library. We attempt to hardcode its dnl directory into the executable's runpath, unless it's the dnl standard /usr/lib. if test "$enable_rpath" = no \ || test "X$found_dir" = "X/usr/$acl_libdirstem" \ || test "X$found_dir" = "X/usr/$acl_libdirstem2"; then dnl No hardcoding is needed. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl Use an explicit option to hardcode DIR into the resulting dnl binary. dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi dnl The hardcoding into $LIBNAME is system dependent. if test "$acl_hardcode_direct" = yes; then dnl Using DIR/libNAME.so during linking hardcodes DIR into the dnl resulting binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode DIR into the resulting dnl binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else dnl Rely on "-L$found_dir". dnl But don't add it if it's already contained in the LDFLAGS dnl or the already constructed $LIBNAME haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. 
LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl We cannot use $acl_hardcode_runpath_var and LD_RUN_PATH dnl here, because this doesn't fit in flags passed to the dnl compiler. So give up. No hardcoding. This affects only dnl very old systems. dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then dnl Linking with a static library. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" else dnl We shouldn't come here, but anyway it's good to have a dnl fallback. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" fi fi dnl Assume the include files are nearby. additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem2 | */$acl_libdirstem2/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then dnl Potentially add $additional_includedir to $INCNAME. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's /usr/local/include and we are using GCC on Linux, dnl 3. if it's already present in $CPPFLAGS or the already dnl constructed $INCNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INC[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $INCNAME. INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" fi fi fi fi fi dnl Look for dependencies. if test -n "$found_la"; then dnl Read the .la file. It defines the variables dnl dlname, library_names, old_library, dependency_libs, current, dnl age, revision, installed, dlopen, dlpreopen, libdir. save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" dnl We use only dependency_libs. for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` dnl Potentially add $additional_libdir to $LIBNAME and $LTLIBNAME. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's /usr/local/lib and we are using GCC on Linux, dnl 3. if it's already present in $LDFLAGS or the already dnl constructed $LIBNAME, dnl 4. if it doesn't exist as a directory. 
if test "X$additional_libdir" != "X/usr/$acl_libdirstem" \ && test "X$additional_libdir" != "X/usr/$acl_libdirstem2"; then haveit= if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem" \ || test "X$additional_libdir" = "X/usr/local/$acl_libdirstem2"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LIBNAME. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LTLIBNAME. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) dnl Handle this in the next round. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) dnl Handle this in the next round. Throw away the .la's dnl directory; it is already contained in a preceding -L dnl option. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) dnl Most likely an immediate library name. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" ;; esac done fi else dnl Didn't find the library; assume it is in the system directories dnl known to the linker and runtime loader. (All the system dnl directories known to the linker should also be known to the dnl runtime loader, otherwise the system is severely misconfigured.) LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user must dnl pass all path elements in one option. We can arrange that for a dnl single library, but not when more than one $LIBNAMEs are used. alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done dnl Note: acl_hardcode_libdir_flag_spec uses $libdir and $wl. acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" else dnl The -rpath options are cumulative. for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then dnl When using libtool, the option that works for both libraries and dnl executables is -R. 
The -R options are cumulative. for found_dir in $ltrpathdirs; do LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" done fi popdef([P_A_C_K]) popdef([PACKLIBS]) popdef([PACKUP]) popdef([PACK]) popdef([NAME]) ]) dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, dnl unless already present in VAR. dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes dnl contains two or three consecutive elements that belong together. AC_DEFUN([AC_LIB_APPENDTOVAR], [ for element in [$2]; do haveit= for x in $[$1]; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then [$1]="${[$1]}${[$1]:+ }$element" fi done ]) dnl For those cases where a variable contains several -L and -l options dnl referring to unknown libraries and directories, this macro determines the dnl necessary additional linker options for the runtime path. dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL]) dnl sets LDADDVAR to linker options needed together with LIBSVALUE. dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed, dnl otherwise linking without libtool is assumed. AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS], [ AC_REQUIRE([AC_LIB_RPATH]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) $1= if test "$enable_rpath" != no; then if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode directories into the resulting dnl binary. rpathdirs= next= for opt in $2; do if test -n "$next"; then dir="$next" dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem" \ && test "X$dir" != "X/usr/$acl_libdirstem2"; then rpathdirs="$rpathdirs $dir" fi next= else case $opt in -L) next=yes ;; -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'` dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem" \ && test "X$dir" != "X/usr/$acl_libdirstem2"; then rpathdirs="$rpathdirs $dir" fi next= ;; *) next= ;; esac fi done if test "X$rpathdirs" != "X"; then if test -n ""$3""; then dnl libtool is used for linking. Use -R options. for dir in $rpathdirs; do $1="${$1}${$1:+ }-R$dir" done else dnl The linker is used for linking directly. if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user dnl must pass all path elements in one option. alldirs= for dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="$flag" else dnl The -rpath options are cumulative. for dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="${$1}${$1:+ }$flag" done fi fi fi fi fi AC_SUBST([$1]) ]) # lib-prefix.m4 serial 7 (gettext-0.18) dnl Copyright (C) 2001-2005, 2008-2013 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. dnl AC_LIB_ARG_WITH is synonymous to AC_ARG_WITH in autoconf-2.13, and dnl similar to AC_ARG_WITH in autoconf 2.52...2.57 except that is doesn't dnl require excessive bracketing. 
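dnl A minimal configure.ac sketch (FOO_LIBS and FOO_LDADD are hypothetical
dnl names) for the two macros defined above: AC_LIB_APPENDTOVAR adds each
dnl element only if it is not already present, and AC_LIB_LINKFLAGS_FROM_LIBS
dnl derives the run-time path options that belong next to an existing set of
dnl -L/-l options.
dnl
dnl   AC_LIB_APPENDTOVAR([CPPFLAGS], [-I/opt/foo/include])
dnl   dnl FOO_LIBS is assumed to hold e.g. "-L/opt/foo/lib -lfoo"; FOO_LDADD
dnl   dnl receives the matching rpath flags for a non-libtool link.
dnl   AC_LIB_LINKFLAGS_FROM_LIBS([FOO_LDADD], [$FOO_LIBS], [])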
ifdef([AC_HELP_STRING], [AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[[$2]],[$3],[$4])])], [AC_DEFUN([AC_][LIB_ARG_WITH], [AC_ARG_WITH([$1],[$2],[$3],[$4])])]) dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed dnl to access previously installed libraries. The basic assumption is that dnl a user will want packages to use other packages he previously installed dnl with the same --prefix option. dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate dnl libraries, but is otherwise very convenient. AC_DEFUN([AC_LIB_PREFIX], [ AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) AC_REQUIRE([AC_PROG_CC]) AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) dnl By default, look in $includedir and $libdir. use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_LIB_ARG_WITH([lib-prefix], [ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib --without-lib-prefix don't search for libraries in includedir and libdir], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi ]) if test $use_additional = yes; then dnl Potentially add $additional_includedir to $CPPFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's already present in $CPPFLAGS, dnl 3. if it's /usr/local/include and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= for x in $CPPFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $CPPFLAGS. CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" fi fi fi fi dnl Potentially add $additional_libdir to $LDFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's already present in $LDFLAGS, dnl 3. if it's /usr/local/lib and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= for x in $LDFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LDFLAGS. LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" fi fi fi fi fi ]) dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, dnl acl_final_exec_prefix, containing the values to which $prefix and dnl $exec_prefix will expand at the end of the configure script. AC_DEFUN([AC_LIB_PREPARE_PREFIX], [ dnl Unfortunately, prefix and exec_prefix get only finally determined dnl at the end of configure. 
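dnl An assumed configure.ac fragment showing where AC_LIB_PREFIX (defined
dnl above) is typically placed: once, before the library checks it is meant
dnl to influence. With ./configure --with-lib-prefix=/opt/sw it would add
dnl -I/opt/sw/include to CPPFLAGS and -L/opt/sw/$acl_libdirstem (normally
dnl lib) to LDFLAGS, provided those directories exist.
dnl
dnl   AC_INIT([example], [1.0])
dnl   AC_PROG_CC
dnl   AC_LIB_PREFIX
dnl   AC_CHECK_LIB([z], [inflate])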
if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" ]) dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the dnl variables prefix and exec_prefix bound to the values they will have dnl at the end of the configure script. AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], [ acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" $1 exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" ]) dnl AC_LIB_PREPARE_MULTILIB creates dnl - a variable acl_libdirstem, containing the basename of the libdir, either dnl "lib" or "lib64" or "lib/64", dnl - a variable acl_libdirstem2, as a secondary possible value for dnl acl_libdirstem, either the same as acl_libdirstem or "lib/sparcv9" or dnl "lib/amd64". AC_DEFUN([AC_LIB_PREPARE_MULTILIB], [ dnl There is no formal standard regarding lib and lib64. dnl On glibc systems, the current practice is that on a system supporting dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under dnl $prefix/lib64 and 32-bit libraries go under $prefix/lib. We determine dnl the compiler's default mode by looking at the compiler's library search dnl path. If at least one of its elements ends in /lib64 or points to a dnl directory whose absolute pathname ends in /lib64, we assume a 64-bit ABI. dnl Otherwise we use the default, namely "lib". dnl On Solaris systems, the current practice is that on a system supporting dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under dnl $prefix/lib/64 (which is a symlink to either $prefix/lib/sparcv9 or dnl $prefix/lib/amd64) and 32-bit libraries go under $prefix/lib. AC_REQUIRE([AC_CANONICAL_HOST]) acl_libdirstem=lib acl_libdirstem2= case "$host_os" in solaris*) dnl See Solaris 10 Software Developer Collection > Solaris 64-bit Developer's Guide > The Development Environment dnl . dnl "Portable Makefiles should refer to any library directories using the 64 symbolic link." dnl But we want to recognize the sparcv9 or amd64 subdirectory also if the dnl symlink is missing, so we set acl_libdirstem2 too. AC_CACHE_CHECK([for 64-bit host], [gl_cv_solaris_64bit], [AC_EGREP_CPP([sixtyfour bits], [ #ifdef _LP64 sixtyfour bits #endif ], [gl_cv_solaris_64bit=yes], [gl_cv_solaris_64bit=no]) ]) if test $gl_cv_solaris_64bit = yes; then acl_libdirstem=lib/64 case "$host_cpu" in sparc*) acl_libdirstem2=lib/sparcv9 ;; i*86 | x86_64) acl_libdirstem2=lib/amd64 ;; esac fi ;; *) searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` if test -n "$searchpath"; then acl_save_IFS="${IFS= }"; IFS=":" for searchdir in $searchpath; do if test -d "$searchdir"; then case "$searchdir" in */lib64/ | */lib64 ) acl_libdirstem=lib64 ;; */../ | */.. ) # Better ignore directories of this form. They are misleading. ;; *) searchdir=`cd "$searchdir" && pwd` case "$searchdir" in */lib64 ) acl_libdirstem=lib64 ;; esac ;; esac fi done IFS="$acl_save_IFS" fi ;; esac test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" ]) # libtool.m4 - Configure libtool for the host system. 
-*-Autoconf-*- # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. m4_define([_LT_COPYING], [dnl # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ]) # serial 57 LT_INIT # LT_PREREQ(VERSION) # ------------------ # Complain and exit if this libtool version is less that VERSION. m4_defun([LT_PREREQ], [m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, [m4_default([$3], [m4_fatal([Libtool version $1 or higher is required], 63)])], [$2])]) # _LT_CHECK_BUILDDIR # ------------------ # Complain if the absolute build directory name contains unusual characters m4_defun([_LT_CHECK_BUILDDIR], [case `pwd` in *\ * | *\ *) AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; esac ]) # LT_INIT([OPTIONS]) # ------------------ AC_DEFUN([LT_INIT], [AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl AC_BEFORE([$0], [LT_LANG])dnl AC_BEFORE([$0], [LT_OUTPUT])dnl AC_BEFORE([$0], [LTDL_INIT])dnl m4_require([_LT_CHECK_BUILDDIR])dnl dnl Autoconf doesn't catch unexpanded LT_ macros by default: m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 dnl unless we require an AC_DEFUNed macro: AC_REQUIRE([LTOPTIONS_VERSION])dnl AC_REQUIRE([LTSUGAR_VERSION])dnl AC_REQUIRE([LTVERSION_VERSION])dnl AC_REQUIRE([LTOBSOLETE_VERSION])dnl m4_require([_LT_PROG_LTMAIN])dnl _LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) dnl Parse OPTIONS _LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. 
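dnl Usage sketch for LT_PREREQ as defined above (the version number is
dnl chosen for illustration): placed before LT_INIT, it aborts with
dnl "Libtool version 2.4 or higher is required" when the installed libtool
dnl macros are older.
dnl
dnl   LT_PREREQ([2.4])
dnl   LT_INIT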
LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl _LT_SETUP # Only expand once: m4_define([LT_INIT]) ])# LT_INIT # Old names: AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PROG_LIBTOOL], []) dnl AC_DEFUN([AM_PROG_LIBTOOL], []) # _LT_CC_BASENAME(CC) # ------------------- # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. m4_defun([_LT_CC_BASENAME], [for cc_temp in $1""; do case $cc_temp in compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` ]) # _LT_FILEUTILS_DEFAULTS # ---------------------- # It is okay to use these file commands and assume they have been set # sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. m4_defun([_LT_FILEUTILS_DEFAULTS], [: ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} ])# _LT_FILEUTILS_DEFAULTS # _LT_SETUP # --------- m4_defun([_LT_SETUP], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl _LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl dnl _LT_DECL([], [host_alias], [0], [The host system])dnl _LT_DECL([], [host], [0])dnl _LT_DECL([], [host_os], [0])dnl dnl _LT_DECL([], [build_alias], [0], [The build system])dnl _LT_DECL([], [build], [0])dnl _LT_DECL([], [build_os], [0])dnl dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl dnl AC_REQUIRE([AC_PROG_LN_S])dnl test -z "$LN_S" && LN_S="ln -s" _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl dnl AC_REQUIRE([LT_CMD_MAX_LEN])dnl _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl m4_require([_LT_CMD_RELOAD])dnl m4_require([_LT_CHECK_MAGIC_METHOD])dnl m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl m4_require([_LT_CMD_OLD_ARCHIVE])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_WITH_SYSROOT])dnl _LT_CONFIG_LIBTOOL_INIT([ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi ]) if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi _LT_CHECK_OBJDIR m4_require([_LT_TAG_COMPILER])dnl case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). 
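dnl A hedged configure.ac sketch for LT_INIT, whose definition ends above.
dnl It supersedes the old AC_PROG_LIBTOOL/AM_PROG_LIBTOOL names and takes
dnl options through its argument; the project name and the disable-static
dnl option below are placeholders, not a recommendation.
dnl
dnl   AC_INIT([libexample], [1.0])
dnl   AM_INIT_AUTOMAKE([foreign])
dnl   AC_PROG_CC
dnl   LT_INIT([disable-static])
dnl   AC_CONFIG_FILES([Makefile])
dnl   AC_OUTPUT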
libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then _LT_PATH_MAGIC fi ;; esac # Use C for the default configuration in the libtool script LT_SUPPORTED_TAG([CC]) _LT_LANG_C_CONFIG _LT_LANG_DEFAULT_CONFIG _LT_CONFIG_COMMANDS ])# _LT_SETUP # _LT_PREPARE_SED_QUOTE_VARS # -------------------------- # Define a few sed substitution that help us do robust quoting. m4_defun([_LT_PREPARE_SED_QUOTE_VARS], [# Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ]) # _LT_PROG_LTMAIN # --------------- # Note that this code is called both from `configure', and `config.status' # now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, # `config.status' has no value for ac_aux_dir unless we are using Automake, # so we pass a copy along to make sure it has a sensible value anyway. m4_defun([_LT_PROG_LTMAIN], [m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl _LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ltmain="$ac_aux_dir/ltmain.sh" ])# _LT_PROG_LTMAIN # So that we can recreate a full libtool script including additional # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS # in macros and then make a single call at the end using the `libtool' # label. # _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) # ---------------------------------------- # Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL_INIT], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_INIT], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_INIT]) # _LT_CONFIG_LIBTOOL([COMMANDS]) # ------------------------------ # Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) # _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) # ----------------------------------------------------- m4_defun([_LT_CONFIG_SAVE_COMMANDS], [_LT_CONFIG_LIBTOOL([$1]) _LT_CONFIG_LIBTOOL_INIT([$2]) ]) # _LT_FORMAT_COMMENT([COMMENT]) # ----------------------------- # Add leading comment marks to the start of each line, and a trailing # full-stop to the whole comment if one is not present already. m4_define([_LT_FORMAT_COMMENT], [m4_ifval([$1], [ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) )]) # _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) # ------------------------------------------------------------------- # CONFIGNAME is the name given to the value in the libtool script. 
# VARNAME is the (base) name used in the configure script. # VALUE may be 0, 1 or 2 for a computed quote escaped value based on # VARNAME. Any other value will be used directly. m4_define([_LT_DECL], [lt_if_append_uniq([lt_decl_varnames], [$2], [, ], [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], [m4_ifval([$1], [$1], [$2])]) lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) m4_ifval([$4], [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) lt_dict_add_subkey([lt_decl_dict], [$2], [tagged?], [m4_ifval([$5], [yes], [no])])]) ]) # _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) # -------------------------------------------------------- m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) # lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_tag_varnames], [_lt_decl_filter([tagged?], [yes], $@)]) # _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) # --------------------------------------------------------- m4_define([_lt_decl_filter], [m4_case([$#], [0], [m4_fatal([$0: too few arguments: $#])], [1], [m4_fatal([$0: too few arguments: $#: $1])], [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], [lt_dict_filter([lt_decl_dict], $@)])[]dnl ]) # lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) # -------------------------------------------------- m4_define([lt_decl_quote_varnames], [_lt_decl_filter([value], [1], $@)]) # lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_dquote_varnames], [_lt_decl_filter([value], [2], $@)]) # lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_varnames_tagged], [m4_assert([$# <= 2])dnl _$0(m4_quote(m4_default([$1], [[, ]])), m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) m4_define([_lt_decl_varnames_tagged], [m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) # lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_all_varnames], [_$0(m4_quote(m4_default([$1], [[, ]])), m4_if([$2], [], m4_quote(lt_decl_varnames), m4_quote(m4_shift($@))))[]dnl ]) m4_define([_lt_decl_all_varnames], [lt_join($@, lt_decl_varnames_tagged([$1], lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ]) # _LT_CONFIG_STATUS_DECLARE([VARNAME]) # ------------------------------------ # Quote a variable value, and forward it to `config.status' so that its # declaration there will have the same value as in `configure'. VARNAME # must have a single quote delimited value for this to work. m4_define([_LT_CONFIG_STATUS_DECLARE], [$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) # _LT_CONFIG_STATUS_DECLARATIONS # ------------------------------ # We delimit libtool config variables with single quotes, so when # we write them to config.status, we have to be sure to quote all # embedded single quotes properly. 
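dnl Illustration (the output shown is approximate) of how a _LT_DECL call,
dnl per the definitions above, surfaces in the generated libtool script: the
dnl description is emitted as a comment by _LT_FORMAT_COMMENT and the name
dnl receives the quote-escaped configure-time value.
dnl
dnl   _LT_DECL([], [AR], [1], [The archiver])
dnl
dnl becomes, in the libtool script, roughly:
dnl
dnl   # The archiver.
dnl   AR="ar"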
In configure, this macro expands # each variable declared with _LT_DECL (and _LT_TAGDECL) into: # # ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], [m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAGS # ---------------- # Output comment and list of tags supported by the script m4_defun([_LT_LIBTOOL_TAGS], [_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl available_tags="_LT_TAGS"dnl ]) # _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) # ----------------------------------- # Extract the dictionary values for VARNAME (optionally with TAG) and # expand to a commented shell variable setting: # # # Some comment about what VAR is for. # visible_name=$lt_internal_name m4_define([_LT_LIBTOOL_DECLARE], [_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [description])))[]dnl m4_pushdef([_libtool_name], m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), [0], [_libtool_name=[$]$1], [1], [_libtool_name=$lt_[]$1], [2], [_libtool_name=$lt_[]$1], [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ]) # _LT_LIBTOOL_CONFIG_VARS # ----------------------- # Produce commented declarations of non-tagged libtool config variables # suitable for insertion in the LIBTOOL CONFIG section of the `libtool' # script. Tagged libtool config variables (even for the LIBTOOL CONFIG # section) are produced by _LT_LIBTOOL_TAG_VARS. m4_defun([_LT_LIBTOOL_CONFIG_VARS], [m4_foreach([_lt_var], m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAG_VARS(TAG) # ------------------------- m4_define([_LT_LIBTOOL_TAG_VARS], [m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) # _LT_TAGVAR(VARNAME, [TAGNAME]) # ------------------------------ m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) # _LT_CONFIG_COMMANDS # ------------------- # Send accumulated output to $CONFIG_STATUS. Thanks to the lists of # variables for single and double quote escaping we saved from calls # to _LT_DECL, we can put quote escaped variables declarations # into `config.status', and then the shell code to quote escape them in # for loops in `config.status'. Finally, any additional code accumulated # from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. m4_defun([_LT_CONFIG_COMMANDS], [AC_PROVIDE_IFELSE([LT_OUTPUT], dnl If the libtool generation code has been placed in $CONFIG_LT, dnl instead of duplicating it all over again into config.status, dnl then we will have config.status run $CONFIG_LT later, so it dnl needs to know what name is stored there: [AC_CONFIG_COMMANDS([libtool], [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], dnl If the libtool generation code is destined for config.status, dnl expand the accumulated commands and init code now: [AC_CONFIG_COMMANDS([libtool], [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ])#_LT_CONFIG_COMMANDS # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], [ # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. 
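dnl Example of the behaviour guarded against just below (assumes a ksh-like
dnl or POSIX shell): with CDPATH set, a bare cd into a directory found via
dnl CDPATH echoes the resolved path to stdout, which would corrupt command
dnl substitutions in the generated config.status code.
dnl
dnl   $ CDPATH=/tmp; mkdir -p /tmp/subdir; cd subdir
dnl   /tmp/subdir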
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' _LT_CONFIG_STATUS_DECLARATIONS LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$[]1 _LTECHO_EOF' } # Quote evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_quote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_dquote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done _LT_OUTPUT_LIBTOOL_INIT ]) # _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) # ------------------------------------ # Generate a child script FILE with all initialization necessary to # reuse the environment learned by the parent script, and make the # file executable. If COMMENT is supplied, it is inserted after the # `#!' sequence but before initialization text begins. After this # macro, additional text can be appended to FILE to form the body of # the child script. The macro ends with non-zero status if the # file could not be fully written (such as if the disk is full). m4_ifdef([AS_INIT_GENERATED], [m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], [m4_defun([_LT_GENERATED_FILE_INIT], [m4_require([AS_PREPARE])]dnl [m4_pushdef([AS_MESSAGE_LOG_FD])]dnl [lt_write_fail=0 cat >$1 <<_ASEOF || lt_write_fail=1 #! $SHELL # Generated by $as_me. $2 SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$1 <<\_ASEOF || lt_write_fail=1 AS_SHELL_SANITIZE _AS_PREPARE exec AS_MESSAGE_FD>&1 _ASEOF test $lt_write_fail = 0 && chmod +x $1[]dnl m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT # LT_OUTPUT # --------- # This macro allows early generation of the libtool script (before # AC_OUTPUT is called), incase it is used in configure for compilation # tests. AC_DEFUN([LT_OUTPUT], [: ${CONFIG_LT=./config.lt} AC_MSG_NOTICE([creating $CONFIG_LT]) _LT_GENERATED_FILE_INIT(["$CONFIG_LT"], [# Run this file to recreate a libtool stub with the current configuration.]) cat >>"$CONFIG_LT" <<\_LTEOF lt_cl_silent=false exec AS_MESSAGE_LOG_FD>>config.log { echo AS_BOX([Running $as_me.]) } >&AS_MESSAGE_LOG_FD lt_cl_help="\ \`$as_me' creates a local libtool stub from the current configuration, for use in further configure time tests before the real libtool is generated. Usage: $[0] [[OPTIONS]] -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files Report bugs to ." lt_cl_version="\ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) configured by $[0], generated by m4_PACKAGE_STRING. Copyright (C) 2011 Free Software Foundation, Inc. This config.lt script is free software; the Free Software Foundation gives unlimited permision to copy, distribute and modify it." 
while test $[#] != 0 do case $[1] in --version | --v* | -V ) echo "$lt_cl_version"; exit 0 ;; --help | --h* | -h ) echo "$lt_cl_help"; exit 0 ;; --debug | --d* | -d ) debug=: ;; --quiet | --q* | --silent | --s* | -q ) lt_cl_silent=: ;; -*) AC_MSG_ERROR([unrecognized option: $[1] Try \`$[0] --help' for more information.]) ;; *) AC_MSG_ERROR([unrecognized argument: $[1] Try \`$[0] --help' for more information.]) ;; esac shift done if $lt_cl_silent; then exec AS_MESSAGE_FD>/dev/null fi _LTEOF cat >>"$CONFIG_LT" <<_LTEOF _LT_OUTPUT_LIBTOOL_COMMANDS_INIT _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AC_MSG_NOTICE([creating $ofile]) _LT_OUTPUT_LIBTOOL_COMMANDS AS_EXIT(0) _LTEOF chmod +x "$CONFIG_LT" # configure is writing to config.log, but config.lt does its own redirection, # appending to config.log, which fails on DOS, as config.log is still kept # open by configure. Here we exec the FD to /dev/null, effectively closing # config.log, so it can be properly (re)opened and appended to by config.lt. lt_cl_success=: test "$silent" = yes && lt_config_lt_args="$lt_config_lt_args --quiet" exec AS_MESSAGE_LOG_FD>/dev/null $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false exec AS_MESSAGE_LOG_FD>>config.log $lt_cl_success || AS_EXIT(1) ])# LT_OUTPUT # _LT_CONFIG(TAG) # --------------- # If TAG is the built-in tag, create an initial libtool script with a # default configuration from the untagged config vars. Otherwise add code # to config.status for appending the configuration named by TAG from the # matching tagged config vars. m4_defun([_LT_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_CONFIG_SAVE_COMMANDS([ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl m4_if(_LT_TAG, [C], [ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # _LT_COPYING _LT_LIBTOOL_TAGS # ### BEGIN LIBTOOL CONFIG _LT_LIBTOOL_CONFIG_VARS _LT_LIBTOOL_TAG_VARS # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac _LT_PROG_LTMAIN # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) _LT_PROG_REPLACE_SHELLFNS mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ], [cat <<_LT_EOF >> "$ofile" dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded dnl in a comment (ie after a #). 
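dnl Usage sketch for LT_OUTPUT, whose definition ends above (assumed
dnl configure.ac ordering): call it right after LT_INIT when configure needs
dnl a working ./libtool for compile or link tests that run before AC_OUTPUT;
dnl otherwise it can simply be omitted.
dnl
dnl   LT_INIT
dnl   LT_OUTPUT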
# ### BEGIN LIBTOOL TAG CONFIG: $1 _LT_LIBTOOL_TAG_VARS(_LT_TAG) # ### END LIBTOOL TAG CONFIG: $1 _LT_EOF ])dnl /m4_if ], [m4_if([$1], [], [ PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile'], []) ])dnl /_LT_CONFIG_SAVE_COMMANDS ])# _LT_CONFIG # LT_SUPPORTED_TAG(TAG) # --------------------- # Trace this macro to discover what tags are supported by the libtool # --tag option, using: # autoconf --trace 'LT_SUPPORTED_TAG:$1' AC_DEFUN([LT_SUPPORTED_TAG], []) # C support is built-in for now m4_define([_LT_LANG_C_enabled], []) m4_define([_LT_TAGS], []) # LT_LANG(LANG) # ------------- # Enable libtool support for the given language if not already enabled. AC_DEFUN([LT_LANG], [AC_BEFORE([$0], [LT_OUTPUT])dnl m4_case([$1], [C], [_LT_LANG(C)], [C++], [_LT_LANG(CXX)], [Go], [_LT_LANG(GO)], [Java], [_LT_LANG(GCJ)], [Fortran 77], [_LT_LANG(F77)], [Fortran], [_LT_LANG(FC)], [Windows Resource], [_LT_LANG(RC)], [m4_ifdef([_LT_LANG_]$1[_CONFIG], [_LT_LANG($1)], [m4_fatal([$0: unsupported language: "$1"])])])dnl ])# LT_LANG # _LT_LANG(LANGNAME) # ------------------ m4_defun([_LT_LANG], [m4_ifdef([_LT_LANG_]$1[_enabled], [], [LT_SUPPORTED_TAG([$1])dnl m4_append([_LT_TAGS], [$1 ])dnl m4_define([_LT_LANG_]$1[_enabled], [])dnl _LT_LANG_$1_CONFIG($1)])dnl ])# _LT_LANG m4_ifndef([AC_PROG_GO], [ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_GO. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # m4_defun([AC_PROG_GO], [AC_LANG_PUSH(Go)dnl AC_ARG_VAR([GOC], [Go compiler command])dnl AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl _AC_ARG_VAR_LDFLAGS()dnl AC_CHECK_TOOL(GOC, gccgo) if test -z "$GOC"; then if test -n "$ac_tool_prefix"; then AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) fi fi if test -z "$GOC"; then AC_CHECK_PROG(GOC, gccgo, gccgo, false) fi ])#m4_defun ])#m4_ifndef # _LT_LANG_DEFAULT_CONFIG # ----------------------- m4_defun([_LT_LANG_DEFAULT_CONFIG], [AC_PROVIDE_IFELSE([AC_PROG_CXX], [LT_LANG(CXX)], [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) AC_PROVIDE_IFELSE([AC_PROG_F77], [LT_LANG(F77)], [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) AC_PROVIDE_IFELSE([AC_PROG_FC], [LT_LANG(FC)], [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal dnl pulling things in needlessly. 
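dnl Hedged configure.ac sketch for LT_LANG (defined above): it enables
dnl tagged-language support explicitly when the corresponding AC_PROG_*
dnl checks are not used, for example for C++ and Windows resource files.
dnl
dnl   LT_INIT
dnl   LT_LANG([C++])
dnl   LT_LANG([Windows Resource])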
AC_PROVIDE_IFELSE([AC_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([LT_PROG_GCJ], [LT_LANG(GCJ)], [m4_ifdef([AC_PROG_GCJ], [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([A][M_PROG_GCJ], [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([LT_PROG_GCJ], [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) AC_PROVIDE_IFELSE([AC_PROG_GO], [LT_LANG(GO)], [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) AC_PROVIDE_IFELSE([LT_PROG_RC], [LT_LANG(RC)], [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ])# _LT_LANG_DEFAULT_CONFIG # Obsolete macros: AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_CXX], []) dnl AC_DEFUN([AC_LIBTOOL_F77], []) dnl AC_DEFUN([AC_LIBTOOL_FC], []) dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) dnl AC_DEFUN([AC_LIBTOOL_RC], []) # _LT_TAG_COMPILER # ---------------- m4_defun([_LT_TAG_COMPILER], [AC_REQUIRE([AC_PROG_CC])dnl _LT_DECL([LTCC], [CC], [1], [A C compiler])dnl _LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl _LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl _LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC ])# _LT_TAG_COMPILER # _LT_COMPILER_BOILERPLATE # ------------------------ # Check for compiler boilerplate output or warnings with # the simple compiler test code. m4_defun([_LT_COMPILER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ])# _LT_COMPILER_BOILERPLATE # _LT_LINKER_BOILERPLATE # ---------------------- # Check for linker boilerplate output or warnings with # the simple link test code. 
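dnl Assumed invocation exercising the LTCC/LTCFLAGS fallbacks handled in
dnl _LT_TAG_COMPILER above: both default to $CC/$CFLAGS, but they can be
dnl preset in the environment so the C compiler recorded for libtool's own
dnl use differs from the one used for the package itself (the names below
dnl are placeholders, not a recommendation).
dnl
dnl   ./configure CC=gcc LTCC=cc LTCFLAGS=-O2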
m4_defun([_LT_LINKER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ])# _LT_LINKER_BOILERPLATE # _LT_REQUIRED_DARWIN_CHECKS # ------------------------- m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ case $host_os in rhapsody* | darwin*) AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) AC_CHECK_TOOL([LIPO], [lipo], [:]) AC_CHECK_TOOL([OTOOL], [otool], [:]) AC_CHECK_TOOL([OTOOL64], [otool64], [:]) _LT_DECL([], [DSYMUTIL], [1], [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) _LT_DECL([], [NMEDIT], [1], [Tool to change global to local symbols on Mac OS X]) _LT_DECL([], [LIPO], [1], [Tool to manipulate fat objects and archives on Mac OS X]) _LT_DECL([], [OTOOL], [1], [ldd/readelf like tool for Mach-O binaries on Mac OS X]) _LT_DECL([], [OTOOL64], [1], [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], [lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test $_lt_result -eq 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -rf libconftest.dylib* rm -f conftest.* fi]) AC_CACHE_CHECK([for -exported_symbols_list linker flag], [lt_cv_ld_exported_symbols_list], [lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [lt_cv_ld_exported_symbols_list=yes], [lt_cv_ld_exported_symbols_list=no]) LDFLAGS="$save_LDFLAGS" ]) AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], [lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? 
if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then lt_cv_ld_force_load=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM ]) case $host_os in rhapsody* | darwin1.[[012]]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[[012]]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac ]) # _LT_DARWIN_LINKER_FEATURES([TAG]) # --------------------------------- # Checks for linker and compiler features on darwin m4_defun([_LT_DARWIN_LINKER_FEATURES], [ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported if test "$lt_cv_ld_force_load" = "yes"; then _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) else _LT_TAGVAR(whole_archive_flag_spec, $1)='' fi _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=func_echo_all _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" m4_if([$1], [CXX], [ if test "$lt_cv_apple_cc_single_mod" != "yes"; then _LT_TAGVAR(archive_cmds, 
$1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi ],[]) else _LT_TAGVAR(ld_shlibs, $1)=no fi ]) # _LT_SYS_MODULE_PATH_AIX([TAGNAME]) # ---------------------------------- # Links a minimal program and checks the executable # for the system default hardcoded library path. In most cases, # this is /usr/lib:/lib, but when the MPI compilers are used # the location of the communication and MPI libs are included too. # If we don't find anything, use the default library path according # to the aix ld manual. # Store the results from the different compilers for each TAGNAME. # Allow to override them for all tags through lt_cv_aix_libpath. m4_defun([_LT_SYS_MODULE_PATH_AIX], [m4_require([_LT_DECL_SED])dnl if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ lt_aix_libpath_sed='[ /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }]' _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi],[]) if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" fi ]) aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) fi ])# _LT_SYS_MODULE_PATH_AIX # _LT_SHELL_INIT(ARG) # ------------------- m4_define([_LT_SHELL_INIT], [m4_divert_text([M4SH-INIT], [$1 ])])# _LT_SHELL_INIT # _LT_PROG_ECHO_BACKSLASH # ----------------------- # Find how we can fake an echo command that does not interpret backslash. # In particular, with Autoconf 2.60 or later we add some code to the start # of the generated configure script which will find a shell with a builtin # printf (which we can use as an echo command). m4_defun([_LT_PROG_ECHO_BACKSLASH], [ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO AC_MSG_CHECKING([how to print strings]) # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $[]1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. 
func_echo_all () { $ECHO "$*" } case "$ECHO" in printf*) AC_MSG_RESULT([printf]) ;; print*) AC_MSG_RESULT([print -r]) ;; *) AC_MSG_RESULT([cat]) ;; esac m4_ifdef([_AS_DETECT_SUGGESTED], [_AS_DETECT_SUGGESTED([ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test "X`printf %s $ECHO`" = "X$ECHO" \ || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) ])# _LT_PROG_ECHO_BACKSLASH # _LT_WITH_SYSROOT # ---------------- AC_DEFUN([_LT_WITH_SYSROOT], [AC_MSG_CHECKING([for sysroot]) AC_ARG_WITH([sysroot], [ --with-sysroot[=DIR] Search for dependent libraries within DIR (or the compiler's sysroot if not specified).], [], [with_sysroot=no]) dnl lt_sysroot will always be passed unquoted. We quote it here dnl in case the user passed a directory name. lt_sysroot= case ${with_sysroot} in #( yes) if test "$GCC" = yes; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) AC_MSG_RESULT([${with_sysroot}]) AC_MSG_ERROR([The sysroot must be an absolute path.]) ;; esac AC_MSG_RESULT([${lt_sysroot:-no}]) _LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl [dependent libraries, and in which our libraries should be installed.])]) # _LT_ENABLE_LOCK # --------------- m4_defun([_LT_ENABLE_LOCK], [AC_ARG_ENABLE([libtool-lock], [AS_HELP_STRING([--disable-libtool-lock], [avoid locking (might break parallel builds)])]) test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. 
echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, [AC_LANG_PUSH(C) AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) AC_LANG_POP]) if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; *-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD="${LD-ld}_sol2" fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" ])# _LT_ENABLE_LOCK # _LT_PROG_AR # ----------- m4_defun([_LT_PROG_AR], [AC_CHECK_TOOLS(AR, [ar], false) : ${AR=ar} : ${AR_FLAGS=cru} _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], [lt_cv_ar_at_file=no AC_COMPILE_IFELSE([AC_LANG_PROGRAM], [echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' AC_TRY_EVAL([lt_ar_try]) if test "$ac_status" -eq 0; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a AC_TRY_EVAL([lt_ar_try]) if test "$ac_status" -ne 0; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a ]) ]) if test "x$lt_cv_ar_at_file" = xno; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi _LT_DECL([], [archiver_list_spec], [1], [How to feed a file listing to the archiver]) ])# _LT_PROG_AR # _LT_CMD_OLD_ARCHIVE # ------------------- m4_defun([_LT_CMD_OLD_ARCHIVE], [_LT_PROG_AR AC_CHECK_TOOL(STRIP, strip, :) test -z "$STRIP" && STRIP=: _LT_DECL([], [STRIP], [1], [A symbol stripping program]) AC_CHECK_TOOL(RANLIB, ranlib, :) test -z "$RANLIB" && RANLIB=: _LT_DECL([], [RANLIB], [1], [Commands used to install an old-style archive]) # Determine commands to create old-style static archives. 
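dnl Sketch (file names are hypothetical) of what the @FILE support probed in
dnl _LT_PROG_AR above buys: with archivers that accept response files, such
dnl as GNU ar, a very long member list can be passed via a file instead of
dnl the command line, which matters once link lines approach max_cmd_len.
dnl
dnl   $ printf '%s\n' first.o second.o third.o > members.lst
dnl   $ ar cru libdemo.a @members.lst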
old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac _LT_DECL([], [old_postinstall_cmds], [2]) _LT_DECL([], [old_postuninstall_cmds], [2]) _LT_TAGDECL([], [old_archive_cmds], [2], [Commands used to build an old-style archive]) _LT_DECL([], [lock_old_archive_extraction], [0], [Whether to use a lock for old archive extraction]) ])# _LT_CMD_OLD_ARCHIVE # _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------------------- # Check whether the given compiler option works AC_DEFUN([_LT_COMPILER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$3" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi fi $RM conftest* ]) if test x"[$]$2" = xyes; then m4_if([$5], , :, [$5]) else m4_if([$6], , :, [$6]) fi ])# _LT_COMPILER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) # _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------- # Check whether the given linker option works AC_DEFUN([_LT_LINKER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $3" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. 
cat conftest.err 1>&AS_MESSAGE_LOG_FD $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi else $2=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" ]) if test x"[$]$2" = xyes; then m4_if([$4], , :, [$4]) else m4_if([$5], , :, [$5]) fi ])# _LT_LINKER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) # LT_CMD_MAX_LEN #--------------- AC_DEFUN([LT_CMD_MAX_LEN], [AC_REQUIRE([AC_CANONICAL_HOST])dnl # find the maximum length of command line arguments AC_MSG_CHECKING([the maximum length of command line arguments]) AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. 
lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac ]) if test -n "$lt_cv_sys_max_cmd_len" ; then AC_MSG_RESULT($lt_cv_sys_max_cmd_len) else AC_MSG_RESULT(none) fi max_cmd_len=$lt_cv_sys_max_cmd_len _LT_DECL([], [max_cmd_len], [0], [What is the maximum length of a command?]) ])# LT_CMD_MAX_LEN # Old name: AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) # _LT_HEADER_DLFCN # ---------------- m4_defun([_LT_HEADER_DLFCN], [AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl ])# _LT_HEADER_DLFCN # _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, # ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) # ---------------------------------------------------------------- m4_defun([_LT_TRY_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test "$cross_compiling" = yes; then : [$4] else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF [#line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include <dlfcn.h> #endif #include <stdio.h> #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed.
*/ #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; }] _LT_EOF if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) $1 ;; x$lt_dlneed_uscore) $2 ;; x$lt_dlunknown|x*) $3 ;; esac else : # compilation failed $3 fi fi rm -fr conftest* ])# _LT_TRY_DLOPEN_SELF # LT_SYS_DLOPEN_SELF # ------------------ AC_DEFUN([LT_SYS_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ]) ;; *) AC_CHECK_FUNC([shl_load], [lt_cv_dlopen="shl_load"], [AC_CHECK_LIB([dld], [shl_load], [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], [AC_CHECK_FUNC([dlopen], [lt_cv_dlopen="dlopen"], [AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], [AC_CHECK_LIB([svld], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], [AC_CHECK_LIB([dld], [dld_link], [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) ]) ]) ]) ]) ]) ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" AC_CACHE_CHECK([whether a program can dlopen itself], lt_cv_dlopen_self, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) ]) if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" AC_CACHE_CHECK([whether a statically linked program can dlopen itself], lt_cv_dlopen_self_static, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) ]) fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi _LT_DECL([dlopen_support], [enable_dlopen], [0], [Whether dlopen is supported]) _LT_DECL([dlopen_self], [enable_dlopen_self], [0], [Whether dlopen of programs is supported]) _LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], [Whether dlopen of statically linked programs is supported]) ])# 
LT_SYS_DLOPEN_SELF # Old name: AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) # _LT_COMPILER_C_O([TAGNAME]) # --------------------------- # Check to see if options -c and -o are simultaneously supported by compiler. # This macro does not hard code the compiler like AC_PROG_CC_C_O. m4_defun([_LT_COMPILER_C_O], [m4_require([_LT_DECL_SED])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes fi fi chmod u+w . 2>&AS_MESSAGE_LOG_FD $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* ]) _LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], [Does compiler simultaneously support -c and -o options?]) ])# _LT_COMPILER_C_O # _LT_COMPILER_FILE_LOCKS([TAGNAME]) # ---------------------------------- # Check to see if we can do hard links to lock some files if needed m4_defun([_LT_COMPILER_FILE_LOCKS], [m4_require([_LT_ENABLE_LOCK])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_COMPILER_C_O([$1]) hard_links="nottested" if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user AC_MSG_CHECKING([if we can lock with hard links]) hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no AC_MSG_RESULT([$hard_links]) if test "$hard_links" = no; then AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) need_locks=warn fi else need_locks=no fi _LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) ])# _LT_COMPILER_FILE_LOCKS # _LT_CHECK_OBJDIR # ---------------- m4_defun([_LT_CHECK_OBJDIR], [AC_CACHE_CHECK([for objdir], [lt_cv_objdir], [rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null]) objdir=$lt_cv_objdir _LT_DECL([], [objdir], [0], [The name of the directory that contains temporary libtool files])dnl m4_pattern_allow([LT_OBJDIR])dnl AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", [Define to the sub-directory in which libtool stores uninstalled libraries.]) ])# _LT_CHECK_OBJDIR # _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) # -------------------------------------- # Check hardcoding attributes. m4_defun([_LT_LINKER_HARDCODE_LIBPATH], [AC_MSG_CHECKING([how to hardcode library paths into programs]) _LT_TAGVAR(hardcode_action, $1)= if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || test -n "$_LT_TAGVAR(runpath_var, $1)" || test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then # We can hardcode non-existent directories. if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then # Linking always hardcodes the temporary library directory. _LT_TAGVAR(hardcode_action, $1)=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. _LT_TAGVAR(hardcode_action, $1)=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
_LT_TAGVAR(hardcode_action, $1)=unsupported fi AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi _LT_TAGDECL([], [hardcode_action], [0], [How to hardcode a shared library path into an executable]) ])# _LT_LINKER_HARDCODE_LIBPATH # _LT_CMD_STRIPLIB # ---------------- m4_defun([_LT_CMD_STRIPLIB], [m4_require([_LT_DECL_EGREP]) striplib= old_striplib= AC_MSG_CHECKING([whether stripping libraries is possible]) if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" AC_MSG_RESULT([yes]) else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ;; *) AC_MSG_RESULT([no]) ;; esac fi _LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) _LT_DECL([], [striplib], [1]) ])# _LT_CMD_STRIPLIB # _LT_SYS_DYNAMIC_LINKER([TAG]) # ----------------------------- # PORTME Fill in your ld.so characteristics m4_defun([_LT_SYS_DYNAMIC_LINKER], [AC_REQUIRE([AC_CANONICAL_HOST])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_OBJDUMP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl AC_MSG_CHECKING([dynamic linker characteristics]) m4_if([$1], [], [ if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;; *) lt_sed_strip_eq="s,=/,/,g" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. 
lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[[lt_foo]]++; } if (lt_freq[[lt_foo]] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's,/\([[A-Za-z]]:\),\1,g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi]) library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[[4-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[[01]] | aix4.[[01]].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. 
library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[[45]]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' library_names_spec='${libname}.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. 
lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec="$LIB" if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[[23]].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[[01]]* | freebsdelf3.[[01]]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=yes sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[[3-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], [lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], [lt_cv_shlibpath_overrides_runpath=yes])]) LDFLAGS=$save_LDFLAGS libdir=$save_libdir ]) shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. 
hardcode_into_libs=yes # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsdelf*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='NetBSD ld.elf_so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[[89]] | openbsd2.[[89]].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi 
sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac AC_MSG_RESULT([$dynamic_linker]) test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi _LT_DECL([], [variables_saved_for_relink], [1], [Variables whose values should be saved in libtool wrapper scripts and restored at link time]) _LT_DECL([], [need_lib_prefix], [0], [Do we need the "lib" prefix for modules?]) _LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) _LT_DECL([], [version_type], [0], [Library versioning type]) _LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) _LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) _LT_DECL([], [shlibpath_overrides_runpath], [0], [Is shlibpath searched before the hard-coded library search path?]) _LT_DECL([], [libname_spec], [1], [Format of library name prefix]) _LT_DECL([], [library_names_spec], [1], [[List of archive names. First name is the real one, the rest are links. The last name is the one that the linker finds with -lNAME]]) _LT_DECL([], [soname_spec], [1], [[The coded name of the library, if different from the real name]]) _LT_DECL([], [install_override_mode], [1], [Permission mode override for installation of shared libraries]) _LT_DECL([], [postinstall_cmds], [2], [Command to use after installation of a shared archive]) _LT_DECL([], [postuninstall_cmds], [2], [Command to use after uninstallation of a shared archive]) _LT_DECL([], [finish_cmds], [2], [Commands used to finish a libtool library installation in a directory]) _LT_DECL([], [finish_eval], [1], [[As "finish_cmds", except a single script fragment to be evaled but not shown]]) _LT_DECL([], [hardcode_into_libs], [0], [Whether we should hardcode library paths into libraries]) _LT_DECL([], [sys_lib_search_path_spec], [2], [Compile-time system search path for libraries]) _LT_DECL([], [sys_lib_dlsearch_path_spec], [2], [Run-time system search path for libraries]) ])# _LT_SYS_DYNAMIC_LINKER # _LT_PATH_TOOL_PREFIX(TOOL) # -------------------------- # find a file program which can recognize shared library AC_DEFUN([_LT_PATH_TOOL_PREFIX], [m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in [[\\/*] | ?:[\\/]*]) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. 
;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/$1; then lt_cv_path_MAGIC_CMD="$ac_dir/$1" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac]) MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then AC_MSG_RESULT($MAGIC_CMD) else AC_MSG_RESULT(no) fi _LT_DECL([], [MAGIC_CMD], [0], [Used to examine libraries when file_magic_cmd begins with "file"])dnl ])# _LT_PATH_TOOL_PREFIX # Old name: AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) # _LT_PATH_MAGIC # -------------- # find a file program which can recognize a shared library m4_defun([_LT_PATH_MAGIC], [_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi ])# _LT_PATH_MAGIC # LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker AC_DEFUN([LT_PATH_LD], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PROG_ECHO_BACKSLASH])dnl AC_ARG_WITH([gnu-ld], [AS_HELP_STRING([--with-gnu-ld], [assume the C compiler uses GNU ld @<:@default=no@:>@])], [test "$withval" = no || with_gnu_ld=yes], [with_gnu_ld=no])dnl ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. 
with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(lt_cv_path_LD, [if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) test "$with_gnu_ld" != no && break ;; *) test "$with_gnu_ld" != yes && break ;; esac fi done IFS="$lt_save_ifs" else lt_cv_path_LD="$LD" # Let the user override the test with a path. fi]) LD="$lt_cv_path_LD" if test -n "$LD"; then AC_MSG_RESULT($LD) else AC_MSG_RESULT(no) fi test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH]) _LT_PATH_LD_GNU AC_SUBST([LD]) _LT_TAGDECL([], [LD], [1], [The linker used to build libraries]) ])# LT_PATH_LD # Old names: AU_ALIAS([AM_PROG_LD], [LT_PATH_LD]) AU_ALIAS([AC_PROG_LD], [LT_PATH_LD]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_PROG_LD], []) dnl AC_DEFUN([AC_PROG_LD], []) # _LT_PATH_LD_GNU # --------------- m4_defun([_LT_PATH_LD_GNU], [AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld, [# I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) lt_cv_prog_gnu_ld=yes ;; *) lt_cv_prog_gnu_ld=no ;; esac]) with_gnu_ld=$lt_cv_prog_gnu_ld ])# _LT_PATH_LD_GNU # _LT_CMD_RELOAD # -------------- # find reload flag for linker # -- PORTME Some linkers may need a different reload flag. m4_defun([_LT_CMD_RELOAD], [AC_CACHE_CHECK([for $LD option to reload object files], lt_cv_ld_reload_flag, [lt_cv_ld_reload_flag='-r']) reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in cygwin* | mingw* | pw32* | cegcc*) if test "$GCC" != yes; then reload_cmds=false fi ;; darwin*) if test "$GCC" = yes; then reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac _LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl _LT_TAGDECL([], [reload_cmds], [2])dnl ])# _LT_CMD_RELOAD # _LT_CHECK_MAGIC_METHOD # ---------------------- # how to check for library dependencies # -- PORTME fill in with the dynamic library characteristics m4_defun([_LT_CHECK_MAGIC_METHOD], [m4_require([_LT_DECL_EGREP]) m4_require([_LT_DECL_OBJDUMP]) AC_CACHE_CHECK([how to recognize dependent libraries], lt_cv_deplibs_check_method, [lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # `unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # which responds to the $file_magic_cmd with a given extended regex. # If you have `file' or equivalent on your system and you're not sure # whether `pass_all' will *always* work, you probably want this one. case $host_os in aix[[4-9]]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[[45]]*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[[3-9]]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF.
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac ]) file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown _LT_DECL([], [deplibs_check_method], [1], [Method to check whether dependent libraries are shared objects]) _LT_DECL([], [file_magic_cmd], [1], [Command to use when deplibs_check_method = "file_magic"]) _LT_DECL([], [file_magic_glob], [1], [How to find potential files when deplibs_check_method = "file_magic"]) _LT_DECL([], [want_nocaseglob], [1], [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) ])# _LT_CHECK_MAGIC_METHOD # LT_PATH_NM # ---------- # find the pathname to a BSD- or MS-compatible name lister AC_DEFUN([LT_PATH_NM], [AC_REQUIRE([AC_PROG_CC])dnl AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. 
tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. # Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi]) if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols" ;; *) DUMPBIN=: ;; esac fi AC_SUBST([DUMPBIN]) if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm AC_SUBST([NM]) _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], [lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) cat conftest.out >&AS_MESSAGE_LOG_FD if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest*]) ])# LT_PATH_NM # Old names: AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_PROG_NM], []) dnl AC_DEFUN([AC_PROG_NM], []) # _LT_CHECK_SHAREDLIB_FROM_LINKLIB # -------------------------------- # how to determine the name of the shared library # associated with a specific link library. 
# -- PORTME fill in with the dynamic library characteristics m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], [m4_require([_LT_DECL_EGREP]) m4_require([_LT_DECL_OBJDUMP]) m4_require([_LT_DECL_DLLTOOL]) AC_CACHE_CHECK([how to associate runtime and link libraries], lt_cv_sharedlib_from_linklib_cmd, [lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh # decide which to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd="$ECHO" ;; esac ]) sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO _LT_DECL([], [sharedlib_from_linklib_cmd], [1], [Command to associate shared and link libraries]) ])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB # _LT_PATH_MANIFEST_TOOL # ---------------------- # locate the manifest tool m4_defun([_LT_PATH_MANIFEST_TOOL], [AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], [lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out cat conftest.err >&AS_MESSAGE_LOG_FD if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest*]) if test "x$lt_cv_path_mainfest_tool" != xyes; then MANIFEST_TOOL=: fi _LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl ])# _LT_PATH_MANIFEST_TOOL # LT_LIB_M # -------- # check for math library AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in *-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) # These system don't have libm, or don't need it ;; *-ncr-sysv4.3*) AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ;; *) AC_CHECK_LIB(m, cos, LIBM="-lm") ;; esac AC_SUBST([LIBM]) ])# LT_LIB_M # Old name: AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_CHECK_LIBM], []) # _LT_COMPILER_NO_RTTI([TAGNAME]) # ------------------------------- m4_defun([_LT_COMPILER_NO_RTTI], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= if test "$GCC" = yes; then case $cc_basename in nvcc*) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; *) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; esac _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], lt_cv_prog_compiler_rtti_exceptions, [-fno-rtti -fno-exceptions], [], [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], [Compiler flag to turn off builtin functions]) ])# _LT_COMPILER_NO_RTTI # _LT_CMD_GLOBAL_SYMBOLS # ---------------------- m4_defun([_LT_CMD_GLOBAL_SYMBOLS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([LT_PATH_NM])dnl AC_REQUIRE([LT_PATH_LD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_TAG_COMPILER])dnl # Check for command to grab the raw symbol name followed by C symbol from 
nm. AC_MSG_CHECKING([command to parse $NM output from $compiler object]) AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [ # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[[BCDEGRST]]' # Regexp to match symbols that can be accessed directly from C. sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[[BCDT]]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[[ABCDGISTW]]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[[ABCDEGRST]]' fi ;; irix* | nonstopux*) symcode='[[BCDEGRST]]' ;; osf*) symcode='[[BCDEGQRST]]' ;; solaris*) symcode='[[BDRT]]' ;; sco3.2v5*) symcode='[[DT]]' ;; sysv4.2uw2*) symcode='[[DT]]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[[ABDT]]' ;; sysv4) symcode='[[DFNSTU]]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[[ABCDGIRSTW]]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK ['"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx]" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. 
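# The test compiles a small C file defining nm_test_var and nm_test_func,
# feeds the $NM output through the candidate pipe, and keeps the pipe only
# if both symbols are recovered and the generated symbol table links.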
pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. nlist=conftest.nm if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT@&t@_DLSYM_CONST #elif defined(__osf__) /* This system does not cope well with relocations in const data. */ # define LT@&t@_DLSYM_CONST #else # define LT@&t@_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT@&t@_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[[]] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done ]) if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then AC_MSG_RESULT(failed) else AC_MSG_RESULT(ok) fi # Response file support. 
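# When the nm interface is MS dumpbin, '@FILE' response files are assumed to
# work; plain nm is only given them if its --help output mentions '@FILE'.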
if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then nm_file_list_spec='@' fi _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], [Take the output of nm and produce a listing of raw symbols and C names]) _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], [Transform the output of nm in a proper C declaration]) _LT_DECL([global_symbol_to_c_name_address], [lt_cv_sys_global_symbol_to_c_name_address], [1], [Transform the output of nm in a C name address pair]) _LT_DECL([global_symbol_to_c_name_address_lib_prefix], [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], [Transform the output of nm in a C name address pair when lib prefix is needed]) _LT_DECL([], [nm_file_list_spec], [1], [Specify filename containing input files for $NM]) ]) # _LT_CMD_GLOBAL_SYMBOLS # _LT_COMPILER_PIC([TAGNAME]) # --------------------------- m4_defun([_LT_COMPILER_PIC], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_wl, $1)= _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)= m4_if([$1], [CXX], [ # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else case $host_os in aix[[4-9]]*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; dgux*) case $cc_basename in ec++*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; ghcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' fi ;; aCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL 8.0, 9.0 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ;; *) ;; esac ;; netbsd* | netbsdelf*-gnu) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. 
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; cxx*) # Digital/Compaq C++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; lcc*) # Lucid _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ], [ if test "$GCC" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. 
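# Mark this tag as unable to build shared libraries and disable shared builds.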
_LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; hpux9* | hpux10* | hpux11*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC (with -KPIC) is the default. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # Lahey Fortran 8.1. lf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ;; nagfor*) # NAG Fortran compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; ccc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. 
_LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ;; *Sun\ F* | *Sun*Fortran*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ;; *Intel*\ [[CF]]*Compiler*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; *Portland\ Group*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; esac ;; newsos6) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All OSF/1 code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; rdos*) _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; solaris*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; *) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; esac ;; sunos4*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; unicos*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; uts4*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ]) case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ;; esac AC_CACHE_CHECK([for $compiler option to produce PIC], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) _LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) # # Check to 
make sure the PIC flag actually works. # if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; esac], [_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) fi _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], [Additional compiler flags for building library objects]) _LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], [How to pass a linker flag through the compiler]) # # Check to make sure the static flag actually works. # wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" _LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], [Compiler flag to prevent dynamic linking]) ])# _LT_COMPILER_PIC # _LT_LINKER_SHLIBS([TAGNAME]) # ---------------------------- # See if the linker supports building shared libraries. m4_defun([_LT_LINKER_SHLIBS], [AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) m4_if([$1], [CXX], [ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] case $host_os in aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global defined # symbols, whereas GNU nm marks them as "W". 
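# GNU nm is therefore invoked with -Bpg and weak ('W') symbols are kept; AIX nm
# gets -BCpg and only T, D and B symbols are exported. Names starting with a
# dot are skipped in both cases.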
if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ;; esac ;; linux* | k*bsd*-gnu | gnu*) _LT_TAGVAR(link_all_deplibs, $1)=no ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac ], [ runpath_var= _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_cmds, $1)= _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(old_archive_from_new_cmds, $1)= _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= _LT_TAGVAR(thread_safe_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list _LT_TAGVAR(include_expsyms, $1)= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. 
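# In that case force with_gnu_ld=no so the native-linker paths below are taken.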
if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; linux* | k*bsd*-gnu | gnu*) _LT_TAGVAR(link_all_deplibs, $1)=no ;; esac _LT_TAGVAR(ld_shlibs, $1)=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test "$with_gnu_ld" = yes; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test "$lt_use_gnu_ld_interface" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi supports_anon_versioning=no case `$LD -v 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. 
_LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
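# The expr below maps $RANDOM (or the PID when RANDOM is unset) to a
# 256 KiB-aligned image base in the 0x50000000-0x6FFC0000 range.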
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 _LT_TAGVAR(whole_archive_flag_spec, $1)= tmp_sharedflag='--shared' ;; xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath 
${wl}$libdir' _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
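# Check the GNU ld help output for ELF support before enabling -shared with a
# ${wl}-rpath hard-coded run path.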
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; sunos4*) _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then runpath_var= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. _LT_TAGVAR(hardcode_minus_L, $1)=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. _LT_TAGVAR(hardcode_direct, $1)=unsupported fi ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global # defined symbols, whereas GNU nm marks them as "W". if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. 
If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi _LT_TAGVAR(link_all_deplibs, $1)=no else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. 
_LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared libraries. _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; bsdi[[45]]*) _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
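# hence old_archive_from_new_cmds is left commented out for native MSVC.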
# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' # FIXME: Should let the user specify the lib program. _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
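# DragonFly BSD is handled the same way; -R$libdir records the run path
# directly in the output.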
freebsd* | dragonfly*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; hpux9*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; hpux10*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes fi ;; hpux11*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) m4_if($1, [], [ # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) _LT_LINKER_OPTION([if $CC understands -b], _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) ;; esac fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes 
_LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. # This should be the same for all languages, so no per-tag cache variable. AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], [lt_cv_irix_exported_symbol], [save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" AC_LINK_IFELSE( [AC_LANG_SOURCE( [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], [C++], [[int foo (void) { return 0; }]], [Fortran 77], [[ subroutine foo end]], [Fortran], [[ subroutine foo end]])])], [lt_cv_irix_exported_symbol=yes], [lt_cv_irix_exported_symbol=no]) LDFLAGS="$save_LDFLAGS"]) if test "$lt_cv_irix_exported_symbol" = yes; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' fi else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes _LT_TAGVAR(link_all_deplibs, $1)=yes ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; newsos6) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs 
$compiler_flags ${wl}-retain-symbols-file,$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' else case $host_os in openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' ;; esac fi else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' 
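# Editorial illustration (assumption, symbols foo/bar are hypothetical): for
# an export list containing foo and bar, the expsym commands just above write
# a $lib.exp of roughly this shape:
#   -exported_symbol foo
#   -exported_symbol bar
#   -hidden
# which is then handed back to the Tru64 linker through its -input option and
# removed again once the library has been built.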
_LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; solaris*) _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' fi ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4) case $host_vendor in sni) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. 
_LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' _LT_TAGVAR(hardcode_direct, $1)=no ;; motorola) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4.3*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes _LT_TAGVAR(ld_shlibs, $1)=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
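# Editorial note (illustrative only; libfoo is a made-up name): with a
# GCC-style driver where $wl expands to "-Wl,", the no_undefined_flag set
# below reaches the linker as "-z text", e.g.
#   gcc -shared -Wl,-z,text -Wl,-h,libfoo.so.1 -o libfoo.so.1 foo.o
# -z text only rejects relocations against the text segment (typically from
# non-PIC objects), rather than failing on every unresolved libc symbol the
# way -z defs would, which is the trade-off described in the note above.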
_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(ld_shlibs, $1)=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' ;; esac fi fi ]) AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld _LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl _LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl _LT_DECL([], [extract_expsyms_cmds], [2], [The commands to extract the exported symbol list from a shared archive]) # # Do we need to explicitly link libc? # case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in x|xyes) # Assume -lc should be added _LT_TAGVAR(archive_cmds_need_lc, $1)=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $_LT_TAGVAR(archive_cmds, $1) in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. AC_CACHE_CHECK([whether -lc should be explicitly linked in], [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), [$RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) _LT_TAGVAR(allow_undefined_flag, $1)= if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) then lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no else lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes fi _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* ]) _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) ;; esac fi ;; esac _LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], [Whether or not to add -lc for building shared libraries]) _LT_TAGDECL([allow_libtool_libs_with_static_runtimes], [enable_shared_with_static_runtimes], [0], [Whether or not to disallow shared libs when runtime libs are static]) _LT_TAGDECL([], [export_dynamic_flag_spec], [1], [Compiler flag to allow reflexive dlopens]) _LT_TAGDECL([], [whole_archive_flag_spec], [1], [Compiler flag to generate shared objects directly from archives]) _LT_TAGDECL([], [compiler_needs_object], [1], [Whether the compiler copes with passing no objects directly]) _LT_TAGDECL([], [old_archive_from_new_cmds], [2], [Create an old-style archive from a shared archive]) _LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], [Create a temporary old-style archive to link instead of a shared archive]) _LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) _LT_TAGDECL([], [archive_expsym_cmds], [2]) _LT_TAGDECL([], [module_cmds], [2], [Commands used to build a loadable module if different from building a shared archive.]) _LT_TAGDECL([], [module_expsym_cmds], [2]) _LT_TAGDECL([], [with_gnu_ld], [1], [Whether we are building with GNU ld or not]) _LT_TAGDECL([], [allow_undefined_flag], [1], [Flag that allows shared libraries with undefined symbols to be built]) _LT_TAGDECL([], [no_undefined_flag], [1], [Flag that enforces no undefined symbols]) _LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], [Flag to hardcode $libdir into a binary during linking. 
This must work even if $libdir does not exist]) _LT_TAGDECL([], [hardcode_libdir_separator], [1], [Whether we need a single "-rpath" flag with a separated argument]) _LT_TAGDECL([], [hardcode_direct], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_direct_absolute], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary and the resulting library dependency is "absolute", i.e impossible to change by setting ${shlibpath_var} if the library is relocated]) _LT_TAGDECL([], [hardcode_minus_L], [0], [Set to "yes" if using the -LDIR flag during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_shlibpath_var], [0], [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_automatic], [0], [Set to "yes" if building a shared library automatically hardcodes DIR into the library and all subsequent libraries and executables linked against it]) _LT_TAGDECL([], [inherit_rpath], [0], [Set to yes if linker adds runtime paths of dependent libraries to runtime path list]) _LT_TAGDECL([], [link_all_deplibs], [0], [Whether libtool must link a program against all its dependency libraries]) _LT_TAGDECL([], [always_export_symbols], [0], [Set to "yes" if exported symbols are required]) _LT_TAGDECL([], [export_symbols_cmds], [2], [The commands to list exported symbols]) _LT_TAGDECL([], [exclude_expsyms], [1], [Symbols that should not be listed in the preloaded symbols]) _LT_TAGDECL([], [include_expsyms], [1], [Symbols that must always be exported]) _LT_TAGDECL([], [prelink_cmds], [2], [Commands necessary for linking programs (against libraries) with templates]) _LT_TAGDECL([], [postlink_cmds], [2], [Commands necessary for finishing linking programs]) _LT_TAGDECL([], [file_list_spec], [1], [Specify filename containing input files]) dnl FIXME: Not yet implemented dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], dnl [Compiler flag to generate thread safe objects]) ])# _LT_LINKER_SHLIBS # _LT_LANG_C_CONFIG([TAG]) # ------------------------ # Ensure that the configuration variables for a C compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to `libtool'. m4_defun([_LT_LANG_C_CONFIG], [m4_require([_LT_DECL_EGREP])dnl lt_save_CC="$CC" AC_LANG_PUSH(C) # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' _LT_TAG_COMPILER # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) LT_SYS_DLOPEN_SELF _LT_CMD_STRIPLIB # Report which library types will actually be built AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_CONFIG($1) fi AC_LANG_POP CC="$lt_save_CC" ])# _LT_LANG_C_CONFIG # _LT_LANG_CXX_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a C++ compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to `libtool'. m4_defun([_LT_LANG_CXX_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then AC_PROG_CXXCPP else _lt_caught_CXX_error=yes fi AC_LANG_PUSH(C++) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' else _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration LT_PATH_LD # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test "$with_gnu_ld" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) _LT_TAGVAR(ld_shlibs, $1)=yes case $host_os in aix3*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an empty # executable. 
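# Editorial note (conceptual sketch only; the exact probe lives in the macro
# below): the default libpath is recovered by linking an empty program and
# reading its loader section, roughly
#   dump -H conftest | sed -n '/Import File Strings/,/^$/p'
# which on a stock system yields an entry such as /usr/lib:/lib that then
# becomes $aix_libpath.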
_LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared # libraries. _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. 
_LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ func_to_tool_file "$lt_outputfile"~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... 
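# Editorial illustration (hypothetical symbols): a user-supplied export file
# that already is a .def, e.g.
#   EXPORTS
#   foo
#   bar
# is copied to $output_objdir/$soname.def unchanged by the command below;
# otherwise an EXPORTS header is written first and the plain symbol list is
# appended after it.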
_LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF _LT_TAGVAR(ld_shlibs, $1)=no ;; freebsd-elf*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_TAGVAR(ld_shlibs, $1)=yes ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; hpux9*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) ;; *) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. ;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. 
# Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' fi fi _LT_TAGVAR(link_all_deplibs, $1)=yes ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. case `$CC -V 2>&1` in *"Version 7."*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname 
${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
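# Editorial example (made-up file names): instead of "ar cru libfoo.a ...",
# static archives are built with the compiler driver as shown below, e.g.
#   CC -xar -o libfoo.a foo.o bar.o
# so that any template instantiations referenced by foo.o/bar.o are generated
# and placed in the archive as well.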
_LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; m88k*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) _LT_TAGVAR(ld_shlibs, $1)=yes ;; openbsd2*) # C++ shared libraries are fairly broken _LT_TAGVAR(ld_shlibs, $1)=no ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; cxx*) case $host in osf3*) _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' ;; *) _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ $RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ;; esac _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. 
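# Editorial illustration (hypothetical symbols): the expsym commands in this
# solaris* section (above for Sun CC, below for g++) turn an export list
# containing foo and bar into a linker mapfile $lib.exp of the form
#   { global:
#   foo;
#   bar;
#   local: *; };
# which is passed to the Solaris linker with -M $lib.exp so that only the
# listed symbols stay global; the -G form right below is the variant that
# old g++ 2.7 needs.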
_LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ '"$_LT_TAGVAR(old_archive_cmds, $1)" _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ '"$_LT_TAGVAR(reload_cmds, $1)" ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(GCC, $1)="$GXX" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes AC_LANG_POP ])# _LT_LANG_CXX_CONFIG # _LT_FUNC_STRIPNAME_CNF # ---------------------- # func_stripname_cnf prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # # This function is identical to the (non-XSI) version of func_stripname, # except this one can be used by m4 code that may be executed by configure, # rather than the libtool script. m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl AC_REQUIRE([_LT_DECL_SED]) AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) func_stripname_cnf () { case ${2} in .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; esac } # func_stripname_cnf ])# _LT_FUNC_STRIPNAME_CNF # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) # --------------------------------- # Figure out "hidden" library dependencies from verbose # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. 
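# Illustrative sketch only (not part of libtool): the macro defined below
# compiles a tiny per-language test source, re-runs the link in verbose mode
# via $output_verbose_link_cmd, and buckets every token of the echoed link
# line according to whether it appears before or after conftest.$objext.
# Stripped of the special cases, the classification loop amounts to:
#
#   pre_done=no
#   for p in `eval "$output_verbose_link_cmd"`; do
#     case $p in
#       conftest.$objext) pre_done=yes ;;          # the test object itself
#       -L* | -R*)
#         if test "$pre_done" = no; then
#           compiler_lib_search_path="$compiler_lib_search_path $p"
#         else
#           postdeps="$postdeps $p"
#         fi ;;
#       *.$objext)
#         if test "$pre_done" = no; then
#           predep_objects="$predep_objects $p"
#         else
#           postdep_objects="$postdep_objects $p"
#         fi ;;
#     esac
#   done
#
# The real implementation below additionally handles -l flags, sysroot
# prefixes, and compilers that separate -L/-R from their argument.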
m4_defun([_LT_SYS_HIDDEN_LIBDEPS], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl # Dependencies to place before and after the object being linked: _LT_TAGVAR(predep_objects, $1)= _LT_TAGVAR(postdep_objects, $1)= _LT_TAGVAR(predeps, $1)= _LT_TAGVAR(postdeps, $1)= _LT_TAGVAR(compiler_lib_search_path, $1)= dnl we can't use the lt_simple_compile_test_code here, dnl because it contains code intended for an executable, dnl not a library. It's possible we should let each dnl tag define a new lt_????_link_test_code variable, dnl but it's only used here... m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF int a; void foo (void) { a = 0; } _LT_EOF ], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF ], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer*4 a a=0 return end _LT_EOF ], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer a a=0 return end _LT_EOF ], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF public class foo { private int a; public void bar (void) { a = 0; } }; _LT_EOF ], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF package foo func foo() { } _LT_EOF ]) _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac dnl Parse the compiler output and extract the necessary dnl objects, libraries and library flags. if AC_TRY_EVAL(ac_compile); then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case ${prev}${p} in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test $p = "-L" || test $p = "-R"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test "$pre_test_object_deps_done" = no; then case ${prev} in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" else _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$_LT_TAGVAR(postdeps, $1)"; then _LT_TAGVAR(postdeps, $1)="${prev}${p}" else _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. 
if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test "$pre_test_object_deps_done" = no; then if test -z "$_LT_TAGVAR(predep_objects, $1)"; then _LT_TAGVAR(predep_objects, $1)="$p" else _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" fi else if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then _LT_TAGVAR(postdep_objects, $1)="$p" else _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling $1 test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken m4_if([$1], [CXX], [case $host_os in interix[[3-9]]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. _LT_TAGVAR(predep_objects,$1)= _LT_TAGVAR(postdep_objects,$1)= _LT_TAGVAR(postdeps,$1)= ;; linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac if test "$solaris_use_stlport4" != yes; then _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' fi ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac # Adding this requires a known-good setup of shared libraries for # Sun compiler versions before 5.6, else PIC objects from an old # archive will be linked into the output, leading to subtle bugs. if test "$solaris_use_stlport4" != yes; then _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' fi ;; esac ;; esac ]) case " $_LT_TAGVAR(postdeps, $1) " in *" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; esac _LT_TAGVAR(compiler_lib_search_dirs, $1)= if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` fi _LT_TAGDECL([], [compiler_lib_search_dirs], [1], [The directories searched by this compiler when creating a shared library]) _LT_TAGDECL([], [predep_objects], [1], [Dependencies to place before and after the objects being linked to create a shared library]) _LT_TAGDECL([], [postdep_objects], [1]) _LT_TAGDECL([], [predeps], [1]) _LT_TAGDECL([], [postdeps], [1]) _LT_TAGDECL([], [compiler_lib_search_path], [1], [The library search path used internally by the compiler when linking a shared library]) ])# _LT_SYS_HIDDEN_LIBDEPS # _LT_LANG_F77_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a Fortran 77 compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. 
m4_defun([_LT_LANG_F77_CONFIG], [AC_LANG_PUSH(Fortran 77) if test -z "$F77" || test "X$F77" = "Xno"; then _lt_disable_F77=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for f77 test sources. ac_ext=f # Object file extension for compiled f77 test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the F77 compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_disable_F77" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${F77-"f77"} CFLAGS=$FFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) GCC=$G77 if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$G77" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
_LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC="$lt_save_CC" CFLAGS="$lt_save_CFLAGS" fi # test "$_lt_disable_F77" != yes AC_LANG_POP ])# _LT_LANG_F77_CONFIG # _LT_LANG_FC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for a Fortran compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_FC_CONFIG], [AC_LANG_PUSH(Fortran) if test -z "$FC" || test "X$FC" = "Xno"; then _lt_disable_FC=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for fc test sources. ac_ext=${ac_fc_srcext-f} # Object file extension for compiled fc test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the FC compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_disable_FC" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${FC-"f95"} CFLAGS=$FCFLAGS compiler=$CC GCC=$ac_cv_fc_compiler_gnu _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. 
test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS fi # test "$_lt_disable_FC" != yes AC_LANG_POP ])# _LT_LANG_FC_CONFIG # _LT_LANG_GCJ_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Java Compiler compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_GCJ_CONFIG], [AC_REQUIRE([LT_PROG_GCJ])dnl AC_LANG_SAVE # Source file extension for Java test sources. ac_ext=java # Object file extension for compiled Java test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="class foo {}" # Code to be used in simple link tests lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GCJ-"gcj"} CFLAGS=$GCJFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)="$LD" _LT_CC_BASENAME([$compiler]) # GCJ did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GCJ_CONFIG # _LT_LANG_GO_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Go compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_GO_CONFIG], [AC_REQUIRE([LT_PROG_GO])dnl AC_LANG_SAVE # Source file extension for Go test sources. ac_ext=go # Object file extension for compiled Go test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="package main; func main() { }" # Code to be used in simple link tests lt_simple_link_test_code='package main; func main() { }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GOC-"gccgo"} CFLAGS=$GOFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)="$LD" _LT_CC_BASENAME([$compiler]) # Go did not exist at the time GCC didn't implicitly link libc in. 
_LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GO_CONFIG # _LT_LANG_RC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for the Windows resource compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_RC_CONFIG], [AC_REQUIRE([LT_PROG_RC])dnl AC_LANG_SAVE # Source file extension for RC test sources. ac_ext=rc # Object file extension for compiled RC test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' # Code to be used in simple link tests lt_simple_link_test_code="$lt_simple_compile_test_code" # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC= CC=${RC-"windres"} CFLAGS= compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes if test -n "$compiler"; then : _LT_CONFIG($1) fi GCC=$lt_save_GCC AC_LANG_RESTORE CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_RC_CONFIG # LT_PROG_GCJ # ----------- AC_DEFUN([LT_PROG_GCJ], [m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], [AC_CHECK_TOOL(GCJ, gcj,) test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" AC_SUBST(GCJFLAGS)])])[]dnl ]) # Old name: AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_GCJ], []) # LT_PROG_GO # ---------- AC_DEFUN([LT_PROG_GO], [AC_CHECK_TOOL(GOC, gccgo,) ]) # LT_PROG_RC # ---------- AC_DEFUN([LT_PROG_RC], [AC_CHECK_TOOL(RC, windres,) ]) # Old name: AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_RC], []) # _LT_DECL_EGREP # -------------- # If we don't have a new enough Autoconf to choose the best grep # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_EGREP], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_REQUIRE([AC_PROG_FGREP])dnl test -z "$GREP" && GREP=grep _LT_DECL([], [GREP], [1], [A grep program that handles long lines]) _LT_DECL([], [EGREP], [1], [An ERE matcher]) _LT_DECL([], [FGREP], [1], [A literal string matcher]) dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too AC_SUBST([GREP]) ]) # _LT_DECL_OBJDUMP # -------------- # If we don't have a new enough Autoconf to choose the best objdump # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_OBJDUMP], [AC_CHECK_TOOL(OBJDUMP, objdump, false) test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) AC_SUBST([OBJDUMP]) ]) # _LT_DECL_DLLTOOL # ---------------- # Ensure DLLTOOL variable is set. 
m4_defun([_LT_DECL_DLLTOOL], [AC_CHECK_TOOL(DLLTOOL, dlltool, false) test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program]) AC_SUBST([DLLTOOL]) ]) # _LT_DECL_SED # ------------ # Check for a fully-functional sed program, that truncates # as few characters as possible. Prefer GNU sed if found. m4_defun([_LT_DECL_SED], [AC_PROG_SED test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" _LT_DECL([], [SED], [1], [A sed program that does not truncate output]) _LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ])# _LT_DECL_SED m4_ifndef([AC_PROG_SED], [ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_SED. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # m4_defun([AC_PROG_SED], [AC_MSG_CHECKING([for a sed that does not truncate output]) AC_CACHE_VAL(lt_cv_path_SED, [# Loop through the user's path and test for sed and gsed. # Then use that list of sed's as ones to test for truncation. as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for lt_ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" fi done done done IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris # along with /bin/sed that truncates output. for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do test ! -f $lt_ac_sed && continue cat /dev/null > conftest.in lt_ac_count=0 echo $ECHO_N "0123456789$ECHO_C" >conftest.in # Check for GNU sed and select it if it is found. if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then lt_cv_path_SED=$lt_ac_sed break fi while true; do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo >>conftest.nl $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break cmp -s conftest.out conftest.nl || break # 10000 chars as input seems more than enough test $lt_ac_count -gt 10 && break lt_ac_count=`expr $lt_ac_count + 1` if test $lt_ac_count -gt $lt_ac_max; then lt_ac_max=$lt_ac_count lt_cv_path_SED=$lt_ac_sed fi done done ]) SED=$lt_cv_path_SED AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ])#AC_PROG_SED ])#m4_ifndef # Old name: AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_SED], []) # _LT_CHECK_SHELL_FEATURES # ------------------------ # Find out whether the shell is Bourne or XSI compatible, # or has some other useful features. 
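# Illustrative note (not part of libtool): the probe below exercises the XSI
# parameter expansions and arithmetic that the generated script prefers over
# external sed/expr calls. In a shell that passes the test:
#
#   path=a/b/c
#   echo "${path##*/}"    # -> c      strip longest leading  */  match
#   echo "${path%/*}"     # -> a/b    strip shortest trailing /*  match
#   echo "${path#??}"     # -> b/c    strip the first two characters
#   echo "${#path}"       # -> 5      string length
#   echo $(( 1 + 1 ))     # -> 2      built-in arithmetic
#
# Shells lacking these constructs keep the portable fallback implementations.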
m4_defun([_LT_CHECK_SHELL_FEATURES], [AC_MSG_CHECKING([whether the shell understands some XSI constructs]) # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,b/c, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes AC_MSG_RESULT([$xsi_shell]) _LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) AC_MSG_CHECKING([whether the shell understands "+="]) lt_shell_append=no ( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes AC_MSG_RESULT([$lt_shell_append]) _LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi _LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac _LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ])# _LT_CHECK_SHELL_FEATURES # _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) # ------------------------------------------------------ # In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and # '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. m4_defun([_LT_PROG_FUNCTION_REPLACE], [dnl { sed -e '/^$1 ()$/,/^} # $1 /c\ $1 ()\ {\ m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) } # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: ]) # _LT_PROG_REPLACE_SHELLFNS # ------------------------- # Replace existing portable implementations of several shell functions with # equivalent extended shell implementations where those features are available.. m4_defun([_LT_PROG_REPLACE_SHELLFNS], [if test x"$xsi_shell" = xyes; then _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac]) _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl func_basename_result="${1##*/}"]) _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac func_basename_result="${1##*/}"]) _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary parameter first. 
func_stripname_result=${3} func_stripname_result=${func_stripname_result#"${1}"} func_stripname_result=${func_stripname_result%"${2}"}]) _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl func_split_long_opt_name=${1%%=*} func_split_long_opt_arg=${1#*=}]) _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl func_split_short_opt_arg=${1#??} func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl case ${1} in *.lo) func_lo2o_result=${1%.lo}.${objext} ;; *) func_lo2o_result=${1} ;; esac]) _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) fi if test x"$lt_shell_append" = xyes; then _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl func_quote_for_eval "${2}" dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) # Save a `func_append' function call where possible by direct use of '+=' sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: else # Save a `func_append' function call even when '+=' is not available sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: fi if test x"$_lt_function_replace_fail" = x":"; then AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) fi ]) # _LT_PATH_CONVERSION_FUNCTIONS # ----------------------------- # Determine which file name conversion functions should be used by # func_to_host_file (and, implicitly, by func_to_host_path). These are needed # for certain cross-compile configurations and native mingw. m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_MSG_CHECKING([how to convert $build file names to $host format]) AC_CACHE_VAL(lt_cv_to_host_file_cmd, [case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac ]) to_host_file_cmd=$lt_cv_to_host_file_cmd AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) _LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], [0], [convert $build file names to $host format])dnl AC_MSG_CHECKING([how to convert $build file names to toolchain format]) AC_CACHE_VAL(lt_cv_to_tool_file_cmd, [#assume ordinary cross tools, or native build. 
lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac ]) to_tool_file_cmd=$lt_cv_to_tool_file_cmd AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) _LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], [0], [convert $build files to toolchain format])dnl ])# _LT_PATH_CONVERSION_FUNCTIONS # Helper functions for option handling. -*- Autoconf -*- # # Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, # Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 7 ltoptions.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) # _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) # ------------------------------------------ m4_define([_LT_MANGLE_OPTION], [[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) # _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) # --------------------------------------- # Set option OPTION-NAME for macro MACRO-NAME, and if there is a # matching handler defined, dispatch to it. Other OPTION-NAMEs are # saved as a flag. m4_define([_LT_SET_OPTION], [m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), _LT_MANGLE_DEFUN([$1], [$2]), [m4_warning([Unknown $1 option `$2'])])[]dnl ]) # _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) # ------------------------------------------------------------ # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. m4_define([_LT_IF_OPTION], [m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) # _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) # ------------------------------------------------------- # Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME # are set. m4_define([_LT_UNLESS_OPTIONS], [m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), [m4_define([$0_found])])])[]dnl m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ])[]dnl ]) # _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) # ---------------------------------------- # OPTION-LIST is a space-separated list of Libtool options associated # with MACRO-NAME. If any OPTION has a matching handler declared with # LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about # the unknown option and exit. m4_defun([_LT_SET_OPTIONS], [# Set options m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [_LT_SET_OPTION([$1], _LT_Option)]) m4_if([$1],[LT_INIT],[ dnl dnl Simply set some default values (i.e off) if boolean options were not dnl specified: _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ]) _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ]) dnl dnl If no reference was made to various pairs of opposing options, then dnl we run the default mode handler for the pair. 
For example, if neither dnl `shared' nor `disable-shared' was passed, we enable building of shared dnl archives by default: _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], [_LT_ENABLE_FAST_INSTALL]) ]) ])# _LT_SET_OPTIONS # _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) # ----------------------------------------- m4_define([_LT_MANGLE_DEFUN], [[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) # LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) # ----------------------------------------------- m4_define([LT_OPTION_DEFINE], [m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ])# LT_OPTION_DEFINE # dlopen # ------ LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ]) AU_DEFUN([AC_LIBTOOL_DLOPEN], [_LT_SET_OPTION([LT_INIT], [dlopen]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `dlopen' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) # win32-dll # --------- # Declare package support for building win32 dll's. LT_OPTION_DEFINE([LT_INIT], [win32-dll], [enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) AC_CHECK_TOOL(AS, as, false) AC_CHECK_TOOL(DLLTOOL, dlltool, false) AC_CHECK_TOOL(OBJDUMP, objdump, false) ;; esac test -z "$AS" && AS=as _LT_DECL([], [AS], [1], [Assembler program])dnl test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl ])# win32-dll AU_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_REQUIRE([AC_CANONICAL_HOST])dnl _LT_SET_OPTION([LT_INIT], [win32-dll]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `win32-dll' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) # _LT_ENABLE_SHARED([DEFAULT]) # ---------------------------- # implement the --enable-shared flag, and supports the `shared' and # `disable-shared' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_SHARED], [m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) _LT_DECL([build_libtool_libs], [enable_shared], [0], [Whether or not to build shared libraries]) ])# _LT_ENABLE_SHARED LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) # Old names: AC_DEFUN([AC_ENABLE_SHARED], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ]) AC_DEFUN([AC_DISABLE_SHARED], [_LT_SET_OPTION([LT_INIT], [disable-shared]) ]) AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_SHARED], []) dnl AC_DEFUN([AM_DISABLE_SHARED], []) # _LT_ENABLE_STATIC([DEFAULT]) # ---------------------------- # implement the --enable-static flag, and support the `static' and # `disable-static' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_STATIC], [m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_static=]_LT_ENABLE_STATIC_DEFAULT) _LT_DECL([build_old_libs], [enable_static], [0], [Whether or not to build static libraries]) ])# _LT_ENABLE_STATIC LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) # Old names: AC_DEFUN([AC_ENABLE_STATIC], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ]) AC_DEFUN([AC_DISABLE_STATIC], [_LT_SET_OPTION([LT_INIT], [disable-static]) ]) AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_STATIC], []) dnl AC_DEFUN([AM_DISABLE_STATIC], []) # _LT_ENABLE_FAST_INSTALL([DEFAULT]) # ---------------------------------- # implement the --enable-fast-install flag, and support the `fast-install' # and `disable-fast-install' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_FAST_INSTALL], [m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([fast-install], [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) _LT_DECL([fast_install], [enable_fast_install], [0], [Whether or not to optimize for fast installation])dnl ])# _LT_ENABLE_FAST_INSTALL LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) # Old names: AU_DEFUN([AC_ENABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `fast-install' option into LT_INIT's first parameter.]) ]) AU_DEFUN([AC_DISABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], [disable-fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `disable-fast-install' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) # _LT_WITH_PIC([MODE]) # -------------------- # implement the --with-pic flag, and support the `pic-only' and `no-pic' # LT_INIT options. # MODE is either `yes' or `no'. If omitted, it defaults to `both'. m4_define([_LT_WITH_PIC], [AC_ARG_WITH([pic], [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], [lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for lt_pkg in $withval; do IFS="$lt_save_ifs" if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS="$lt_save_ifs" ;; esac], [pic_mode=default]) test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) _LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ])# _LT_WITH_PIC LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) # Old name: AU_DEFUN([AC_LIBTOOL_PICMODE], [_LT_SET_OPTION([LT_INIT], [pic-only]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `pic-only' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) m4_define([_LTDL_MODE], []) LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], [m4_define([_LTDL_MODE], [nonrecursive])]) LT_OPTION_DEFINE([LTDL_INIT], [recursive], [m4_define([_LTDL_MODE], [recursive])]) LT_OPTION_DEFINE([LTDL_INIT], [subproject], [m4_define([_LTDL_MODE], [subproject])]) m4_define([_LTDL_TYPE], []) LT_OPTION_DEFINE([LTDL_INIT], [installable], [m4_define([_LTDL_TYPE], [installable])]) LT_OPTION_DEFINE([LTDL_INIT], [convenience], [m4_define([_LTDL_TYPE], [convenience])]) # ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltsugar.m4 # This is to help aclocal find these macros, as it can't see m4_define. 
AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) # lt_join(SEP, ARG1, [ARG2...]) # ----------------------------- # Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their # associated separator. # Needed until we can rely on m4_join from Autoconf 2.62, since all earlier # versions in m4sugar had bugs. m4_define([lt_join], [m4_if([$#], [1], [], [$#], [2], [[$2]], [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) m4_define([_lt_join], [m4_if([$#$2], [2], [], [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) # lt_car(LIST) # lt_cdr(LIST) # ------------ # Manipulate m4 lists. # These macros are necessary as long as will still need to support # Autoconf-2.59 which quotes differently. m4_define([lt_car], [[$1]]) m4_define([lt_cdr], [m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], [$#], 1, [], [m4_dquote(m4_shift($@))])]) m4_define([lt_unquote], $1) # lt_append(MACRO-NAME, STRING, [SEPARATOR]) # ------------------------------------------ # Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. # Note that neither SEPARATOR nor STRING are expanded; they are appended # to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). # No SEPARATOR is output if MACRO-NAME was previously undefined (different # than defined and empty). # # This macro is needed until we can rely on Autoconf 2.62, since earlier # versions of m4sugar mistakenly expanded SEPARATOR but not STRING. m4_define([lt_append], [m4_define([$1], m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) # ---------------------------------------------------------- # Produce a SEP delimited list of all paired combinations of elements of # PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list # has the form PREFIXmINFIXSUFFIXn. # Needed until we can rely on m4_combine added in Autoconf 2.62. m4_define([lt_combine], [m4_if(m4_eval([$# > 3]), [1], [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl [[m4_foreach([_Lt_prefix], [$2], [m4_foreach([_Lt_suffix], ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) # lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) # ----------------------------------------------------------------------- # Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
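# Illustrative usage sketch for the list helpers defined in this layer (macro
# and list names below are hypothetical; everything happens at m4 time):
#
#   lt_append([my_list], [foo])                 # my_list expands to: foo
#   lt_append([my_list], [bar], [, ])           # my_list expands to: foo, bar
#   lt_if_append_uniq([my_list], [bar], [, ])   # no-op: bar already present
#   lt_join([-], [a], [b], [c])                 # expands to: a-b-c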
m4_define([lt_if_append_uniq], [m4_ifdef([$1], [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], [lt_append([$1], [$2], [$3])$4], [$5])], [lt_append([$1], [$2], [$3])$4])]) # lt_dict_add(DICT, KEY, VALUE) # ----------------------------- m4_define([lt_dict_add], [m4_define([$1($2)], [$3])]) # lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) # -------------------------------------------- m4_define([lt_dict_add_subkey], [m4_define([$1($2:$3)], [$4])]) # lt_dict_fetch(DICT, KEY, [SUBKEY]) # ---------------------------------- m4_define([lt_dict_fetch], [m4_ifval([$3], m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) # lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) # ----------------------------------------------------------------- m4_define([lt_if_dict_fetch], [m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], [$5], [$6])]) # lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) # -------------------------------------------------------------- m4_define([lt_dict_filter], [m4_if([$5], [], [], [lt_join(m4_quote(m4_default([$4], [[, ]])), lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) # ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # @configure_input@ # serial 3337 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.4.2]) m4_define([LT_PACKAGE_REVISION], [1.3337]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.4.2' macro_revision='1.3337' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ]) # lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004. # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 5 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. # # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. 
# Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], 
[AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 1 (pkg-config-0.24) # # Copyright © 2004 Scott James Remnant . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # PKG_PROG_PKG_CONFIG([MIN-VERSION]) # ---------------------------------- AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])# PKG_PROG_PKG_CONFIG # PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # # Check to see whether a particular set of modules exists. Similar # to PKG_CHECK_MODULES(), but does not set variables or print errors. 
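# Illustrative usage sketch (module name and actions are hypothetical):
#
#   PKG_CHECK_EXISTS([foo >= 1.2],
#                    [AC_MSG_NOTICE([foo is available])],
#                    [AC_MSG_NOTICE([building without foo support])])
#
# Only the given actions run; no *_CFLAGS/*_LIBS variables are substituted.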
# # Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) # only at the first occurence in configure.ac, so if the first place # it's called might be skipped (such as if it is within an "if", you # have to call PKG_CHECK_EXISTS manually # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) # _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) # --------------------------------------------- m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])# _PKG_CONFIG # _PKG_SHORT_ERRORS_SUPPORTED # ----------------------------- AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])# _PKG_SHORT_ERRORS_SUPPORTED # PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], # [ACTION-IF-NOT-FOUND]) # # # Note that if there is a possibility the first call to # PKG_CHECK_MODULES might not happen, you should be sure to include an # explicit call to PKG_PROG_PKG_CONFIG in your configure.ac # # # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])# PKG_CHECK_MODULES # Copyright (C) 2002-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. 
# (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.14' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.14], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.14])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [dnl Rely on autoconf to set up CDPATH properly. AC_PREREQ([2.50])dnl # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2013 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. 
mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. 
AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each '.P' file that we will # need in order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. 
-*- Autoconf -*- # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC]) [_AM_PROG_CC_C_O ]) # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target. The system "awk" is bad on # some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) fi fi]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. 
AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- # From Jim Meyering # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAINTAINER_MODE([DEFAULT-MODE]) # ---------------------------------- # Control maintainer-specific portions of Makefiles. # Default is to disable them, unless 'enable' is passed literally. # For symmetry, 'disable' may be passed as well. Anyway, the user # can override the default with the --enable/--disable switch. AC_DEFUN([AM_MAINTAINER_MODE], [m4_case(m4_default([$1], [disable]), [enable], [m4_define([am_maintainer_other], [disable])], [disable], [m4_define([am_maintainer_other], [enable])], [m4_define([am_maintainer_other], [enable]) m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) dnl maintainer-mode's default is 'disable' unless 'enable' is passed AC_ARG_ENABLE([maintainer-mode], [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], am_maintainer_other[ make rules and dependencies not useful (and sometimes confusing) to the casual installer])], [USE_MAINTAINER_MODE=$enableval], [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) AC_MSG_RESULT([$USE_MAINTAINER_MODE]) AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) MAINT=$MAINTAINER_MODE_TRUE AC_SUBST([MAINT])dnl ] ) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. 
AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. # If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_CC_C_O # --------------- # Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC # to automatically call this. 
AC_DEFUN([_AM_PROG_CC_C_O], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([compile])dnl AC_LANG_PUSH([C])dnl AC_CACHE_CHECK( [whether $CC understands -c and -o together], [am_cv_prog_cc_c_o], [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i]) if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi AC_LANG_POP([C])]) # For backward compatibility. AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. 
Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. dnl See automake bug#9928 and bug#10237. am_make=${MAKE-make} AC_CACHE_CHECK([whether $am_make supports nested variables], [am_cv_make_support_nested_variables], [if AS_ECHO([['TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi]) if test $am_cv_make_support_nested_variables = yes; then dnl Using '$V' instead of '$(V)' breaks IRIX make. AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AC_SUBST([AM_V])dnl AM_SUBST_NOTMAKE([AM_V])dnl AC_SUBST([AM_DEFAULT_V])dnl AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl AC_SUBST([AM_DEFAULT_VERBOSITY])dnl AM_BACKSLASH='\' AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor 'install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in "make install-strip", and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". 
However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of 'v7', 'ustar', or 'pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar # AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar], [# The POSIX 1988 'ustar' format is defined with fixed-size fields. # There is notably a 21 bits limit for the UID and the GID. In fact, # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 # and bug#13588). am_max_uid=2097151 # 2^21 - 1 am_max_gid=$am_max_uid # The $UID and $GID variables are not portable, so we need to resort # to the POSIX-mandated id(1) utility. Errors in the 'id' calls # below are definitely unexpected, so allow the users to see them # (that is, avoid stderr redirection). am_uid=`id -u || echo unknown` am_gid=`id -g || echo unknown` AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) if test $am_uid -le $am_max_uid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) if test $am_gid -le $am_max_gid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi], [pax], [], [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. 
_am_tools=${am_cv_prog_tar_$1-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works. rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([dovecot.m4]) dovecot-2.2.9/stamp.h.in0000644000175000017500000000000012012166665012005 00000000000000dovecot-2.2.9/doc/0002755000175000017500000000000012244505324010740 500000000000000dovecot-2.2.9/doc/thread-refs.txt0000644000175000017500000002114212244400443013617 00000000000000Optimistic incremental THREAD=REFERENCES Step (1) is the slowest stage for building a THREAD=REFERENCES tree. If its result tree is permanently saved, the following thread builds can be based on it by updating the tree incrementally. Adding new messages to the tree is simple: simply follow the normal rules as when building a new tree from scratch. Expunging messages gets more problematic though. Each node in the tree keeps a "link reference count" which specifies how many messages contain a "this message" -> "parent message" reference (number of links to parent node). The first reference is usually added by the message's own References: or In-Reply-To: header and the latter references are added by References: headers. This link refcount must be updated when adding and expunging messages. When the link refcount drops to zero, the message becomes a root. The link refcount doesn't tell much about the number of children the node has, because References: headers may reference any number of its ancestors. The optimistic approach assumes that usually there are no problematic links. In such case expunging a message simply updates the link refcounts and marks the message's node as expunged. Expunged messages may still act as dummy nodes. The node may be removed only after there are no nodes which point to it. Problematic links are handled by marking nodes affected by them. If such a node is expunged or its link is unreferenced, the thread tree must be rebuilt. This is assumed to be a rarely occurring event. The problematic cases are: 1) Duplicate Message-ID: headers. If the first message having it is expunged, the thread tree must be rebuilt. 2) Message-ID loops. 
If a message referencing the looping path gets expunged, the loop may break and the thread tree must be rebuilt. Because it can't be determined easily which loops get broken by which expunges, this case can be handled in a bit easier way: When detecting a loop between parent and child, rebuild the tree if any link between the parent and child gets unreferenced. 3) A message changes its parent because an earlier message's References: header contained a different link. If the message gets expunged, the thread tree must be rebuilt to get the original parent back. 4) A link in a message's References: header is ignored, because the referenced child already specified a different parent to itself. If the message gets expunged, the thread tree must be rebuilt to determine its new parent. 5) A link in a message's References: header is ignored, because an earlier message's References: header already specified a different link. If the earlier message gets expunged, the parent may change. The earlier message could be found out quickly by keeping some extra state (or with a slow scan), but since this is assumed to be a rare problem, there's an easier (but less specific) way to handle this: Rebuild the tree if a link to the child node is unreferenced (or alternatively if a link to the original parent node is unreferenced, but it probably happens more often). Pseudocode: node { parent: Pointer to parent node. Children pointers aren't required. uid: Message's UID (0 = expunged or never even existed) parent_link_refcount: Number of messages containing "this message" -> "parent message" link, i.e. "number of links to parent node". However since parents can change, not all of these references might be from our current child nodes. When this refcount reaches 0, it means we must detach from our parent. expunge_rebuilds: If this message gets expunged, rebuild the thread tree. child_unref_rebuilds: If a link between this node and its child gets unreferenced, rebuild the thread tree. } link_reference(parent_node, child_node) child_node.parent_link_refcount++ if is_ancestor(child_node, parent_node) // child_node is an ancestor of parent_node. Adding child_node -> // parent_node would introduce a loop. If any messages referencing the // path between parent_node's parent and child_node get expunged, we // have to rebuild the tree because the loop might break. For example: // #1: a -> b (a.ref=1, b.ref=1) // #2: b -> a (a.ref=2, b.ref=2) // #3: c -> a -> b (a.ref=3, b.ref=3, c.ref=1) // Expunging #3 wouldn't break the loop, but expunging #1 would. for node in nodes[parent_node.parent .. child_node] node.child_unref_rebuilds = true else if child_node.parent == parent_node // The same link already exists else // Set parent_node as child_node's parent if child_node.parent == NIL child_node.parent = parent_node else // Conflicting parent already exists, keep the original. // We get here only when handling References: header. if child_node.uid != 0 // We've already seen this message. It specifies its own parent and // it doesn't matter what any other reference says. The only way its // parent can change is if the message itself gets expunged. child_node.expunge_rebuilds = true else // Message doesn't exist, so it was one of the node's children // that created the original reference. If that reference gets // dropped, the parent is changed. 
We could catch this in one of // several ways: // a) Link to original parent node gets unreferenced // b) Link to this node gets unreferenced // c) Any of the child nodes gets expunged // b) is probably the least likely to happen, so use it child_node.child_unref_rebuilds = true thread_add_msg(uid) // get the Message-IDs as specified by the thread spec (msgid, parent_msgid, references) = message_get_thread_headers(uid) if msgid != NIL if nodes[msgid].uid == 0 nodes[msgid].uid = uid else // duplicate Message-ID. if the original ever gets expunged, // rebuild the thread tree nodes[msgid].expunge_rebuilds = true msgid = NIL if msgid == NIL msgid = get_unique_msg_id() node = nodes[msgid] if node.parent != NIL and (parent_msgid == NIL or node.parent.msgid != parent_msgid) // Conflicting parent, remove it. If this message gets expunged, we have // to revert back to the original parent. node.parent = NIL node.expunge_rebuilds = true if parent_msgid != NIL link_reference(nodes[parent_msgid], node) // go through References (skipping the last one) for (ref_parent, ref_child) in references link_reference(nodes[ref_parent], nodes[ref_child]) unref_link(parent, child) if parent.child_unref_rebuilds return false child.parent_link_refcount-- if child.parent_link_refcount == 0 child.parent = NIL return true // returns false if thread tree needs to be rebuilt thread_expunge_msg(uid) // get the Message-IDs as specified by the thread spec (msgid, in_reply_to_msgid, references) = message_get_thread_headers(uid) node = nodes[msgid] if node.uid != uid // Removing a duplicate Message-ID return false if node.expunge_rebuilds return false if parent_msgid != NIL and not unref_link(nodes[parent_msgid], nodes[child_msgid]) return false if references != NIL // go through References for (parent_msgid, child_msgid) in references if not unref_link(nodes[parent_msgid], nodes[child_msgid]) return false last_parent_msgid = references.last else if in_reply_to_msgid != NIL last_parent_msgid = in_reply_to_msgid if last_parent_msgid != NIL and not unref_link(nodes[last_parent_msgid], node) return false // mark this node as expunged node.uid = 0 return true thread_iterate() root_nodes = [] referenced = [] children = [][] // Steps (2) and (3) for node in nodes if node.parent != NIL root_nodes[] = node else referenced[node.parent] = true if node.uid != 0 // Find the node's first non-dummy parent and add the node as its child. // If there are no non-dummy parents, add it as the highest dummy's // child. nondummy_parent = node.parent while nondummy_parent.uid == 0 and nondummy_parent.parent != NIL nondummy_parent = nondummy_parent.parent children[nondummy_parent][] = node for node in root_nodes if node.uid == 0 if children[node] == NIL // remove dummy roots that have no children. delete(node) else if count(children[node]) == 1 // dummy root has a single child, replace the root with its child node = children[node] for node in nodes if node.uid == 0 and !referenced[node] free(node) // root_nodes and children now contain a tree equivalent to a tree built by // THREAD=REFERENCES specification steps (1)-(3). The rest of the steps // can be performed using them. Note that node.parent should not (and need // not) be used because it points its parent before steps (2) and (3). 
dovecot-2.2.9/doc/man/0002755000175000017500000000000012244505323011512 500000000000000dovecot-2.2.9/doc/man/doveadm-auth.1.in0000644000175000017500000000547412244400443014504 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-AUTH 1 "2010-06-09" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-auth \- Test authentication for a user .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " auth [\fB\-a\fP \fIauth_socket_path\fP] [\fB\-x\fP \fIauth_info\fP] .I user [\fIpassword\fP] .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B auth command can be used to test the authentication for the given .IR user . .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- .TP .BI \-a\ auth_socket_path This option is used to specify an absolute path to an alternative UNIX domain socket. .sp By default .BR doveadm (1) will use the socket .IR @rundir@/auth\-client . The socket may be located in another directory, when the default .I base_dir setting was overridden in .IR @pkgsysconfdir@/dovecot.conf . .\"------------------------------------- .TP .BI \-x\ auth_info .I auth_info specifies additional conditions for the .B auth command. The .I auth_info option string has to be given as .IB name = value pair. For multiple conditions the .B \-x option could be supplied multiple times. .br Possible names for the .I auth_info are: .RS .TP .B service The service for which the authentication lookup should be tested. The value may be the name of a service, commonly used with Dovecot. For example: .BR imap , .BR pop3\ or .BR smtp . .TP .B lip The local IP address (server) for the test. .TP .B rip The remote IP address (client) for the test. .TP .B lport The local port, e.g. 143 .TP .B rport The remote port, e.g. 24567 .RE .\"------------------------------------------------------------------------ .SH ARGUMENTS .\"------------------------------------- .TP .I user The .IR user \(aqs login name. Depending on the configuration, the login name may be for example .BR jane " or " john@example.com . .\"------------------------------------- .TP .I password Optionally the user\(aqs password. .BR doveadm (1) will prompt for the password, if none was given. .\"------------------------------------------------------------------------ .SH EXAMPLE This example demonstrates an imap authentication test for user john, assuming the user is connected from the host with the IP address 192.0.2.143. 
.PP .nf .B doveadm auth \-x service=imap \-x rip=192.0.2.143 john johns_password passdb: john auth succeeded extra fields: user=john .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveconf (1)dovecot-2.2.9/doc/man/doveadm-exec.1.in0000644000175000017500000000324212244400443014456 00000000000000.\" Copyright (c) 2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-EXEC 1 "2013-08-05" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-exec \- easily execute commands from Dovecot\(aqs libexec_dir .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " exec .IR binary " [" "binary arguments" ] .\"------------------------------------------------------------------------ .SH DESCRIPTION This command allows administrators and local users to simply execute commands from within .IR libexec_dir . So for example a logged in system user could start a pre\-authenticated imap session, using the command: .BR "doveadm exec imap" . An administrator would use the command: .B "doveadm exec imap \-u" .IR username . .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I binary the name of an executable located in .IR @pkglibexecdir@ . .\"------------------------------------- .TP .I binary arguments options and arguments, wich will be passed through to the .IR binary . .\"------------------------------------------------------------------------ .SH EXAMPLE This example demonstrates how to deliver a message from a file to a user\(aqs mailbox. .sp .nf .B doveadm exec dovecot\-lda \-d user@example.net \(rs .B \-f admin@example.net < \(ti/stuff/welcome.msg .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR dovecot\-lda (1)dovecot-2.2.9/doc/man/doveadm-flags.1.in0000644000175000017500000000650112244400443014627 00000000000000.\" Copyright (c) 2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-FLAGS 1 "2013-08-02" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-flags \- add, remove or replace messages\(aq flags .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " flags " [" \-S .IR socket_path "] " "command flags search_query" .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " flags " [" \-S .IR socket_path "] " "command flags search_query" .B \-A .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " flags " [" \-S .IR socket_path "] " "command flags search_query" .BI "\-u " user .\"------------------------------------------------------------------------ .SH DESCRIPTION This command is used to manipulate flags of messages. 
.\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\"------------------------------------- .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I flags Message flags as described in RFC 3501, section 2.3.2 (Flags Message Attribute): .BR \(rsAnswered ", " \(rsDeleted ", " \(rsDraft ", " \(rsFlagged ", " .BR \(rsRecent " and " \(rsSeen . And the IMAP keywords .BR \(DoForwarded ", " \(DoMDNSent ", " \(DoSubmitPending " and " .B \(DoSubmitted or user\-defined keywords, e.g. Junk, \(DoNonSpam or \(DoLabel1. .br One or multiple flags and/or keywords can be specified. .\"------------------------------------- .TP .I search_query Manipulate the flags of messages matching the given search query. See .BR doveadm\-search\-query (7) for details. .\"------------------------------------------------------------------------ .SH COMMANDS .SS flags add .BR "doveadm flags add" " [" \-u .IR user |\c .BR \-A "] [" \-S .IR socket_path "] " "flags search_query" .PP This command is used to extend the current set of flags with the given .IR flags . .\"------------------------------------- .SS flags remove .BR "doveadm flags remove" " [" \-u .IR user |\c .BR \-A "] [" \-S .IR socket_path "] " "flags search_query" .PP In order to remove the given .I flags from the current set of flags, use this command. .\"------------------------------------- .SS flags replace .BR "doveadm flags replace" " [" \-u .IR user |\c .BR \-A "] [" \-S .IR socket_path "] " "flags search_query" .PP This command is used to replace ALL current flags with the given .IR flags . .\"------------------------------------------------------------------------ .SH EXAMPLE List and manipulate the message flags of the message with uid 81563 .sp .nf .B doveadm fetch \-u bob \(aquid flags\(aq mailbox dovecot uid 81563 uid: 81563 flags: \(rsAnswered \(rsSeen NonJunk .B doveadm flags \-u bob remove NonJunk mailbox dovecot uid 81563 .B doveadm flags \-u bob add \(aq\(rsFlagged \(DoForwarded\(aq \ mailbox dovecot uid 81563 .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-fetch (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/doveadm-instance.1.in0000644000175000017500000000405712244400443015343 00000000000000.\" Copyright (c) 2012 Dovecot authors, see the included COPYING file .TH DOVEADM\-INSTANCE 1 "2012-02-16" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-instance \- Manage the list of running Dovecot instances .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .B instance list .br .BR doveadm " [" \-Dv "] " "instance remove" .IR name " | " base_dir .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B doveadm instance commands are used to manage the list of Dovecot instances running on the server. In most installations there is only one Dovecot instance, but in some cases is may be useful to have more (e.g. running director proxy and backend in the same server). .PP Instances are added to the list automatically when Dovecot is started. 
Each instance is uniquely identified by its .I base_dir setting. Instances can be named by setting .I instance_name in each instance\(aqs .IR dovecot.conf . When an instance is named, it can be accessed easily by giving .BI \-i\ instance_name command line parameter for Dovecot binaries (e.g. doveadm). .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I name The value of an instance\(aqs .I instance_name setting. .\"------------------------------------- .TP .I base_dir The base directory of a Dovecot instance. .\"------------------------------------------------------------------------ .SH COMMANDS .SS instance list .B doveadm instance list .PP This command lists the seen Dovecot instances. .\"------------------------------------- .SS instance remove .B doveadm instance remove .IR name " | " base_dir .PP This command removes the specified instance. .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/reporting-bugs.inc0000644000175000017500000000031112244400443015062 00000000000000.SH REPORTING BUGS Report bugs, including .I doveconf \-n output, to the Dovecot Mailing List . Information about reporting bugs is available at: http://dovecot.org/bugreport.html dovecot-2.2.9/doc/man/doveadm-stop.10000644000175000017500000000002212244400443014103 00000000000000.so man1/doveadm.1dovecot-2.2.9/doc/man/dovecot-lda.1.in0000644000175000017500000001112412244400443014314 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVECOT\-LDA 1 "2011-01-16" "Dovecot v2.2" "Dovecot" .SH NAME dovecot\-lda \- Dovecot\(aqs local mail delivery agent .\"------------------------------------------------------------------------ .SH SYNOPSIS .B dovecot\-lda .RB [ \-ek ] [\fB\-a\fP \fIaddress\fP] [\fB\-c\fP \fIconfig_file\fP] [\fB\-d\fP \fIusername\fP] [\fB\-f\fP \fIenvelope_sender\fP] [\fB\-m\fP \fImailbox\fP] [\fB\-o\fP \fIsetting=value\fP] [\fB\-p\fP \fIpath\fP] .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B dovecot\-lda is a local mail delivery agent which takes mail from an MTA and delivers it to a user\(aqs mailbox, while keeping Dovecot index files up to date. .PP Main features of the .B dovecot\-lda are: .TP 4 * Mailbox indexing during mail delivery, providing faster mailbox access later .TP * Quota enforcing by the quota plugin .TP * Sieve language support by the Pigeonhole sieve plugin .\"------------------------------------------------------------------------ .SH OPTIONS Options accepted by .BR dovecot\-lda : .\"------------------------------------- .TP .BI \-a\ address Destination address (e.g. user+ext@domain). Default is the same as .IR username . .\"------------------------------------- .TP .BI \-c\ config_file Alternative configuration file path. .\"------------------------------------- .TP .BI \-d\ username Destination .IR username . If given, the user information is looked up from userdb. Typically used with virtual users, but not necessarily with system users. .\"------------------------------------- .TP .B \-e If mail gets rejected, write the rejection reason to stderr and exit with status 77 (EX_NOPERM). The default is to send a rejection mail ourself. 
.\"------------------------------------- .TP .BI \-f\ envelope_sender Envelope sender address. .\"------------------------------------- .TP .B \-k Don\(aqt clear all environment at startup. .\"------------------------------------- .TP .BI \-m\ mailbox Destination mailbox (default is .BR INBOX ). If the mailbox doesn\(aqt exist, it will not be created (unless the .I lda_mailbox_autocreate setting is set to .BR yes ). If a message couldn\(aqt be saved to the .I mailbox for any reason, it\(aqs delivered to .B INBOX instead. .\"--------------------------------- .TP .BI \-o\ setting = value Overrides the configuration .I setting from .I @pkgsysconfdir@/dovecot.conf and from the userdb with the given .IR value . In order to override multiple settings, the .B \-o option may be specified multiple times. .\"------------------------------------- .TP .BI \-p\ path Path to the mail to be delivered instead of reading from stdin. If using maildir the file is hard linked to the destination if possible. This allows a single mail to be delivered to multiple users using hard links, but currently it also prevents deliver from updating cache file so it shouldn\(aqt be used unless really necessary. .\"------------------------------------------------------------------------ .SH "EXIT STATUS" .B dovecot\-lda will exit with one of the following values: .TP 4 .B 0 Delivery was successful. (EX_OK) .TP .B 64 Invalid parameter given. (EX_USAGE) .TP .B 77 .B \-e option was used and mail was rejected. Typically this happens when user is over quota and .B quota_full_tempfail = no is configured. (EX_NOPERM) .TP .B 75 A temporary failure. This is returned for almost all failures. See the log file for details. (EX_TEMPFAIL) .\"------------------------------------------------------------------------ .SH FILES .TP .I @pkgsysconfdir@/dovecot.conf Dovecot\(aqs main configuration file. .TP .I @pkgsysconfdir@/conf.d/10\-mail.conf Mailbox locations and namespaces. .TP .I @pkgsysconfdir@/conf.d/15\-lda.conf LDA specific settings. .TP .I @pkgsysconfdir@/conf.d/90\-plugin.conf Plugin specific settings. .TP .I @pkgsysconfdir@/conf.d/90\-quota.conf Quota configuration. .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR dovecot (1), .BR doveconf (1), .BR dsync (1) .PP Related MTA specific documentation: .\"------------------------------------- .TP Postfix: .BR postconf (5), .BR transport (5), .BR pipe (8) .\"------------------------------------- .PP Exim: .IP "The accept router" http://exim.org/exim\-html\-current/doc/html/spec_html/ch16.html .IP "Generic options for transports" http://exim.org/exim\-html\-current/doc/html/spec_html/ch24.html .IP "The pipe transport" http://exim.org/exim\-html\-current/doc/html/spec_html/ch29.html dovecot-2.2.9/doc/man/doveconf.1.in0000644000175000017500000001166612244400443013731 00000000000000.\" Copyright (c) 2010-2012 Dovecot authors, see the included COPYING file .TH DOVECONF 1 "2012-01-29" "Dovecot v2.2" "Dovecot" .SH NAME doveconf \- Dovecot\(aqs configuration dumping utility .\"------------------------------------------------------------------------ .SH SYNOPSIS .B doveconf .RB [ \-adnNSx ] [\fB\-c\fP \fIconfig\-file\fP] [\fB\-f\fP \fIfilter\fP] [\fB\-m\fP \fImodule\fP] .\"------------------------------------- .br .B doveconf .RB [ \-n ] [\fB\-c\fP \fIconfig\-file\fP] .IR section_name \ ... 
.\"------------------------------------- .br .B doveconf .RB [ \-h ] [\fB\-c\fP \fIconfig\-file\fP] [\fB\-f\fP \fIfilter\fP] .IR setting_name \ ... .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveconf reads and parses Dovecot\(aqs configuration files and converts them into a simpler format used by the rest of Dovecot. All standalone programs, such as .BR dovecot (1) and .BR dovecot\-lda (1), will first get their settings by executing doveconf. .PP For system administrators, .B doveconf is mainly useful for dumping the configuration in easy human readable output. .\"------------------------------------------------------------------------ .SH OPTIONS .TP .B \-a Show all settings with their currently configured values. .\"--------------------------------- .TP .BI \-c \ config\-file read configuration from the given .IR config\-file . By default .I @pkgsysconfdir@/dovecot.conf will be used. .\"--------------------------------- .TP .B \-d Show the setting\(aqs default value instead of the one currently configured. .\"--------------------------------- .TP .BI \-f \ filter Show the matching configuration for the specified .I filter condition. The .I filter option string has to be given as .IB name = value pair. For multiple filter conditions the .B \-f option can be supplied multiple times. .br Possible names for the .I filter are: .RS .TP .B lname The local hostname, e.g. mail.example.com. This will only match hostnames which were configured like: .br .B local_name mail.example.com { # special settings } .TP .B local The server\(aqs hostname or IP address. This will also match hostnames which were configured like: .br .B local imap.example.net { # special settings } .TP .B protocol, service The protocol, e.g. imap or pop3 .TP .B remote The client\(aqs hostname or IP address. .RE .\"--------------------------------- .TP .B \-h Hide the setting\(aqs name, show only the setting\(aqs value. .\"--------------------------------- .TP .BI \-m\ module Show only settings for the given .IR module . .RS e.g. .BR imap , .BR imap\-login , .BR lmtp , .BR pop3\ or .B pop3\-login .RE .\"--------------------------------- .TP .B \-n Show only settings with non\-default values. .\"--------------------------------- .TP .B \-N Show settings with non\-default values and explicitly set default values. .\"--------------------------------- .TP .B \-S Dump settings in simplified machine parsable/readable format. .\"--------------------------------- .TP .B \-x Expand configuration variables (e.g. \(Domail_plugins \(rA quota) and show file contents (from e.g. ssl_cert = \ @pkgsysconfdir@/dovecot.conf.new .fi .\"------------------------------------- .PP This example shows how to ask .B doveconf for a global setting and a protocol specific setting. The second command uses also the .B \-h option, in order to hide the setting\(aqs name. .sp .nf .B doveconf mail_plugins mail_plugins = quota .B doveconf \-h \-f protocol=imap mail_plugins quota imap_quota .fi .\"------------------------------------- .PP This example demonstrates how to dump a whole configuration section. 
.sp .nf .B doveconf dict dict { quota = pgsql:@pkgsysconfdir@/dovecot\-dict\-sql.conf.ext } .fi .PP Or how to dump only the quota dict: .sp .nf .B doveconf dict/quota dict/quota = pgsql:@pkgsysconfdir@/dovecot\-dict\-sql.conf.ext .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR dovecot (1), .BR dovecot\-lda (1), .BR dsync (1)dovecot-2.2.9/doc/man/option-u-user.inc0000644000175000017500000000051112244400443014643 00000000000000.TP .BI \-u\ user/mask Run the .I command only for the given .IR user . It\(aqs also possible to use .RB \(aq * \(aq and .RB \(aq ? \(aq wildcards (e.g. \-u *@example.org). .br When neither the .B \-A option nor .BI \-u\ user was specified, the .I command will be executed with the environment of the currently logged in user. dovecot-2.2.9/doc/man/deliver.10000644000175000017500000000002612244400443013137 00000000000000.so man1/dovecot-lda.1dovecot-2.2.9/doc/man/doveadm-index.1.in0000644000175000017500000000562312244400443014646 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-INDEX 1 "2013-11-23" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-index \- Index mailboxes .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " index " [" \-S .IR socket_path ] .RB [ \-q "] [" \-n .IR max_recent "] " mailbox .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " index " [" \-S .IR socket_path "] " .B \-A .RB [ \-q "] [" \-n .IR max_recent "] " mailbox .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " index " [" \-S .IR socket_path "] " .BI \-u \ user .RB [ \-q "] [" \-n .IR max_recent "] " mailbox .\"------------------------------------------------------------------------ .SH DESCRIPTION Add unindexed messages in a mailbox into index/cache file. If full text search is enabled, also add unindexed messages to the fts database. .PP The caching adds only the fields that were previously added to the mailbox\(aqs caching decisions, so it won\(aqt do anything useful for mailboxes that user\(aqs client hasn\(aqt accessed yet. You can use .B doveadm dump command to show a specific mailbox\(aqs current caching decisions. .PP Messages can also be added automatically to full text search index using: .sp .nf plugin { ... fts_autoindex = yes } .fi in .IR @pkgsysconfdir@/conf.d/90\-plugin.conf . .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- .TP .BI \-n \ max_recent An integer value, which specifies the maximum number of \(rsRecent messages in mailboxes. If the mailbox contains more than .I max_recent messages with \(rsRecent flag set, the mailbox will not be indexed. This may be useful to avoid unnecessary indexing for large mailboxes that are never opened. .\"------------------------------------- .TP .B \-q Queues the indexing to be run by indexer process. Without \-q the indexing is done directly by the .BR doveadm (1) process. Some backends like fts\-lucene can\(aqt handle multiple processes updating the indexes simultaneously, so \-q should usually be used on production. 
.\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I mailbox The name of the mailbox to index. .\"------------------------------------------------------------------------ .SH EXAMPLE Index bob\(aqs INBOX: .PP .nf .B doveadm index \-u bob INBOX .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-quota.1.in0000644000175000017500000000610412244400443014663 00000000000000.\" Copyright (c) 2010-2011 Dovecot authors, see the included COPYING file .TH DOVEADM\-QUOTA 1 "2011-02-17" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-quota \- Initialize/recalculate or show current quota usage .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR quota " [" \-S .IR socket_path "] " command .\"------------------------------------- .br .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR quota " [" \-S .IR socket_path "]" .IB command\ \-A .\"------------------------------------- .br .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR quota " [" \-S .IR socket_path "]" .IB command\ \-u \ user .\"------------------------------------------------------------------------ .SH DESCRIPTION In the first form, .BR doveadm (1) will execute the .I command for the currently logged in user. .PP In the second form, the .I command will be performed for all users. .PP In the third form, the .I command will affect only the matching .IR user (s). .IP Note: The .B quota get and .B quota recalc commands are only available when the global .I mail_plugins setting contains the .B quota plugin. .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH COMMANDS .SS quota get .BR doveadm " [" \-f .IR formatter ] .B quota get [\fB\-A\fP|\fB\-u\fP \fIuser\fP] .PP The .B quota get command is used to display the current quota usage. The storage values are reported in kilobytes. .PP This command uses by default the output formatter .BR table . .\"------------------------------------- .SS quota recalc .B doveadm quota recalc [\fB\-A\fP|\fB\-u\fP \fIuser\fP] .PP The .B quota recalc command is used to recalculate the current quota usage. .\"------------------------------------------------------------------------ .SH FILES .TP .I @pkgsysconfdir@/dovecot.conf Dovecot\(aqs main configuration file, including the .I dict section. .TP .I @pkgsysconfdir@/dovecot\-dict\-sql.conf.ext SQL dictionary proxy settings. .TP .I @pkgsysconfdir@/conf.d/10\-mail.conf Mailbox locations and namespaces, including global .I mail_location .TP .I @pkgsysconfdir@/conf.d/90\-quota.conf Quota configuration. .\"------------------------------------------------------------------------ .SH EXAMPLE Get the current quota usage of user jane. 
.sp .nf .B doveadm quota get \-u jane Quota name Type Value Limit % user STORAGE 90099 102400 87 user MESSAGE 20548 30000 68 .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-penalty.1.in0000644000175000017500000000362212244400443015210 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-PENALTY 1 "2010-07-12" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-penalty \- Show current penalties .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " penalty " [" \-a .IR anvil_socket_path ] [\fIip\fP[\fB/\fP\fImask\fP]] .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B doveadm penalty command can be used to see the current penalties. (Extend me!/explain it) .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .TP .BI \-a\ anvil_socket_path This option is used to specify an alternative socket. The option\(aqs argument is either an absolute path to a local UNIX domain socket, or a hostname and port .RI ( hostname : port ), in order to connect a remote host via a TCP socket. .sp By default .BR doveadm (1) will use the socket .IR @rundir@/anvil . The socket may be located in another directory, when the default .I base_dir setting was overridden in .IR @pkgsysconfdir@/dovecot.conf . .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .IR ip [/ mask ] To reduce/filter the output supply an IP address or a network range in CIDR notation (ip/mask). .\"------------------------------------------------------------------------ .SH EXAMPLE Show current penalties .sp .nf .B doveadm penalty IP penalty last_penalty last_update 192.0.2.222 3 2010\-06\-15 15:19:27 15:19:27 192.0.2.53 3 2010\-06\-15 15:19:34 15:19:34 .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-reload.10000644000175000017500000000002212244400443014364 00000000000000.so man1/doveadm.1dovecot-2.2.9/doc/man/doveadm.1.in0000644000175000017500000001414712244400443013542 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM 1 "2013-11-16" "Dovecot v2.2" "Dovecot" .SH NAME doveadm \- Dovecot\(aqs administration utility .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .IR command " [" command_options "] [" command_arguments ] .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveadm is the Dovecot administration tool. It can be used to manage various parts of Dovecot, as well as access users\(aq mailboxes. .br Execute .BR doveadm\ help , for a command usage listing. 
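.PP As a brief illustration of this general synopsis, the following invocation runs a single command with an explicit output formatter for one purely illustrative user (and assumes the quota plugin is enabled): .sp .nf .B doveadm \-f table quota get \-u jane .fi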
.\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\"------------------------------------------------------------------------ .SH COMMANDS .\"------------------------------------------------------------------------ These commands work directly with Dovecot\(aqs master process. .\"------------------------------------- .SS reload .B doveadm reload .PP Force .BR dovecot (1) to reload the configuration. .\"------------------------------------------------------------------------ .SS stop .B doveadm stop .PP Stop .BR dovecot (1) and all its child processes. .\"------------------------------------------------------------------------ .SS ADDITIONAL MASTER COMMANDS .TP .B doveadm director .BR doveadm\-director (1), Manage Dovecot directors (if used by proxy servers). .\"------------------------------------- .TP .B doveadm exec .BR doveadm\-exec (1), Easily execute commands from Dovecot\(aqs libexec_dir. .\"------------------------------------- .TP .B doveadm instance .BR doveadm\-instance (1), Manage the list of running Dovecot instances. .\"------------------------------------- .TP .B doveadm kick .BR doveadm\-kick (1), Disconnect users by user name and/or IP address. .\"------------------------------------- .TP .B doveadm log .BR doveadm\-log (1), Locate, test or reopen Dovecot\(aqs log files. .\"------------------------------------- .TP .B doveadm mount .BR doveadm\-mount (1), Manage the list of mountpoints where mails are stored. .\"------------------------------------- .TP .B doveadm penalty .BR doveadm\-penalty (1), Show current penalties. .\"------------------------------------- .TP .B doveadm who .BR doveadm\-who (1); Show who is logged in to the Dovecot server. .\"------------------------------------------------------------------------ .SS AUTHENTICATION COMMANDS .\"------------------------------------- .TP .B doveadm auth .BR doveadm\-auth (1), Test authentication for a user. .\"------------------------------------- .TP .B doveadm pw .BR doveadm\-pw (1), Dovecot\(aqs password hash generator. .\"------------------------------------- .TP .B doveadm user .BR doveadm\-user (1), Perform a user lookup in Dovecot\(aqs userdbs .\"------------------------------------------------------------------------ .SS MAILBOX COMMANDS .TP .B doveadm altmove .BR doveadm\-altmove (1), Move matching mails to the alternative storage. .\"------------------------------------- .TP .B doveadm batch .BR doveadm\-batch (1), Execute multiple commands for multiple users. .\"------------------------------------- .TP .B doveadm copy .BR doveadm\-copy (1), Copy messages matching the given search query into another mailbox. .\"------------------------------------- .TP .B doveadm deduplicate .BR doveadm\-deduplicate (1), Expunge duplicate messages. .\"------------------------------------- .TP .B doveadm dump .BR doveadm\-dump (1), Dump the content of Dovecot\(aqs binary mailbox index/log. .\"------------------------------------- .TP .B doveadm expunge .BR doveadm\-expunge (1), Expunge messages matching given search query. .\"------------------------------------- .TP .B doveadm fetch .BR doveadm\-fetch (1), Fetch messages matching given search query. .\"------------------------------------- .TP .B doveadm flags .BR doveadm\-flags (1), Add, remove or replace messages\(aq flags. .\"------------------------------------- .TP .B doveadm force\-resync .BR doveadm\-force\-resync (1), Repair broken mailboxes, in case Dovecot doesn\(aqt automatically do that. 
.\"------------------------------------- .TP .B doveadm import .BR doveadm\-import (1), Import messages matching given search query. .\"------------------------------------- .TP .B doveadm index .BR doveadm\-index (1), Index messages in a given mailbox. .\"------------------------------------- .TP .B doveadm mailbox .BR doveadm\-mailbox (1), Various commands related to handling mailboxes. .\"------------------------------------- .TP .B doveadm move .BR doveadm\-move (1) Move messages matching the given search query into another mailbox. .\"------------------------------------- .TP .B doveadm purge .BR doveadm\-purge (1) Remove messages with refcount=0 from mdbox files. .\"------------------------------------- .TP .B doveadm quota .BR doveadm\-quota (1), Initialize/recalculate or show current quota usage. .\"------------------------------------- .TP .B doveadm search .BR doveadm\-search (1), Show a list of mailbox GUIDs and message UIDs matching given search query. .\"------------------------------------------------------------------------ .SH "EXIT STATUS" .B doveadm will exit with one of the following values: .TP 4 .B 0 Selected command was executed successful. .TP .B >0 Command failed in some way. .\"------------------------------------------------------------------------ .SH ENVIRONMENT .TP .B USER This environment variable is used to determine the .I user if a command accepts a .I user but none was specified. .\"------------------------------------------------------------------------ .SH FILES .TP .I @pkgsysconfdir@/dovecot.conf Dovecot\(aqs main configuration file. .TP .I @pkgsysconfdir@/conf.d/10\-mail.conf Mailbox locations and namespaces. .TP .I @pkgsysconfdir@/conf.d/90\-plugin.conf Plugin specific settings. .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm\-help (1), .BR doveconf (1), .BR dovecot (1), .BR dsync (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/doveadm-dump.1.in0000644000175000017500000000415512244400443014503 00000000000000.\" Copyright (c) 2010-2012 Dovecot authors, see the included COPYING file .TH DOVEADM\-DUMP 1 "2012-02-21" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-dump \- Dump the content of Dovecot\(aqs binary mailbox index/log .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " dump " [" \-t .IR type "] " path .\"------------------------------------------------------------------------ .SH DESCRIPTION Dovecot uses several binary index and log files in order to improve performance for accessing mails. For some mailbox formats, such as sdbox and mdbox, the index files are part of the format itself. For details about index files, see also: http://wiki2.dovecot.org/IndexFiles .PP .B doveadm dump is used to show the contents of those mailbox index/log files, in human readable format. This is mainly useful for Dovecot developers when debugging some problem. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .TP .BI \-t\ type the file type of the file to be dumped. If the .I type was omitted, .BR doveadm (1) tries to detect the type of .IR path . 
.I type can be: .RS .TP 12 .B dbox \(rA m.\c .I n (sdbox or mdbox mailbox file) .TP .B index \(rA dovecot.index, dovecot.map.index .TP .B log \(rA dovecot.index.log, dovecot.map.index.log .TP .B mailboxlog \(rA dovecot.mailbox.log .TP .B thread \(rA dovecot.index.thread .RE .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I path The path to the corresponding dbox storage, index or log file. If only a directory is specified, doveadm tries to detect the type of files under it and dumps them. .\"------------------------------------------------------------------------ .SH EXAMPLE Look at the contents of a mailbox\(aqs index: .PP .nf .B doveadm dump ~/Maildir/.work/ .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-mailbox.1.in0000644000175000017500000001671512244460063015202 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-MAILBOX 1 "2013-11-24" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-mailbox \- Commands related to handling mailboxes .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter "] " mailbox_cmd " [" options "] [" arguments ] .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveadm mailbox can be used to query and modify mailboxes. .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I mailbox Is the name of a .IR mailbox , as visible to IMAP clients, except in UTF\-8 format. The hierarchy separator between a parent and child mailbox is commonly .RB \(aq / \(aq or .RB \(aq . \(aq, but this depends on your selected mailbox storage format and namespace configuration. The mailbox names may also require a namespace prefix. .\"------------------------------------------------------------------------ .SH COMMANDS .SS mailbox create .B doveadm mailbox create [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] .RB [ \-g .IR guid ] .RB [ \-s ] .IR mailbox\ ... .PP This command is used to create one or more mailboxes. The mailbox format of the created mailboxes depends on the .I mail_location setting, or the user\(aqs .I mail field, returned by the userdb. .PP .TP .BI \-g \ guid Create the mailbox with the specified GUID. .TP .B \-s When this option was given, the created mailboxes will be also added to the user\(aqs subscriptions. .\"------------------------------------------------------------------------ .SS mailbox delete .B doveadm mailbox delete [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] .RB [ \-s ] .IR mailbox\ ... .PP This command deletes a mailbox and expunges all the messages it contains. If the mailbox has any children, they won\(aqt be deleted. .br When the .B \-s option is present, the deleted mailboxes will be also unsubscribed. 
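.PP A minimal sketch of the create and delete commands, using a placeholder user bob and the mailbox name Archive/2013 (assuming \(aq/\(aq is the hierarchy separator): .sp .nf .B doveadm mailbox create \-u bob \-s Archive/2013 .B doveadm mailbox delete \-u bob Archive/2013 .fi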
.\"------------------------------------------------------------------------ .SS mailbox list .B doveadm mailbox list .RB [ \-7 | \-8 ] [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] .RB [ \-s ] [\fImailbox\fP ...] .PP To get an overview of existing mailboxes use this command. It\(aqs also possible to use wildcards in the .I mailbox name. .br When the .B \-s option is present, only subscribed mailboxes will be listed. Listed subscriptions may also contain mailboxes that are already deleted. .PP .\"------------------------------------- .TP .B \-7 Lists the mailboxes with mUTF\-7 encoding. .\"------------------------------------- .TP .B \-8 Lists the mailboxes with UTF\-8 encoding. .\"------------------------------------------------------------------------ .SS mailbox mutf7 .B doveadm mailbox mutf7 .RB [ \-7 | \-8 ] .IR name\ ... .PP The .B mailbox mutf7 command may be used to convert the international mailbox .I name into a modified version of the UTF\-7 encoding and vice versa. See RFC 3501, section 5.1.3 (Mailbox International Naming Convention). .PP .\"------------------------------------- .TP .B \-7 Indicates that the .IR name \(aqs string representation is mUTF\-7 encoded and it should be converted to UTF\-8. .\"------------------------------------- .TP .B \-8 Indicates that the .IR name \(aqs is UTF\-8 encoded and should be converted to mUTF\-7 (default). .TP .I name One or more mailbox names that should be converted. .\"------------------------------------------------------------------------ .SS mailbox rename .B doveadm mailbox rename [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] .RB [ \-s ] .I old_name .I new_name .PP The .B mailbox rename command is used to rename the mailbox .I old_name to .IR new_name . .br When the .B \-s option is given, .I old_name will be unsubscribed and .I new_name will be subscribed. .\"------------------------------------------------------------------------ .SS mailbox status .BR doveadm " [" \-f .IR formatter ] .B mailbox status [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] [\fB\-t\fP] .IR "fields mailbox\ " ... .PP Show the .B status of one or more mailboxes. The .I mailbox name may also contain wildcards. .br This command uses by default the output .I formatter .BR flow . .TP .B \-t Summarize the values of the status .I fields .BR messages , .BR recent , .BR unseen " and/or" .B vsize of multiple mailboxes to a sum (total). .\"------------------------------------- .TP .I fields Specify the status .I fields which should be shown. In order to specify multiple status .IR fields , enclosed them in quotes. .RS .TP .B all This is a special status field name. It means show all of the following .IR fields . When the .B \-t option is present, it means show only the .BR messages , .BR recent , .BR unseen " and" .B vsize .IR fields . .TP .B guid The .IR mailbox \(aqs globally unique identifier. .TP .B highestmodseq The highest mod\-sequence value of all messages in the .IR mailbox . .TP .B messages The number of messages in the .IR mailbox . .TP .B recent The number of messages with the \(rsRecent flag set. .TP .B uidnext The next unique identifier value. .TP .B uidvalidity The unique identifier validity value. .TP .B unseen The message sequence number of the first unseen message in the .IR mailbox . .TP .B vsize The .IR mailbox \(aqs virtual size, computed with CRLF line terminators. 
.RE .PP .\"------------------------------------------------------------------------ .SS mailbox subscribe .B doveadm mailbox subscribe [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] .IR mailbox\ ... .PP This command is used to subscribe one or more mailboxes. .\"------------------------------------------------------------------------ .SS mailbox unsubscribe .B doveadm mailbox unsubscribe [\fB\-A\fP|\fB\-u\fP \fIuser\fP] [\fB\-S\fP \fIsocket_path\fP] .IR mailbox\ ... .PP This command is used to unsubscribe one or more mailboxes. .\"------------------------------------------------------------------------ .SH EXAMPLE List subscribed mailboxes, beginning with \(aqdovecot\(aq, of user bob. .sp .nf .B doveadm mailbox list \-s \-u bob dovecot* dovecot dovecot/pigeonhole dovecot/pigeonhole/2.0 .fi .\"------------------------------------- .PP Now have a look at the status of user bob\(aqs dovecot mailboxes. .sp .nf .B doveadm \-f table mailbox status \-u bob \(dqmessages vsize\(dq dovecot* mailbox messages vsize dovecot 20501 93968492 dovecot/pigeonhole 0 0 dovecot/pigeonhole/2.0 47 323474 .fi .\"------------------------------------- .PP Converting an internationalized mailbox name from mUTF\-7 to UTF\-8 and vice versa. .sp .nf .B doveadm mailbox mutf7 \-7 \(dq~peter/mail/&U,BTFw\-/&ZeVnLIqe\-\(dq ~peter/mail/台北/日本語 .B doveadm mailbox mutf7 ~peter/mail/台北/日本語 ~peter/mail/&U,BTFw\-/&ZeVnLIqe\- .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-import.1.in0000644000175000017500000000665612244444217015067 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-IMPORT 1 "2013-11-24" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-import \- Import messages matching given search query .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " import " [" \-S .IR socket_path ] .RB [ \-s ] .I source_location dest_parent search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " import " [" \-S .IR socket_path ] .RB [ \-s ] .BI \-A " source_location dest_parent search_query" .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " import " [" \-S .IR socket_path ] .RB [ \-s ] .BI \-u " user source_location dest_parent search_query" .\"------------------------------------------------------------------------ .SH DESCRIPTION This command can be used to import mails from another mail storage specified by .I source_location to one or more user\(aqs mailboxes. All the mailboxes are imported under the given .I dest_parent mailbox, or to root level if .I dest_parent is empty (\(dq\(dq). The .I search_query can be used to restrict which mailboxes or messages are imported. In the first form, .BR doveadm (1) will executed the .B import action with the environment of the logged in system user. .PP In the second form, the mails will be imported for all users. .PP In the third form, the mails will be imported only for given .IR user (s). .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. 
.PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- .TP .B \-s When the .B \-s option is present, .I dest_parent and all new mailboxes under it will be listed in the .I subscriptions file. .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I dest_parent The name of the destination mailbox, under which the mails should be imported. .BR doveadm (1) will create the .I dest_parent mailbox if it doesn\(aqt exist. .TP .I search_query Copy messages matching this search query. See .BR doveadm\-search\-query (7) for details. .TP .I source_location This argument specifies the mailbox format and location of the source location. The syntax is the same as for the .I mail_location setting. For example: .B maildir:/backup/20101126/jane.doe/Maildir or .B mdbox:/srv/mail/john.doe/mdbox:ALT=/nfsmount/john.doe/mdbox .\"------------------------------------------------------------------------ .SH EXAMPLE This example imports all mails from a backup under a .I backup\-20101026 mailbox: .PP .nf .B doveadm import \-u jane.doe@example.org \(rs .B mdbox:/backup/20101026/jane.doe/mdbox backup\-20101026 all .fi .PP Another example that imports only messages from foo@example.org in the backup mdbox\(aqs INBOX to jane\(aqs INBOX: .PP .nf .B doveadm import \-u jane.doe@example.org \(rs .B mdbox:~/mdbox\-backup \(dq\(dq mailbox INBOX from foo@example.org .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-fetch (1), .BR doveadm\-search (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/doveadm-altmove.1.in0000644000175000017500000000545312244400443015207 00000000000000.\" Copyright (c) 2010-2011 Dovecot authors, see the included COPYING file .TH DOVEADM\-ALTMOVE 1 "2011-09-15" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-altmove \- Move matching mails to the alternative storage (dbox\-only) .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " altmove " [" \-r "] ["\-S .IR socket_path "] " search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " altmove " [" \-r "] ["\-S .IR socket_path "] " .BI \-A " search_query" .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " altmove " [" \-r "] ["\-S .IR socket_path "] " .BI \-u " user search_query" .\"------------------------------------------------------------------------ .SH DESCRIPTION This command can be used with sdbox or mdbox storage to move mails to alternative storage path when :ALT= is specified for the mail location. .PP In the first form, .BR doveadm (1) will executed the .B altmove action with the environment of the logged in system user. .PP In the second form, the command will be performed for all users. .PP In the third form, only matching mails of the given .IR user (s) will be moved to the alternative storage. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. 
.PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- .TP .B \-r When the .B \-r option is given this .I command works the other way round. Mails will be moved from the alternative storage back to the default mail location. .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I search_query Messages matching this search query will be moved to alt storage. See .BR doveadm\-search\-query (7) for details. .\"------------------------------------------------------------------------ .SH FILES .TP .I @pkgsysconfdir@/conf.d/10\-mail.conf Mailbox locations and namespaces. .TP .I @pkgsysconfdir@/conf.d/auth\-*.conf.ext Authentication processes, including userdb settings. .\"------------------------------------------------------------------------ .SH EXAMPLE This example moves seen mails older than one week to alternative storage under /nfsmount: .br .nf mail_location = mdbox:~/mdbox:ALT=/nfsmount/%h/mdbox .fi .PP .nf .B doveadm altmove \-u johnd@example.com seen savedbefore 1w .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/doveadm-copy.10000644000175000017500000000003012244400443014067 00000000000000.so man1/doveadm-move.1 dovecot-2.2.9/doc/man/doveadm-config.10000644000175000017500000000002312244400443014364 00000000000000.so man1/doveconf.1dovecot-2.2.9/doc/man/sed.sh0000644000175000017500000000140012244400443012527 00000000000000#!/bin/sh SRCDIR="${1:-`pwd`}" RUNDIR="${2:-/usr/local/var/run/dovecot}" PKGSYSCONFDIR="${3:-/usr/local/etc/dovecot}" PKGLIBEXECDIR="${4:-/usr/local/libexec/dovecot}" sed -e "/^@INCLUDE:global-options@$/{ r ${SRCDIR}/global-options.inc d }" \ -e "/^@INCLUDE:global-options-formatter@$/{ r ${SRCDIR}/global-options-formatter.inc d }" \ -e "/^@INCLUDE:option-A@$/{ r ${SRCDIR}/option-A.inc d }" \ -e "/^@INCLUDE:option-S-socket@$/{ r ${SRCDIR}/option-S-socket.inc d }" \ -e "/^@INCLUDE:option-u-user@$/{ r ${SRCDIR}/option-u-user.inc d }" \ -e "/^@INCLUDE:reporting-bugs@$/{ r ${SRCDIR}/reporting-bugs.inc d }" | sed -e "s|@pkgsysconfdir@|${PKGSYSCONFDIR}|" \ -e "s|@rundir@|${RUNDIR}|" \ -e "s|@pkglibexecdir@|${PKGLIBEXECDIR}|" dovecot-2.2.9/doc/man/Makefile.am0000644000175000017500000000363512244400443013470 00000000000000pkgsysconfdir = $(sysconfdir)/dovecot SUFFIXES = .1.in .1 dist_man1_MANS = \ deliver.1 \ doveadm-config.1 \ doveadm-copy.1 \ doveadm-reload.1 \ doveadm-stop.1 dist_man7_MANS = \ doveadm-search-query.7 nodist_man1_MANS = \ doveadm.1 \ doveadm-altmove.1 \ doveadm-auth.1 \ doveadm-batch.1 \ doveadm-deduplicate.1 \ doveadm-director.1 \ doveadm-dump.1 \ doveadm-exec.1 \ doveadm-expunge.1 \ doveadm-fetch.1 \ doveadm-flags.1 \ doveadm-import.1 \ doveadm-instance.1 \ doveadm-index.1 \ doveadm-force-resync.1 \ doveadm-help.1 \ doveadm-kick.1 \ doveadm-log.1 \ doveadm-mailbox.1 \ doveadm-mount.1 \ doveadm-move.1 \ doveadm-penalty.1 \ doveadm-purge.1 \ doveadm-pw.1 \ doveadm-quota.1 \ doveadm-search.1 \ doveadm-user.1 \ doveadm-who.1 \ doveconf.1 \ dovecot.1 \ dovecot-lda.1 \ dsync.1 man_includefiles = \ $(srcdir)/global-options-formatter.inc \ $(srcdir)/global-options.inc \ $(srcdir)/option-A.inc \ 
$(srcdir)/option-S-socket.inc \ $(srcdir)/option-u-user.inc \ $(srcdir)/reporting-bugs.inc EXTRA_DIST = \ doveadm.1.in \ doveadm-altmove.1.in \ doveadm-auth.1.in \ doveadm-batch.1.in \ doveadm-deduplicate.1.in \ doveadm-director.1.in \ doveadm-dump.1.in \ doveadm-exec.1.in \ doveadm-expunge.1.in \ doveadm-fetch.1.in \ doveadm-flags.1.in \ doveadm-import.1.in \ doveadm-instance.1.in \ doveadm-index.1.in \ doveadm-force-resync.1.in \ doveadm-help.1.in \ doveadm-kick.1.in \ doveadm-log.1.in \ doveadm-mailbox.1.in \ doveadm-mount.1.in \ doveadm-move.1.in \ doveadm-penalty.1.in \ doveadm-purge.1.in \ doveadm-pw.1.in \ doveadm-quota.1.in \ doveadm-search.1.in \ doveadm-user.1.in \ doveadm-who.1.in \ doveconf.1.in \ dovecot.1.in \ dovecot-lda.1.in \ dsync.1.in \ sed.sh \ $(man_includefiles) CLEANFILES = $(nodist_man1_MANS) .1.in.1: $(man_includefiles) Makefile $(SHELL) $(srcdir)/sed.sh $(srcdir) $(rundir) $(pkgsysconfdir) \ $(pkglibexecdir) < $< > $@ dovecot-2.2.9/doc/man/doveadm-expunge.1.in0000644000175000017500000000504012244444174015214 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-EXPUNGE 1 "2013-11-24" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-expunge \- Expunge messages matching given search query .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " expunge " [" \-S .IR socket_path ] .RB [ \-d ] .I search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " expunge " [" \-S .IR socket_path ] .RB [ \-d ] .BI \-A " search_query" .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " expunge " [" \-S .IR socket_path ] .RB [ \-d ] .BI \-u " user search_query" .\"------------------------------------------------------------------------ .SH DESCRIPTION This command can be used to expunge mails matching the given search query. It is typically used to expunge old mails from users\(aq Trash and/or Spam mailboxes. To test which messages a given search query would match, you can use .I doveadm fetch or .I doveadm search commands. .PP In the first form, .BR doveadm (1) will executed the .B expunge action with the environment of the logged in system user. .PP In the second form, the command will be performed for all users. .PP In the third form, only matching mails of the given .IR user (s) will be expunged. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- .TP .B \-d Delete the mailbox if it is empty after expunging. .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I search_query Expunge messages matching this search query. See .BR doveadm\-search\-query (7) for details. 
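.PP As mentioned above, the effect of a query can be previewed before anything is expunged, for example with .BR doveadm\-search (1) (the mailbox name and interval below are placeholders): .sp .nf .B doveadm search \-u jane.doe@example.org mailbox Trash savedbefore 2w .fi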
.\"------------------------------------------------------------------------ .SH EXAMPLE This example expunges messages from Spam mailbox that were saved/copied there more than two weeks ago: .PP .nf .B doveadm expunge \-u jane.doe@example.org mailbox Spam savedbefore 2w .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-fetch (1), .BR doveadm\-purge (1), .BR doveadm\-search (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/doveadm-user.1.in0000644000175000017500000000753712244400443014523 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-USER 1 "2013-11-23" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-user \- Perform a user lookup in Dovecot\(aqs userdbs .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " user [\fB\-a\fP \fIuserdb_socket_path\fP] [\fB\-f\fP \fIfield\fP] .RB [ \-u ] [\fB\-x\fP \fIauth_info\fP] .IR user\ ... .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B user command is used to perform a user lookup \- to show what information Dovecot sees about the .IR user (s), or if it exists at all in the configured .IR userdb (s). .PP The .I auth_info may be useful when the userdb is for example a SQL database and you are using %variables, like .B %s or .BR %l , in the .I user_query setting. Or when you have configured the userdb in a way like this: .sp .nf userdb { driver = passwd\-file args = /etc/%s.passwd } .fi .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- .TP .BI \-a\ userdb_socket_path This option is used to specify an absolute path to an alternative UNIX domain socket. .sp By default .BR doveadm (1) will use the socket .IR @rundir@/auth\-userdb . The socket may be located in another directory, when the default .I base_dir setting was overridden in .IR @pkgsysconfdir@/dovecot.conf . .\"------------------------------------- .TP .BI \-f\ field When this option and the name of a userdb field is given, .BR doveadm (1) will show only the value of the specified field. .\"------------------------------------- .TP .B \-u When this option is given, .BR doveadm (1) will only show values from the .IR userdb . Without \-u parameter if any of the .IR uid ", " gid ", " home " or " mail fields are missing, their defaults are taken from configuration file. .\"------------------------------------- .TP .BI \-x\ auth_info .I auth_info specifies additional conditions for the .B user command. The .I auth_info option string has to be given as .IB name = value pair. For multiple conditions the .B \-x option could be supplied multiple times. .br Possible names for the .I auth_info are: .RS .TP .B service The service for which the userdb lookup should be tested. The value may be the name of a service, commonly used with Dovecot. For example: .BR imap , .BR pop3\ or .BR smtp . .TP .B lip The local IP address (server) for the test. .TP .B rip The remote IP address (client) for the test. .TP .B lport The local port, e.g. 143 .TP .B rport The remote port, e.g. 24567 .RE .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I user Is a .IR user \(aqs login name. 
Depending on the configuration, a login name may be for example .BR jane " or " john@example.com . It\(aqs also possible to use .RB \(aq * \(aq and .RB \(aq ? \(aq wildcards (e.g. \-u *@example.org). .\"------------------------------------------------------------------------ .SH EXAMPLE Perform a user lookup for the users jane and john@example.com. .sp .nf .B doveadm user jane john@example.com userdb: jane uid : 8001 gid : 8001 home : /home/jane mail : sdbox:~/sdbox plugins : sieve quota_rule: *:storage=150M userdb: john@example.com home : /srv/mail/8/70312/79832 uid : 79832 gid : 70312 mail : mdbox:~/mdbox .fi .PP The next example shows a user lookup, using wildcards. .sp .nf .B doveadm user *.?oe@example.net jane.doe@example.net judy.roe@example.net john.doe@example.net .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/dovecot.1.in0000644000175000017500000001006512244400443013561 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVECOT 1 "2013-08-06" "Dovecot v2.2" "Dovecot" .SH NAME dovecot \- a secure and highly configurable IMAP and POP3 server .\"------------------------------------------------------------------------ .SH SYNOPSIS \fBdovecot\fP [\fB\-Fp\fP] [\fB\-c\fP \fIconfig\-file\fP] .br .B dovecot \-a [\fB\-c\fP \fIconfig\-file\fP] .br .B dovecot \-n [\fB\-c\fP \fIconfig\-file\fP] .br .B dovecot \-\-build\-options .br .B dovecot \-\-help .br .B dovecot \-\-hostdomain .br .B dovecot \-\-version .br .B dovecot reload .br .B dovecot stop .\"------------------------------------------------------------------------ .SH DESCRIPTION Dovecot is an open source IMAP and POP3 server for Linux/UNIX\-like systems, written with security primarily in mind. Dovecot is an excellent choice for both small and large installations. It\(aqs fast, simple to set up, requires no special administration and it uses very little memory. .\"------------------------------------------------------------------------ .SH OPTIONS .TP .B \-a Dump all configuration settings to stdout and exit successfully. The same as .IR doveconf\ \-a . .TP .BI \-c\ config\-file Start .B dovecot with an alternative configuration. .TP .B \-F Run .B dovecot in foreground, do not daemonize. .TP .B \-n Dump non\-default settings to stdout and exit successfully. The same as .IR doveconf\ \-n . .TP .B \-p Prompt for the ssl key password for the configured .I ssl_key on startup. .TP .B \-\-build\-options Show Dovecot\(aqs build options and exit successfully. .TP .B \-\-help Print a usage message to stdout and exit successfully. .TP .B \-\-hostdomain Shows the current .IR host . domain name of the system. If the domain lookup should fail for some reason, only the hostname will be shown. .TP .B \-\-version Show Dovecot\(aqs version and exit successfully. .\"------------------------------------------------------------------------ .SH COMMANDS .TP .B reload Force .B dovecot to reload its configuration. .TP .B stop Shutdown .B dovecot and all its child processes. .PP When .I shutdown_clients is set to .BR no , existing sessions will continue to use the old settings, after a .BR "dovecot reload" . Also all sessions will keep alive after a .BR "dovecot stop" . .br By default all active sessions will be shut down. 
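.PP For example, before a reload the current value of .I shutdown_clients can be checked with .BR doveconf (1) to see whether existing sessions will be shut down or will keep running with the old settings: .sp .nf .B doveconf \-h shutdown_clients .B dovecot reload .fi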
.\"------------------------------------------------------------------------ .SH SIGNALS Dovecot handles the following .I signals as described: .TP .B HUP Force .B dovecot to reload its configuration. .TP .B INT Shutdown .B dovecot and all its child processes. .TP .B TERM Shutdown .B dovecot and all its child processes. .TP .B USR1 Force .B dovecot to reopen all configured log files (\c .IR log_path , .IR info_log_path\ and .IR debug_log_path ). . .PP The .I signals .BR ALARM\ and .B PIPE are ignored. .\"------------------------------------------------------------------------ .SH FILES .TP .I @pkgsysconfdir@/dovecot.conf Dovecot\(aqs main configuration file. .TP .I @pkgsysconfdir@/dovecot\-ldap.conf.ext Dovecot\(aqs LDAP authdb/userdb module configuration file. .TP .I @pkgsysconfdir@/dovecot\-sql.conf.ext Dovecot\(aqs SQL authdb/userdb module configuration file. .TP .I @pkgsysconfdir@/dovecot\-dict\-sql.conf.ext Dovecot\(aqs dict configuration with SQL\-backend. .TP .I @pkgsysconfdir@/conf.d/auth\-*\-conf.ext Configuration files of different authentication modules. .TP .I @pkgsysconfdir@/conf.d/*.conf Configuration files of different services and settings. .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH AUTHOR Dovecot and its manual pages were written by the Dovecot authors , mainly Timo Sirainen , and are licensed under the terms of the MIT and LGPLv2.1 licenses, see for details. .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveconf (1), .BR dovecot\-lda (1), .BR dsync (1)dovecot-2.2.9/doc/man/doveadm-director.1.in0000644000175000017500000001545312244400443015354 00000000000000.\" Copyright (c) 2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-DIRECTOR 1 "2013-07-12" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-director \- Manage Dovecot directors .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " "director add" [\fB\-a\fP \fIdirector_socket_path\fP] .IR host " [" vhost_count ] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "director flush" [\fB\-a\fP \fIdirector_socket_path\fP] \fIhost\fP|\fBall\fP .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "director map" [\fB\-a\fP \fIdirector_socket_path\fP] [\fB\-f\fP \fIusers_file\fP] .RI [ host ] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "director remove" [\fB\-a\fP \fIdirector_socket_path\fP] .I host .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "director dump" [\fB\-a\fP \fIdirector_socket_path\fP] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "director status" [\fB\-a\fP \fIdirector_socket_path\fP] .RI [ user ] .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveadm director can be used to manage and query the status of the list of backend mail servers where Dovecot proxy can redirect connections to. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- .TP .BI \-a\ director_socket_path This option is used to specify an alternative socket. 
The option\(aqs argument is either an absolute path to a local UNIX domain socket, or a hostname and port .RI ( hostname : port ), in order to connect a remote host via a TCP socket. .sp By default .BR doveadm (1) will use the socket .IR @rundir@/director\-admin . The socket may be located in another directory, when the default .I base_dir setting was overridden in .IR @pkgsysconfdir@/dovecot.conf . .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I host A mail server\(aqs hostname or IP address. .\"------------------------------------- .TP .I user Is a .IR user \(aqs login name. Depending on the configuration, a login name may be for example .BR jane " or " john@example.com . .\"------------------------------------- .TP .I vhost_count The number of \(dqvirtual hosts\(dq to assign to this server. The higher the number is relative to other servers, the more connections it gets. The default is 100. .\"------------------------------------------------------------------------ .SH COMMANDS .SS director add .B doveadm director add [\fB\-a\fP \fIdirector_socket_path\fP] .I host .RI [ vhost_count ] .PP The command\(aqs tasks are: .TP 4 * assign a new mail server to the director. .TP * increase/decrease the .I vhost_count of an already assigned server. .PP .\"------------------------------------- .SS director flush .B doveadm director flush [\fB\-a\fP \fIdirector_socket_path\fP] \fIhost\fP|\fBall\fP .PP .B doveadm director flush drops all user associations either from the given .I host or .B all hosts. This command is intended mainly for testing purposes. .\"------------------------------------- .SS director map .B doveadm director map [\fB\-a\fP \fIdirector_socket_path\fP] [\fB\-f\fP \fIusers_file\fP] .RI [ host ] .PP The command .B doveadm director map is used to list current .IR user "\ \(->\ " host mappings. Note that the director works using 32bit hashes which makes collisions quite likely, so this command can\(aqt reliably list exactly which users have recently logged in. .PP .TP .BI \-f\ users_file Path to a file containing all user names (one per line). When given no .I userdb lookup will be performed. This may be a helpful alternative when for example the network connection to the LDAP or SQL server is slow. .TP .I host Specify a server\(aqs IP address or hostname, to list only mappings of the given .IR host . .\"------------------------------------- .SS director remove .B doveadm director remove [\fB\-a\fP \fIdirector_socket_path\fP] .I host .PP Use this command in order to remove the given .I host from the director. .\"------------------------------------- .SS director dump .B doveadm director dump [\fB\-a\fP \fIdirector_socket_path\fP] .PP Dump the current host configuration as doveadm commands. These commands can be easily run after a full director cluster restart to get back to the dumped state. .\"------------------------------------- .SS director status .B doveadm director status [\fB\-a\fP \fIdirector_socket_path\fP] .RI [ user ] .PP This command is used to show the current usage of all assigned mail servers. .br When a user name is given, this command shows which server the .I user is currently assigned to, where the user will be assigned after the current saved assignment gets removed and where the user would be assigned to if the whole proxy cluster was restarted fresh. .\"------------------------------------------------------------------------ .SH FILES .TP .I @pkgsysconfdir@/dovecot.conf Dovecot\(aqs main configuration file. 
.TP .I @pkgsysconfdir@/conf.d/10\-director.conf Director specific settings. .\"------------------------------------------------------------------------ .SH EXAMPLE Add a director with vhost count 150 (or change existing one\(aqs vhost count to 150): .PP .nf .B doveadm \-v director add x1357.imap.ha.example.net 150 2001:db8:543:6861:143::1357: OK .fi .\"------------------------------------- .PP Remove a director: .PP .nf .B doveadm director remove x1357.imap.ha.example.net .fi .\"------------------------------------- .PP Query the status of mail hosts in a director: .PP .nf .B doveadm director status mail server ip vhosts users 192.168.10.1 100 125 192.168.10.2 100 144 192.168.10.3 100 115 .fi .\"------------------------------------- .PP Query the status of a user\(aqs assignment: .PP .nf .B doveadm director status user@example.com Current: 192.168.10.1 (expires 2010\-06\-18 20:17:04) Hashed: 192.168.10.2 Initial config: 192.168.10.3 .fi .PP This means that the user is currently assigned to mail server on IP 192.168.10.1. After all of user\(aqs connections have logged out, the assignment will be removed (currently it looks like at 20:17:04, but that may be increased). After the assignment has expired, the user will next time be redirected to 192.168.10.2 (assuming no changes to director settings). If the entire Dovecot proxy cluster was restarted, so that all of the director configuration would revert back to its initial values, the user would be redirected to 192.168.10.3. .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-who.1.in0000644000175000017500000000602412244400443014330 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-WHO 1 "2010-07-12" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-who \- Show who is logged in to the Dovecot server .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR who\ [ \-1 ] [\fB\-a\fP \fIanvil_socket_path\fP] .RI [ user ] [\fIip\fP[\fB/\fP\fImask\fP]] .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B who command is used to show which users from which hosts are currently connected to which service. .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\" --- command specific options --- "/. .PP This command uses by default the output formatter .BR table . .PP Command specific .IR options : .\"------------------------------------- .TP .B \-1 Print one line per user and connection. Otherwise the connections are grouped by the username. .\"------------------------------------- .TP .BI \-a\ anvil_socket_path This option is used to specify an alternative socket. The option\(aqs argument is either an absolute path to a local UNIX domain socket, or a hostname and port .RI ( hostname : port ), in order to connect a remote host via a TCP socket. .sp By default .BR doveadm (1) will use the socket .IR @rundir@/anvil . The socket may be located in another directory, when the default .I base_dir setting was overridden in .IR @pkgsysconfdir@/dovecot.conf . 
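.sp For example, to query a remote anvil listener over TCP (the hostname and port below are placeholders): .sp .nf .B doveadm who \-a anvil.example.com:24242 .fi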
.\"------------------------------------------------------------------------ .SH ARGUMENTS .TP \fIip\fP[\fB/\fP\fImask\fP] Specify an .I ip address or network range, in CIDR notation, to reduce the result to matching connections. .\"------------------------------------- .TP .I user List only users, whose name match .IR user . It\(aqs also possible to use wildcards in the .I user name. .\"------------------------------------------------------------------------ .SH EXAMPLE Show authenticated sessions, filtered by the client\(aqs IP address. .sp .nf .B doveadm who ::1 username # proto (pids) (ips) jane 2 imap (30155 30412) (::1) .B doveadm who 192.0.2.0/24 username # proto (pids) (ips) john@example.com 1 imap (30257) (192.0.2.34) .fi .PP Show authenticated sessions, filtered by username. .sp .nf .B doveadm who pvo username # proto (pids) (ips) pvo 1 sieve (30343) (fd95:4eed:38ba::25) pvo 4 imap (25693 25686 25679 25669) (fd95:4eed:38ba::25) .B doveadm who ja* username # proto (pids) (ips) james 1 imap (30091) (127.0.0.1) jane 2 imap (30155 30412) (::1) .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-kick (1)dovecot-2.2.9/doc/man/global-options-formatter.inc0000644000175000017500000000111512244400443017050 00000000000000.SH OPTIONS Global .BR doveadm (1) .IR options : .TP .B \-D Enables verbosity and debug messages. .TP .BI \-f\ formatter Specifies the .I formatter for formatting the output. Supported formatters are: .RS .TP .B flow prints each line with .IB key = value pairs. .TP .B pager prints each .IR key :\ value pair on its own line and separates records with form feed character .RB ( ^L ). .TP .B tab prints a table header followed by tab separated value lines. .TP .B table prints a table header followed by adjusted value lines. .RE .TP .B \-v Enables verbosity, including progress counter. dovecot-2.2.9/doc/man/option-A.inc0000644000175000017500000000130012244400443013600 00000000000000.TP .B \-A If the .B \-A option is present, the .I command will be performed for all users. Using this option in combination with system users from .B userdb { driver = passwd } is not recommended, because it contains also users with a lower UID than the one configured with the .I first_valid_uid setting. .sp When the SQL userdb module is used make sure that the .I iterate_query setting in .I @pkgsysconfdir@/dovecot\-sql.conf.ext matches your database layout. When using the LDAP userdb module, make sure that the .IR iterate_attrs " and " iterate_filter settings in .I @pkgsysconfdir@/dovecot-ldap.conf.ext match your LDAP schema. Otherwise .BR doveadm (1) will be unable to iterate over all users. 
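.sp For example, with a simple SQL layout the iteration query in .I @pkgsysconfdir@/dovecot\-sql.conf.ext might look like this (a sketch only; table and column names depend on your schema): .sp .nf iterate_query = SELECT username AS user FROM users .fi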
dovecot-2.2.9/doc/man/doveadm-pw.1.in0000644000175000017500000001057112244400443014163 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-PW 1 "2013-08-17" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-pw \- Dovecot\(aqs password hash generator .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " "pw \-l" .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " pw [\fB\-p\fP \fIpassword\fP] [\fB\-r\fP \fIrounds\fP] [\fB\-s\fP \fIscheme\fP] [\fB\-u\fP \fIuser\fP] .RB [ \-V ] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "pw \-t" .I hash [\fB\-p\fP \fIpassword\fP] [\fB\-u\fP \fIuser\fP] .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveadm pw is used to generate password hashes for different password .IR scheme s and optionally verify the generated hash. .PP All generated password hashes have a .RI { scheme } prefix, for example .RB { SHA512\-CRYPT.HEX }. All passdbs have a default scheme for passwords stored without the .RI { scheme } prefix. The default scheme can be overridden by storing the password with the scheme prefix. .PP .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- .TP .B \-l List all supported password .IR scheme s and exit successfully. .br There are up to three optional password .IR scheme s: .BR BLF\-CRYPT " (Blowfish crypt)," .BR SHA256\-CRYPT\ and .BR SHA512\-CRYPT . Their availability depends on the system\(aqs currently used libc. .\"------------------------------------- .TP .BI \-p\ password The plain text .I password for which the hash should be generated. If no .I password was given .BR doveadm (1) will prompt interactively for one. .\"------------------------------------- .TP .BI \-r\ rounds The password .IR scheme s .BR BLF\-CRYPT , .BR SHA256\-CRYPT\ and .B SHA512\-CRYPT supports a variable number of encryption .IR rounds . The following table shows the minimum/maximum number of encryption .I rounds per scheme. When the .B \-r option was omitted the default number of encryption rounds will be applied. .\" .sp .nf Scheme | Minimum | Maximum | Default ---------------------------------------------- BLF\-CRYPT | 4 | 31 | 5 SHA256\-CRYPT | 1000 | 999999999 | 5000 SHA512\-CRYPT | 1000 | 999999999 | 5000 .fi .\" .\"------------------------------------- .TP .BI \-s\ scheme The password .I scheme which should be used to generate the hashed password. By default the .BI CRAM\-MD5\ scheme will be used. It is also possible to append an encoding suffix to the .IR scheme . Supported encoding suffixes are: .BR .b64 , .BR .base64\ and .BR .hex . .br See also http://wiki2.dovecot.org/Authentication/PasswordSchemes for more details about password schemes. .\"------------------------------------- .TP .BI \-t\ hash Test if the given password .IR hash matches a given plain text password. The plain text password may be passed using the .BR \-p " option." When no password was specified, .BR doveadm (1) will prompt interactively for one. .\"------------------------------------- .TP .BI \-u\ user When the .BI DIGEST\-MD5\ scheme is used, also the .I user name must be given, because the user name is a part of the generated hash. 
For more information about Digest\-MD5 please read also: http://wiki2.dovecot.org/Authentication/Mechanisms/DigestMD5 .\"------------------------------------- .TP .B \-V When this option is given, the hashed password will be internally verified. The result of the verification will be shown after the hashed password, enclosed in parenthesis. .\"------------------------------------------------------------------------ .SH EXAMPLE The first password hash is a DIGEST\-MD5 hash for jane.roe@example.com. The second password hash is a CRAM\-MD5 hash for john.doe@example.com. .sp .nf .B doveadm pw \-s digest\-md5 \-u jane.roe@example.com Enter new password: Retype new password: {DIGEST\-MD5}9b9dcb4466233a9307bbc33708dffda0 .B doveadm pw Enter new password: Retype new password: {CRAM\-MD5}913331d8782236a8ecba7764a63aa27b26437fd40ca878d887f11d81245c2c6b .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-batch.1.in0000644000175000017500000000521012244400443014610 00000000000000.\" Copyright (c) 2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-BATCH 1 "2013-11-17" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-batch \- Execute multiple commands for multiple users .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " batch " [" \-S .IR socket_path "] " .BI \-A " sep command sep command" [...] .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " batch " [" \-S .IR socket_path "] " .BI \-u " usermask sep command sep command" [...] .\"------------------------------------------------------------------------ .SH DESCRIPTION The command .B doveadm batch is used to execute multiple .BR doveadm (1) .IR command s sequentially for multiple users. This is primarily an optimization where Dovecot can do all the mailbox operations for the same user at once, instead of having to go through the users multiple times. .PP In the first form .BR doveadm (1) will loop over all users, defined in the configured .IR user_db (s), and execute the sequence of .IR command s for each of them. .PP In the second form the .IR command s will be executed for each user matching the given .IR usermask . .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I command Any .BR doveadm (1) command, with arguments and options \(em except for the .BR \-A ", " \-S " and " \-u " options." .\"------------------------------------- .TP .I sep A single character used as command separator, e.g. \(aq:\(aq. Be careful, unquoted characters, like .BR ; " or " & , may be interpreted by your shell and .BR doveadm (1) will never see them. 
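.sp For instance, when \(aq;\(aq is chosen as the separator, it has to be quoted so that it reaches .BR doveadm (1) instead of being interpreted by the shell (an illustrative sketch of the first form): .sp .nf .B doveadm batch \-A \(aq;\(aq altmove seen savedbefore 30d \(aq;\(aq purge .fi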
.\"------------------------------------------------------------------------ .SH EXAMPLE In this example we move seen mails, older than one month, for all example.net users to the alternative storage under /nfsmount: .br .nf mail_location = mdbox:~/mdbox:ALT=/nfsmount/%h/mdbox .fi The second command will remove messages with refcount=0 from the primary storage. .PP .nf .B doveadm batch \-u \(rs*@example.net : altmove seen savedbefore 30d : purge .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-log.1.in0000644000175000017500000000617412244464325014333 00000000000000.\" Copyright (c) 2010-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-LOG 1 "2013-11-24" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-log \- Locate, test or reopen Dovecot\(aqs log files .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " "log errors" " [" \-s .IR min_timestamp ] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "log find" .RI [ directory ] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "log reopen" .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " "log test" .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B doveadm log .I commands are used to locate and reopen the log files of .BR dovecot (1). It\(aqs also possible to test the configured targets of the .I *log_path settings. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\"------------------------------------------------------------------------ .SH COMMANDS .SS log errors .BR "doveadm log errors" " [" \-s .IR min_timestamp ] .PP The .B log errors command is used to show the last \- up to 1,000 \- errors and warnings. If no output is generated, no errors have occurred since the last start. .PP .TP .BI \-s \ min_timestamp An integer value, representing seconds since the epoch \- also known as Unix timestamp. When a min_timestamp was given, .BR doveadm (1) will only show errors occurred since that point in time. .\"------------------------------------- .SS log find .B doveadm log find .RI [ directory ] .PP The .B log find command is used to show the location of the log files, to which .BR dovecot (1) sends its log messages. If .BR dovecot (1) logs its messages through .BR syslogd (8) and .BR doveadm (1) could not find any log files, you can specify the .I directory where your syslogd writes its log files. .\"------------------------------------- .SS log reopen .B doveadm log reopen .PP This command causes .B doveadm to reopen all log files, configured in the .IR log_path , .I info_log_path and .I debug_log_path settings. These settings are configured in .IR @pkgsysconfdir@/conf.d/10\-logging.conf . .br This is for example useful after manually rotating the log files. .\"------------------------------------- .SS log test .B doveadm log test .PP This command causes .B doveadm to write the message \(dqThis is Dovecot\(aqs .I priority log .RI ( timestamp )\(dq to the configured log files. The used priorities are: .BR debug , .BR info , .BR warning , .BR error and .BR fatal . .\"------------------------------------------------------------------------ .SH EXAMPLE This example shows how to locate the log files used by .BR dovecot (1). 
.sp .nf .B doveadm log find Looking for log files from /var/log Debug: /var/log/dovecot.debug Info: /var/log/mail.log Warning: /var/log/mail.log Error: /var/log/mail.log Fatal: /var/log/mail.log .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-search-query.70000644000175000017500000002240012244400443015540 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-SEARCH\-QUERY 7 "2011-11-24" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-search\-query \- Overview of search queries for doveadm mailbox \ commands .\"------------------------------------------------------------------------ .SH DESCRIPTION Several .BR doveadm (1) commands use a .I search_query in order to act only on matching messages. This manual page describes all .IR SEARCH\ KEYS , which can be used in a .IR search_query . .PP Each .I search_query consists at least of one .IR SEARCH\ KEY . Most of the .I SEARCH KEYS require an argument. All .I SEARCH\ KEYS are case\-insensitive. The shortest valid .I search_query is .BR ALL . For example: .sp .RS .nf doveadm search ALL .fi .RE .PP Multiple search query expressions will be combined with the .B AND operator by default. To find all messages that are new and greater than 50 kilobyte, one can use: .sp .RS .nf doveadm search NEW LARGER 50k .fi .RE .PP .BR OR \-ed .I SEARCH KEYS have to be written in parenthesis, when mixing ANDs and ORs. Shells commonly require escaping for parentheses. To find messages that were saved on the \(dq13th of April 2007\(dq AND have the \(rsSeen and/or \(rsFlagged flag set, one can use for example: .sp .RS .nf doveadm search SAVEDON 2007\-04\-13 \(rs( SEEN OR FLAGGED \(rs) .fi .RE .PP It\(aqs also possible to specify the mailbox, in which should be searched. Use either the keyword .B mailbox and the name of the mailbox or the keyword .B mailbox\-guid and the mailbox\(aqs globally unique identifier in the .IR search_query . To find all messages in the mailbox with the GUID \(dq44f68b13ce97044b837f000035ca9452\(dq use: .sp .RS .nf doveadm search mailbox\-guid 44f68b13ce97044b837f000035ca9452 ALL .fi .RE .PP To list all deleted messages in the \(dqTrash\(dq folder use: .sp .RS .nf doveadm search mailbox Trash DELETED .fi .RE .\"------------------------------------------------------------------------ .SH SEARCH KEYS The following search keys from the specification of IMAP version 4 revision 1 (see: RFC 3501, section 6.4.4) are supported: .\"----------------- .TP .B ALL Matches all messages. .\"----------------- .TP .B ANSWERED Matches messages with the IMAP flag \(rsAnswered set. .\"----------------- .TP .BI BCC\ pattern Matches messages, which contain .I pattern in the BCC field of the message\(aqs IMAP envelope structure. .\"----------------- .TP .BI BEFORE \ date\ specification Matches messages with an internal date before .IR date\ specification . .\"----------------- .TP .BI BODY\ pattern Matches messages, which contain .I pattern in the body part. .\"----------------- .TP .BI CC\ pattern Matches messages, which contain .I pattern in the CC field of the message\(aqs IMAP envelope structure. .\"----------------- .TP .B DELETED Matches messages with the IMAP flag \(rsDeleted set. .\"----------------- .TP .B DRAFT Matches messages with the IMAP flag \(rsDraft set. 
.\"----------------- .TP .B FLAGGED Matches messages with the IMAP flag \(rsFlagged set. .\"----------------- .TP .BI FROM\ pattern Matches messages, which contain .I pattern in the FROM field of the message\(aqs IMAP envelope structure. .\"----------------- .TP \fBHEADER\fP \fIfield\fP \fIpattern\fP Matches messages that have the named header .IR field , when an empty .I pattern is given, or messages whose header .IR field \(aqs value contains the specified .IR pattern . .\"----------------- .TP .BI KEYWORD\ keyword Matches messages with the given IMAP .I keyword (e.g. \(DoForwarded) flag set. .\"----------------- .TP .BI LARGER\ size Matches messages that are larger than the specified .IR size . .\"----------------- .TP .BI MAILBOX\ name Matches messages in the mailbox with the specified .IR name . .\"----------------- .TP .BI MAILBOX\-GUID\ guid Matches messages in the mailbox with the specified .IR guid . .\"----------------- .TP .B NEW Matches messages, which have the IMAP flag \(rsRecent set .B but not the IMAP flag \(rsSeen. .\"----------------- .TP .BI NOT\ search\ key Inverse matching \- matches messages, where the search doesn\(aqt match the specified .I search\ key or its value. .\"----------------- .TP .B OLD Matches messages, which do not have the IMAP flag \(rsRecent set. .\"----------------- .TP .BI ON\ date\ specification Matches messages whose internal date matches the given .IR date\ specification . .\"----------------- .TP .IB search\ key\ OR\ search\ key Matches messages where one of the OR\-ed search keys matches. .br Note: IMAP4rev1 uses the syntax: .BI OR\ search\ key\ search\ key .\"----------------- .TP .B RECENT Matches messages with the IMAP flag \(rsRecent set. .\"----------------- .TP .B SEEN Matches messages with the IMAP flag \(rsSeen set. .\"----------------- .TP .BI SENTBEFORE\ date\ specification Matches messages with a Date: header before .IR date\ specification . .\"----------------- .TP .BI SENTON\ date\ specification Matches messages with a Date: header matching the given .IR date\ specification . .\"----------------- .TP .BI SENTSINCE\ date\ specification Matches messages with a Date: header matching or after the given .IR date\ specification . .\"----------------- .TP .BI SINCE\ date\ specification Matches messages whose internal date is within or after the given .IR date\ specification . .\"----------------- .TP .BI SMALLER\ size Matches messages with a size smaller than the given .IR size . .\"----------------- .TP .BI SUBJECT\ pattern Matches messages, which contain .I pattern in the SUBJECT field of the message\(aqs IMAP envelope structure. .\"----------------- .TP .BI TEXT\ pattern Matches messages, which contain .I pattern in the message body. .\"----------------- .TP .BI TO\ pattern Matches messages, which contain .I pattern in the TO field of the message\(aqs IMAP envelope structure. .\"----------------- .TP .BI UID\ sequence Matches messages with the given UID(s). A .I sequence may be a single UID, a sequence range, written as .IR from : to , .RB e.g.\ 100 : 125 , or a comma separated list of UIDs, e.g. .BR 11,50,4 . It\(aqs also possible to combine multiple sequences, e.g. .BR 1,3,5,7,10:20 . .\"----------------- .TP .B UNANSWERED Matches messages, which do not have the IMAP flag \(rsAnswered set. .\"----------------- .TP .B UNDELETED Matches messages, which do not have the IMAP flag \(rsDeleted set. .\"----------------- .TP .B UNDRAFT Matches messages, which do not have the IMAP flag \(rsDraft set.
.\"----------------- .TP .B UNFLAGGED Matches messages, which do not have the IMAP flag \(rsFlagged set. .\"----------------- .TP .BI UNKEYWORD\ keyword Matches messages, which do not have the given IMAP .I keyword flag set .\"----------------- .TP .B UNSEEN Matches messages, which do not have the IMAP flag \(rsSeen set. .\"------------------------------------- .SS DOVEADM SEARCH KEYS Additional search keys, provided by .BR doveadm (1). .\"----------------- .TP .BI SAVEDBEFORE\ date\ specification Matches messages, which were saved before .IR date\ specification . .\"----------------- .TP .BI SAVEDON\ date\ specification Matches messages whose save date matches the given .IR date\ specification . .\"----------------- .TP .BI SAVEDSINCE\ date\ specification Matches messages with a save date matching or after the given .IR date\ specification . .\"------------------------------------------------------------------------ .SH DATE SPECIFICATION .BR doveadm (1) supports a few additional .I date specification formats. They can be used anywhere, where a .I date specification value is obligatory. .TP .IB day \- month \- year Default IMAP4rev1 date format. .br .IR day , the day of month: .BR 1 \- 31 . .br .IR month , the abbreviated month name: .BR Jan , .BR Feb , .BR Mar , .BR Apr , .BR May , .BR Jun , .BR Jul , .BR Aug , .BR Sep , .BR Oct , .BR Nov \ or .BR Dec . .br .IR year , four digits of year, e.g. .BR 2007 . .br For example the \(dq13th of April 2007\(dq will be represented as .BR 13\-Apr\-2007 . . .TP .I interval Combination of a positive integer .I number and a .IR time\ unit . .br Available .I time\ units are: .BR weeks " (abbr: " w ), .BR days " (abbr: " d ), .BR hours " (abbr: " h ), .BR mins " (abbr: " m ") and" .BR secs " (abbr: " s ). .br To match messages from last week, you may specify for example: .BR since\ 1w , .BR since\ 1weeks \ or .BR since\ 7days . . .TP .I Unix timestamp A 10 digit Unix timestamp, seconds since the 1st of January 1970, 00:00:00 UTC. For example the \(dq13th of April 2007\(dq will be represented as .BR 1176418800 . . .TP .I YYYY\-MM\-DD Extended ISO\-8601 calendar date format. For example the \(dq13th of April 2007\(dq will be represented as .BR 2007\-04\-13 . .\"------------------------------------------------------------------------ .SH SIZE .BR doveadm (1) provides also an additional .I size representation format. The following formats can be used anywhere, where a .I size value is obligatory. .TP .I octets The message size in octets, as specified in the IMAP4rev1 specification. .TP .I size The message size in .BR B \ (byte), .BR k \ (kilobyte), .BR M \ (megabyte), .BR G \ (gigabyte)\ or .BR T \ (terabyte). .br To match messages, bigger than 1 megabyte, you may specify for example: .BR larger\ 1M \ or .BR larger\ 1024k . .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-search (1) dovecot-2.2.9/doc/man/doveadm-search.1.in0000644000175000017500000000611212244400443014776 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-SEARCH 1 "2010-11-25" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-search \- Show a list of mailbox GUIDs and message UIDs matching \ given search query. 
.\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR search " [" \-S .IR socket_path "] " search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR search " [" \-S .IR socket_path ] .BI \-A \ search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR search " [" \-S .IR socket_path ] .BI \-u " user search_query" .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B search command is used to find matching messages. .BR doveadm (1) will print the mailbox\(aqs guid and the message\(aqs uid for each match. .br When used with the .B \-A or .BI \-u \ wildcard options, .BR doveadm (1) will print the fields .BR username , .BR mailbox\-guid \ and .B uid for each matching message. .PP In the first form, .BR doveadm (1) will execute the .B search action with the environment of the logged in system user. .PP In the second form, the command will be performed for all users. .PP In the third form, only matching mails of the given .IR user (s) will be searched. .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\" --- command specific options --- "/. .PP This command uses by default the output formatter .B flow (without the .IR key = prefix). .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I search_query Show messages matching this search query. See .BR doveadm\-search\-query (7) for details. .\"------------------------------------------------------------------------ .SH EXAMPLE This example demonstrates how to search in user bob\(aqs dovecot mailboxes for all messages, which contain the word \(dqtodo\(dq in the Subject: header. .PP .nf .B doveadm search \-u bob mailbox dovecot\(rs* subject todo 3a94c928d66ebe4bda04000015811c6a 8 3a94c928d66ebe4bda04000015811c6a 25 3a94c928d66ebe4bda04000015811c6a 45 .fi .PP The search command is mainly useful when used together with the .I doveadm\ fetch command. For example, to save the message bodies of all messages from INBOX that have \(dqtodo\(dq in the subject, use: .PP .nf .B doveadm search \-u bob mailbox INBOX subject todo | .B while read guid uid; do .B \ \ doveadm fetch \-u bob body mailbox\-guid $guid uid $uid > msg.$uid .B done .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-fetch (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/Makefile.in0000644000175000017500000004757612244477267013527 00000000000000# Makefile.in generated by automake 1.14 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = doc/man DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_man1_MANS) $(dist_man7_MANS) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/dovecot.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( 
.*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 am__installdirs = "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)" man7dir = $(mandir)/man7 NROFF = nroff MANS = $(dist_man1_MANS) $(dist_man7_MANS) $(nodist_man1_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ACLOCAL_AMFLAGS = @ACLOCAL_AMFLAGS@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTH_CFLAGS = @AUTH_CFLAGS@ AUTH_LIBS = @AUTH_LIBS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CDB_LIBS = @CDB_LIBS@ CFLAGS = @CFLAGS@ CLUCENE_CFLAGS = @CLUCENE_CFLAGS@ CLUCENE_LIBS = @CLUCENE_LIBS@ COMPRESS_LIBS = @COMPRESS_LIBS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CRYPT_LIBS = @CRYPT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DICT_LIBS = @DICT_LIBS@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ KRB5CONFIG = @KRB5CONFIG@ KRB5_CFLAGS = @KRB5_CFLAGS@ KRB5_LIBS = @KRB5_LIBS@ LD = @LD@ LDAP_LIBS = @LDAP_LIBS@ LDFLAGS = @LDFLAGS@ LIBCAP = @LIBCAP@ LIBDOVECOT = @LIBDOVECOT@ LIBDOVECOT_COMPRESS = @LIBDOVECOT_COMPRESS@ LIBDOVECOT_DEPS = @LIBDOVECOT_DEPS@ LIBDOVECOT_LDA = @LIBDOVECOT_LDA@ LIBDOVECOT_LOGIN = @LIBDOVECOT_LOGIN@ LIBDOVECOT_SQL = @LIBDOVECOT_SQL@ LIBDOVECOT_STORAGE = @LIBDOVECOT_STORAGE@ LIBDOVECOT_STORAGE_DEPS = @LIBDOVECOT_STORAGE_DEPS@ LIBICONV = @LIBICONV@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBWRAP_LIBS = @LIBWRAP_LIBS@ LINKED_STORAGE_LDADD = @LINKED_STORAGE_LDADD@ LINKED_STORAGE_LIBS = @LINKED_STORAGE_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MODULE_LIBS = @MODULE_LIBS@ MODULE_SUFFIX = @MODULE_SUFFIX@ MYSQL_CFLAGS = @MYSQL_CFLAGS@ MYSQL_CONFIG = @MYSQL_CONFIG@ MYSQL_LIBS = @MYSQL_LIBS@ NM = @NM@ NMEDIT = @NMEDIT@ NOPLUGIN_LDFLAGS = @NOPLUGIN_LDFLAGS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PGSQL_CFLAGS = @PGSQL_CFLAGS@ PGSQL_LIBS = @PGSQL_LIBS@ PG_CONFIG = @PG_CONFIG@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ QUOTA_LIBS = @QUOTA_LIBS@ RANLIB = @RANLIB@ RPCGEN = @RPCGEN@ RUN_TEST = @RUN_TEST@ SED = @SED@ SETTING_FILES = 
@SETTING_FILES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ SQL_CFLAGS = @SQL_CFLAGS@ SQL_LIBS = @SQL_LIBS@ SSL_CFLAGS = @SSL_CFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VALGRIND = @VALGRIND@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ dict_drivers = @dict_drivers@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mail_storages = @mail_storages@ mailbox_list_drivers = @mailbox_list_drivers@ mandir = @mandir@ mkdir_p = @mkdir_p@ moduledir = @moduledir@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ rundir = @rundir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ sql_drivers = @sql_drivers@ srcdir = @srcdir@ ssldir = @ssldir@ statedir = @statedir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ pkgsysconfdir = $(sysconfdir)/dovecot SUFFIXES = .1.in .1 dist_man1_MANS = \ deliver.1 \ doveadm-config.1 \ doveadm-copy.1 \ doveadm-reload.1 \ doveadm-stop.1 dist_man7_MANS = \ doveadm-search-query.7 nodist_man1_MANS = \ doveadm.1 \ doveadm-altmove.1 \ doveadm-auth.1 \ doveadm-batch.1 \ doveadm-deduplicate.1 \ doveadm-director.1 \ doveadm-dump.1 \ doveadm-exec.1 \ doveadm-expunge.1 \ doveadm-fetch.1 \ doveadm-flags.1 \ doveadm-import.1 \ doveadm-instance.1 \ doveadm-index.1 \ doveadm-force-resync.1 \ doveadm-help.1 \ doveadm-kick.1 \ doveadm-log.1 \ doveadm-mailbox.1 \ doveadm-mount.1 \ doveadm-move.1 \ doveadm-penalty.1 \ doveadm-purge.1 \ doveadm-pw.1 \ doveadm-quota.1 \ doveadm-search.1 \ doveadm-user.1 \ doveadm-who.1 \ doveconf.1 \ dovecot.1 \ dovecot-lda.1 \ dsync.1 man_includefiles = \ $(srcdir)/global-options-formatter.inc \ $(srcdir)/global-options.inc \ $(srcdir)/option-A.inc \ $(srcdir)/option-S-socket.inc \ $(srcdir)/option-u-user.inc \ $(srcdir)/reporting-bugs.inc EXTRA_DIST = \ doveadm.1.in \ doveadm-altmove.1.in \ doveadm-auth.1.in \ doveadm-batch.1.in \ doveadm-deduplicate.1.in \ doveadm-director.1.in \ doveadm-dump.1.in \ doveadm-exec.1.in \ doveadm-expunge.1.in \ doveadm-fetch.1.in \ doveadm-flags.1.in \ doveadm-import.1.in \ doveadm-instance.1.in \ doveadm-index.1.in \ doveadm-force-resync.1.in \ doveadm-help.1.in \ doveadm-kick.1.in \ doveadm-log.1.in \ doveadm-mailbox.1.in \ doveadm-mount.1.in \ doveadm-move.1.in \ doveadm-penalty.1.in \ doveadm-purge.1.in \ doveadm-pw.1.in \ doveadm-quota.1.in \ doveadm-search.1.in \ doveadm-user.1.in \ doveadm-who.1.in \ doveconf.1.in \ dovecot.1.in \ dovecot-lda.1.in \ dsync.1.in \ sed.sh \ 
$(man_includefiles) CLEANFILES = $(nodist_man1_MANS) all: all-am .SUFFIXES: .SUFFIXES: .1.in .1 $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/man/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign doc/man/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(dist_man1_MANS) $(nodist_man1_MANS) @$(NORMAL_INSTALL) @list1='$(dist_man1_MANS) $(nodist_man1_MANS)'; \ list2=''; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list='$(dist_man1_MANS) $(nodist_man1_MANS)'; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) install-man7: $(dist_man7_MANS) @$(NORMAL_INSTALL) @list1='$(dist_man7_MANS)'; \ list2=''; \ test -n "$(man7dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man7dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man7dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.7[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ 
list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man7dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man7dir)" || exit $$?; }; \ done; } uninstall-man7: @$(NORMAL_UNINSTALL) @list='$(dist_man7_MANS)'; test -n "$(man7dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man7dir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man7dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-man7 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man1 uninstall-man7 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-man7 install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-man uninstall-man1 uninstall-man7 .1.in.1: $(man_includefiles) Makefile $(SHELL) $(srcdir)/sed.sh $(srcdir) $(rundir) $(pkgsysconfdir) \ $(pkglibexecdir) < $< > $@ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: dovecot-2.2.9/doc/man/doveadm-force-resync.1.in0000644000175000017500000000366712244400443016144 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-FORCE\-RESYNC 1 "2010-11-25" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-force\-resync \- Repair broken mailboxes .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " force\-resync " [" \-S .IR socket_path "] " mailbox .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " force\-resync " [" \-S .IR socket_path "] " .BI \-A \ mailbox .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " force\-resync " [" \-S .IR socket_path "] " .BI \-u " user mailbox" .\"------------------------------------------------------------------------ .SH DESCRIPTION Under certain circumstances it may happen, that .BR dovecot (1) is unable to automatically solve problems with mailboxes. In such situations the .B force\-resync command may be helpful. It tries to fix all problems. For sdbox and mdbox mailboxes the storage files will be also checked. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I mailbox The name of the mailbox to fix. 
With mdbox all of the mailboxes are fixed, so you can use for example INBOX as the name. .\"------------------------------------------------------------------------ .SH EXAMPLE Fix bob\(aqs INBOX: .PP .nf .B doveadm force\-resync \-u bob INBOX .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/option-S-socket.inc0000644000175000017500000000046512244400443015123 00000000000000.TP .BI \-S\ socket_path The option\(aqs argument is either an absolute path to a local UNIX domain socket, or a hostname and port .RI ( hostname : port ), in order to connect to a remote host via a TCP socket. .sp This allows an administrator to execute .BR doveadm (1) mail commands through the given socket. dovecot-2.2.9/doc/man/doveadm-kick.1.in0000644000175000017500000000754612244400443014466 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-KICK 1 "2010-06-12" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-kick \- Disconnect users by user name and/or IP address .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " kick " [ \-a .IR anvil_socket_path ] .RB [ \-f ] .I user .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " kick " [ \-a .IR anvil_socket_path ] .RB [ \-f ] \fIip\fP[\fB/\fP\fImask\fP] .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " kick " [ \-a .IR anvil_socket_path ] .RB [ \-f ] .I user \fIip\fP[\fB/\fP\fImask\fP] .\"------------------------------------------------------------------------ .SH DESCRIPTION .BR doveadm \(aqs\ kick command is used to disconnect users by .I user name and/or the .I ip address from which they are connected. .PP In the first form, all users whose login name matches the .I user argument will be disconnected. .PP In the second form, all users connected from the given IP address or network range will be disconnected. .PP In the last form, only users connected from the given IP address or network range and with a matching login name will be disconnected. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- .TP .BI \-a\ anvil_socket_path This option is used to specify an absolute path to an alternative UNIX domain socket. .sp By default .BR doveadm (1) will use the socket .IR @rundir@/anvil . The socket may be located in another directory when the default .I base_dir setting was overridden in .IR @pkgsysconfdir@/dovecot.conf . .\"------------------------------------- .TP .B \-f Enforce the disconnect, even when there are multiple .IR user s from different networks connected to a single process. This option may only be required when you have configured something like: .sp .nf service imap { ... client_limit = \fI1+n\fP service_count = 0 ... } .fi .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .IR ip [/ mask ] .I ip or .IB ip /\c .I mask is the host or network from which the users are connected. .\"------------------------------------- .TP .I user Is a .IR user \(aqs login name. Depending on the configuration, a login name may be for example .BR jane " or " john@example.com . It\(aqs also possible to use .RB \(aq * \(aq and .RB \(aq ?
\(aq wildcards (e.g. \-u *@example.org). .\"------------------------------------------------------------------------ .SH EXAMPLE If you don\(aqt want to disconnect all users at once, you can check who\(aqs currently logged in. The first example demonstrates how to disconnect all users whose login name is 3 characters long and begins with .BR ba . .sp .nf .B doveadm who \-1 ja* username proto pid ip jane imap 8192 ::1 james imap 8203 2001:db8:543:2::1 .B doveadm kick ba? kicked connections from the following users: bar baz .fi .PP The next example shows how to kick user foo\(aqs connections from 192.0.2.*. .sp .nf .B doveadm who \-1 foo username proto pid ip foo imap 8135 fd95:4eed:38ba::25 foo imap 9112 192.0.2.53 foo imap 8216 192.0.2.111 .B doveadm kick foo 192.0.2.0/24 kicked connections from the following users: foo .B doveadm who f* username # proto (pids) (ips) foo 1 imap (8135) (fd95:4eed:38ba::25) .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-who (1)dovecot-2.2.9/doc/man/doveadm-help.1.in0000644000175000017500000000177512244400443014473 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-HELP 1 "2010-06-22" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-help \- Show information about doveadm commands .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " help .RI [ command ] .\"------------------------------------------------------------------------ .SH DESCRIPTION .br With no .I command argument given, .B doveadm help will print: .TP 4 * the synopsis for the most of the .BR doveadm (1) commands. .TP * groups of commands, e.g. .BR log " or " mailbox . .PP When the name of a .I command (or a group) was given, it will show the man page for that command. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/doveadm-purge.1.in0000644000175000017500000000352312244400443014656 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DOVEADM\-PURGE 1 "2010-11-25" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-purge \- Remove messages with refcount=0 from mdbox files .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " purge " [" \-S .IR socket_path "] " .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " purge " [" \-S .IR socket_path ] .B \-A .\"------------------------------------- .br .BR doveadm " [" \-Dv "] " purge " [" \-S .IR socket_path "] " .BI \-u \ user .\"------------------------------------------------------------------------ .SH DESCRIPTION The .B doveadm purge command is used to remove all messages with refcount=0 from a user\(aqs mail storage. The refcount of a message is decreased to 0 when the user (or some administration utility) has expunged all instances of a message from all mailboxes. .PP In the first form, .BR doveadm (1) will perform the .B purge action for the currently logged in user. 
.PP In the second form, the command will be executed for all users .PP In the last form, only messages of the given .IR user (s) will be purged. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-expunge (1) .PP See http://wiki2.dovecot.org/MailboxFormat/dbox#Multi\-dbox for some details.dovecot-2.2.9/doc/man/doveadm-mount.1.in0000644000175000017500000000614512244400443014701 00000000000000.\" Copyright (c) 2012 Dovecot authors, see the included COPYING file .TH DOVEADM\-MOUNT 1 "2012-02-16" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-mount \- Manage the list of mountpoints where mails are stored .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .B mount .IR command " [" arguments ] .\"------------------------------------------------------------------------ .SH DESCRIPTION The doveadm .B mount .IR command s can be used to manage the list of mountpoints where mails are stored. This is used mainly for better error handling when a mountpoint isn\(aqt mounted for some reason: .TP 4 * If a mail directory doesn\(aqt exist, it\(aqs autocreated. If the user\(aqs mails aren\(aqt mounted and filesystem permissions still allow the autocreation, the user will see an empty mailbox and later will have to redownload all mails. If the mountpoint is known to be unmounted, Dovecot will simply fail opening any mailboxes. .TP * If dbox alternate storage isn\(aqt mounted and a mail in it is attempted to be accessed, Dovecot normally rebuilds the indexes and notices that all the mails in alt storage are expunged. When the alt storage is mounted back and even if index is again rebuilt, the mails won\(aqt necessarily become visible anymore for IMAP clients. If the mountpoint is known to be unmounted, Dovecot won\(aqt rebuild indexes and lose the mails. .PP Dovecot automatically adds mountpoints to this list at startup. If you don\(aqt want some of the mountpoints added, you can add a wildcard ignore for it. .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I path The directory name of a mountpoint .\"------------------------------------- .TP .I state The .I state of a mountpoint. Either .BR online " or " ignore . .\"------------------------------------------------------------------------ .SH COMMANDS .SS mount add .B doveadm mount add .RI [ path " [" state ]] .PP If this command is run without any parameters, doveadm detects all missing mountpoints and adds them (the same way as when Dovecot does at startup). .PP When a mountpoint .I path is given, it\(aqs added as a mountpoint. The .I state can currently be either .RB \(dq online \(dq (default) or .RB \(dq ignore \(dq. 
The ignore state is mainly useful with path wildcards to add mountpoints that you never want Dovecot to automatically add, such as: .B doveadm mount add '/mnt/*' ignore .\"------------------------------------- .SS mount list .BR doveadm " [" \-Dv " ] [" \-f .IR formatter ] .B mount list .PP This command lists the mountpoints known to Dovecot and their state. .\"------------------------------------- .SS mount remove .BI "doveadm mount remove " path .PP This command removes the specified .I path from the mountpoint list. .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1)dovecot-2.2.9/doc/man/global-options.inc0000644000175000017500000000023412244400443015050 00000000000000.SH OPTIONS Global .BR doveadm (1) .IR options : .TP .B \-D Enables verbosity and debug messages. .TP .B \-v Enables verbosity, including progress counter. dovecot-2.2.9/doc/man/doveadm-fetch.1.in0000644000175000017500000001156412244400443014631 00000000000000.\" Copyright (c) 2010-2012 Dovecot authors, see the included COPYING file .TH DOVEADM\-FETCH 1 "2012-02-13" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-fetch \- Fetch partial/full messages or message information .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR fetch " [" \-S .IR socket_path "] " "fields search_query" .br .\"------------------------------------- .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR fetch " [" \-S .IR socket_path "]" .BI \-A " fields search_query" .br .\"------------------------------------- .BR doveadm " [" \-Dv "] [" \-f .IR formatter ] .BR fetch " [" \-S .IR socket_path "]" .BI \-u " user fields search_query" .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveadm fetch can be used to fetch messages\(aq contents and metadata. This can be useful for scripts and for debugging. If you want to fetch messages one at a time, see .BR doveadm\-search (1). .IP Important: Please respect your users\(aq privacy. .\"------------------------------------------------------------------------ @INCLUDE:global-options-formatter@ .\" --- command specific options --- "/. .PP This command uses by default the output formatter .BR pager . .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I fields One or more result field names to display, if the .I search_query matches any messages. In order to specify multiple fields, enclose them in single or double quotes. .br Supported .I fields are: .RS .TP .B body .\"----------------- The body of a message. .TP .B date.received Date and time of final delivery, when the message was delivered to a user\(aqs mailbox for the first time. .br The internal date and time of the source message, when the message was copied by the IMAP COPY command. .br The date\-time attribute when present, otherwise the current time, when the message was saved by the IMAP APPEND command. .\"----------------- .TP .B date.saved Date and time when the message was saved to mailbox. .\"----------------- .TP .B date.sent Date and time of the message\(aqs Date: header. 
.\"----------------- .TP .B flags A message\(aqs IMAP flags, e.g. \(rsSeen .\"----------------- .TP .B guid A message\(aqs globally unique identifier. .\"----------------- .TP .B hdr The header of the message. .\"----------------- .TP .B imap.body IMAP BODY output of the message (see RFC 3501). .\"----------------- .TP .B imap.bodystructure IMAP BODYSTRUCTURE output of the message (see RFC 3501). .\"----------------- .TP .B imap.envelope IMAP ENVELOPE output of the message (see RFC 3501). .\"----------------- .TP .B mailbox Name of the mailbox, in which the message is stored. The name is in UTF\-8. .\"----------------- .TP .B mailbox\-guid The globally unique identifier of the mailbox, in which the message is located. .\"----------------- .TP .B pop3.uidl A message\(aqs unique (POP3) identifier within a mailbox. .\"----------------- .TP .B seq A message\(aqs sequence number in a mailbox. .\"----------------- .TP .B size.physical A message\(aqs physical size. .\"----------------- .TP .B size.virtual A message\(aqs virtual size, computed with CRLF line terminators. .\"----------------- .TP .B text The entire message (header and body). .\"----------------- .TP .B text.utf8 The entire message (header and body) \(em UTF\-8 encoded. .\"----------------- .TP .B uid A message\(aqs unique (IMAP) identifier in a mailbox. .\"----------------- .TP .B user A message owner\(aqs login name. .\"----------------- .RE .\"------------------------------------- .TP .I search_query Fetch messages matching this search query. See .BR doveadm\-search\-query (7) for details. .\"------------------------------------------------------------------------ .SH EXAMPLE This example based on the first example from .BR doveadm\-search (1). We are fetching the fields .BR mailbox\ and\ date.sent from user bob\(aqs mailbox with the guid \(dq3a94c928d66ebe4bda04000015811c6a\(dq for the messages with the UIDs .BR 8 ,\ 25 \ and \ 45 . 
.PP .nf .B doveadm fetch \-u bob \(dqmailbox date.sent\(dq \(rs .B mailbox\-guid 3a94c928d66ebe4bda04000015811c6a uid 8,25,45 mailbox: dovecot/pigeonhole/2.0 date.sent: 2010\-01\-19 01:17:41 (+0100) ^L mailbox: dovecot/pigeonhole/2.0 date.sent: 2010\-01\-28 09:38:49 (+0100) ^L mailbox: dovecot/pigeonhole/2.0 date.sent: 2010\-03\-28 18:41:14 (+0200) ^L .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-search (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/doveadm-move.1.in0000644000175000017500000000756312244400443014512 00000000000000.\" Copyright (c) 2011-2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-MOVE 1 "2013-11-23" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-move \- Move messages matching the given search query into another mailbox .br doveadm\-copy \- Copy messages matching the given search query into another mailbox .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " move " [" \-S .IR socket_path "] " destination .RB [ user .IR source_user "] " search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " move " [" \-S .IR socket_path "] " .BI \-A " destination .RB [ user .IR source_user "] " search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " move " [" \-S .IR socket_path "] " .BI \-u " user destination .RB [ user .IR source_user "] " search_query .\"------------------------------------- .PP .BR doveadm " [" \-Dv "] " copy " [" \-S .IR socket_path "] " "destination .RB [ user .IR source_user "] " search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " copy " [" \-S .IR socket_path "] " .BI \-A " destination .RB [ user .IR source_user "] " search_query .br .\"------------------------------------- .BR doveadm " [" \-Dv "] " copy " [" \-S .IR socket_path "] " .BI \-u " user destination .RB [ user .IR source_user "] " search_query .\"------------------------------------------------------------------------ .SH DESCRIPTION .B doveadm move can be used for moving mails between mailboxes for one or more users. The .I search_query is used to restrict which messages are moved into the .I destination mailbox. .br .B doveadm copy behaves the same as .BR "doveadm move" , except that copied messages will not be expunged after copying. .PP In the first form, .BR doveadm (1) will execute the .BR move / copy action with the environment of the logged in system user. .PP In the second form, .BR doveadm (1) will iterate over all users, found in the configured .IR user_db (s), and move or copy each user\(aqs messages, matching the given .IR search_query , into the user\(aqs .IR destination " mailbox." .PP In the third form, matching mails will be moved or copied only for given .IR user (s). .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\" --- command specific options --- "/. .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I destination The name of the destination mailbox, into which the mails should be moved or copied. 
The .I destination mailbox must exist; otherwise this command will fail. .\"------------------------------------- .TP .I search_query Move/copy messages matching the given search query. See .BR doveadm\-search\-query (7) for details. .\"------------------------------------- .TP .BI user \ source_user The keyword .B user followed by a valid user name. When this argument is present, .BR doveadm (1) will apply the .I search_query to the .IR source_user "\(aqs " mail_location . .br .B Limitation: Currently the users specified by .BI \-u " user" and .BI user " source_user" must share the same UID and GID. .\"------------------------------------------------------------------------ .SH EXAMPLE Move jane\(aqs messages \- received in September 2011 \- from her INBOX into her archive. .PP .nf .B doveadm move \-u jane Archive/2011/09 mailbox INBOX BEFORE \(rs .B 2011\-10\-01 SINCE 01\-Sep\-2011 .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/man/dsync.1.in0000644000175000017500000002120012244400443013231 00000000000000.\" Copyright (c) 2010 Dovecot authors, see the included COPYING file .TH DSYNC 1 "2011-01-16" "Dovecot v2.2" "Dovecot" .SH NAME dsync \- Dovecot\(aqs mailbox synchronization utility .\"------------------------------------------------------------------------ .SH SYNOPSIS .B dsync .RI [ options ] .BI mirror\ location2 .\"------------------------------------- .br .B dsync .RI [ options ] .BI backup\ location2 .\"------------------------------------------------------------------------ .SH DESCRIPTION .B dsync is Dovecot\(aqs mailbox synchronization utility. It can be used for several different use cases: two\-way synchronization of mailboxes on different servers (via .BR ssh (1)), creating backups of mails to a remote server, and converting mailboxes from/to different mailbox formats. .PP The syncing is done as perfectly as possible: an IMAP or a POP3 client shouldn\(aqt be able to notice any differences between the two mailboxes. Two\-way syncing means that it\(aqs safe to do any kind of modifications on both sides, and .B dsync will merge the changes without losing any changes done on either side. This is possible because .B dsync can access Dovecot\(aqs index logs that keep track of changes. It\(aqs of course possible to have conflicts during merging; these are resolved in a safe way. See the .B dsync design document for more information. .PP .B dsync uses the same configuration files as the rest of Dovecot (via the doveconf binary). The entire configuration can be changed by giving the \-c parameter with another configuration file, or by using the \-o parameter to override specific settings. When executing a remote .B dsync program, it works the same way: it uses its own local configuration. .PP .B dsync can be run completely standalone. It doesn\(aqt require any Dovecot server processes to be running, except when using the \-u parameter to do a userdb lookup from the auth process. .PP .B dsync can currently sync only one user at a time. If you want to .B dsync all users, you\(aqll need to get a list of all users and execute .B dsync separately for each one. Any errors are written to stderr.
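.PP
For example \(em assuming that your userdb supports user iteration so that
.BR doveadm (1)
can list the user names, and using a placeholder destination host \(em all
users could be mirrored with a simple shell loop along these lines:
.sp
.nf
.B doveadm user \(aq*\(aq | while read user; do dsync \-u $user mirror mailuser@backup.example.com; done
.fi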
.\"------------------------------------------------------------------------ .SH OPTIONS .B dsync recognizes the following command line options: .TP .BI \-c \ config\-file read configuration from the given .IR config\-file . By default .I @pkgsysconfdir@/dovecot.conf will be used. .\"--------------------------------- .BI \-C\ alt_char Specifies an alternative mailbox name character. If source and destination mailbox formats are different, it\(aqs possible that on one side there exists a mailbox name that isn\(aqt valid for the other side. These invalid mailbox names are fixed by replacing such invalid characters with the given .IR alt_char . The default is .RB \(aq _ \(aq. .\"--------------------------------- .TP .B \-D Activates debug messages and makes .B dsync more verbose. .\"--------------------------------- .TP .B \-f Makes .B dsync run in \(dqfull sync\(dq mode rather than \(dqfast sync\(dq mode. In fast sync mode .B dsync might skip syncing a mailbox, if both locations had modified it equally many times (i.e. highest\-modseqs were equal), but with different changes. .\"--------------------------------- .TP .BI \-m\ mailbox Specifies the .I mailbox that should be synchronized or from which mails should be converted. The default is to synchronize all respectively convert from all mailboxes. .\"--------------------------------- .TP .BI \-o\ setting = value Overrides the configuration .I setting from .I @pkgsysconfdir@/dovecot.conf and from the userdb with the given .IR value . In order to override multiple settings, the .B \-o option may be specified multiple times. .\"--------------------------------- .TP .B \-R Reverse backup direction, so mails in .I location2 are backed up to default mail location. .\"--------------------------------- .TP .BI \-u\ user Specifies that the userdb lookup for the given .I user should be done and used to set up the environment (uid, gid, home, etc.). By default the system user\(aqs current environment will be used. .\"--------------------------------- .TP .B \-v Makes .B dsync more verbose. .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I location2 The first mail location is based on configuration .RI ( mail_location " or " userdb " settings). It\(aqs also possible to override it by giving .BI \-o\ mail_location= mail_location setting. This parameter defines the other mail location that is used. .sp If the location is on local filesystem, you can use a regular mail_location, such as maildir:/backup/user/Maildir .sp If the location is on a remote server, .B dsync can ssh to it by giving .I host or .I user@host as the parameter. If user is specified, it\(aqs given as .B \-u parameter to .BR dsync , not to ssh. The ssh username is always the default. .sp The final way to specify a location is to give a full command line or a path to a script that executes the .BR dsync . For example: .sp .nf ssh mailuser@host dsync \-u user .fi .\"------------------------------------------------------------------------ .SH COMMANDS .B dsync provides the following commands: .\"------------------------------------------------------------------------ .SS mirror Does a two\-way synchronization between two mail locations. Changes in both locations are synchronized to the other one, without losing any changes made by either of them. Any potential UID conflicts are resolved by giving them new UIDs. 
.\"------------------------------------------------------------------------ .SS backup Backup mails from default mail location to .I location2 (or vice versa, if .B \-R parameter is given). No changes are ever done to the source location. Any changes done in destination are discarded. .\"------------------------------------------------------------------------ .SH "EXIT STATUS" .B dsync will exit with one of the following values: .TP 4 .B 0 Synchronization was done perfectly. .TP .B 2 Synchronization was done without errors, but some changes couldn\(aqt be done, so the mailboxes aren\(aqt perfectly synchronized. Running dsync again usually fixes this. Typically this occurs for message modification sequences with newly created mailboxes. It can also occur if one of the mailboxes change during the syncing. .TP .B 1, >2 Synchronization failed. .\"------------------------------------------------------------------------ .SH EXAMPLE .SS MIRRORING Mirror mailboxes to a remote server. Any errors are written to stderr. .PP .RS .nf .B dsync -u username mirror username@example.com .fi .RE .PP If you need more complex parameters to ssh, you can use e.g.: .PP .RS .nf .B dsync \-u username mirror ssh \-i id_dsa.dovecot mailuser@example.com dsync \-u username .fi .RE .\"------------------------------------------------------------------------ .SS CONVERTING Assuming that the .I mail_location setting in .I @pkgsysconfdir@/conf.d/10\-mail.conf is set to: .BR "mail_location = mdbox:~/mdbox" , a logged in system user may convert her/his mails from its Maildir in her/his home directory to the mdbox mailbox format. The user has to execute the command: .PP .RS .nf .B dsync mirror maildir:~/Maildir .fi .RE .PP If you want to do this without any downtime, you can do the conversion one user at a time. Initially: .RS 4 .IP \(bu 4 Configuration uses .B mail_location = maildir:~/Maildir .IP \(bu Set up the possibility of doing per\-user mail location using .I userdb extra fields. .RE .PP Then for each user: .RS 4 .IP 1. 4 Run .I dsync mirror once to do the initial conversion. .IP 2. Run .I dsync mirror again, because the initial conversion could have taken a while and new changes could have occurred during it. This second time only applies changes, so it should be fast. .IP 3. Update mail extra field in userdb to .BR mdbox:~/mdbox . If you\(aqre using auth cache, you need to flush it. .IP 4. Wait for a few seconds and then kill the user\(aqs all existing imap and pop3 sessions (that are still using maildir). .IP 5. Run .I dsync mirror once more to apply final changes that were possibly done. After this there should be no changes to Maildir, because the user\(aqs mail location has been changed and all existing processes using it have been killed. .RE .PP Once all users have been converted, you can set the default .I mail_location to mdbox and remove the per\-user mail locations from .IR userdb . 
.\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-kick (1), .BR doveconf (1), .BR dovecot (1) .\"------------------------------------- .PP Additional resources: .IP "dsync design" http://wiki2.dovecot.org/Design/Dsyncdovecot-2.2.9/doc/man/doveadm-deduplicate.1.in0000644000175000017500000000544212244400443016021 00000000000000.\" Copyright (c) 2013 Dovecot authors, see the included COPYING file .TH DOVEADM\-DEDUPLICATE 1 "2013-08-03" "Dovecot v2.2" "Dovecot" .SH NAME doveadm\-deduplicate \- expunge duplicate messages .\"------------------------------------------------------------------------ .SH SYNOPSIS .BR doveadm " [" \-Dv "] " deduplicate " [" \-u .IR user |\c .BR \-A "] [" \-S .IR socket_path "] ["\c .BR \-m ] .I search_query .\"------------------------------------------------------------------------ .SH DESCRIPTION This command is used to expunge duplicated messages in mailboxes. .B doveadm deduplicate is mainly useful to revert some (more or less) accidental duplication of messages, e.g. after .BR "doveadm copy" " or " "doveadm import" . .BR doveadm (1) will delete the newest duplicated messages from the mailbox and keep the oldest. .br Deduplication across multiple mailboxes is not supported. .\"------------------------------------------------------------------------ @INCLUDE:global-options@ .\"------------------------------------- .PP Command specific .IR options : .\"------------------------------------- @INCLUDE:option-A@ .\"------------------------------------- @INCLUDE:option-S-socket@ .\"------------------------------------- .TP .B \-m if the .B \-m option is given, .BR doveadm (1) will deduplicate by Message\-Id header. By default deduplication will be done by message GUIDs. .\"------------------------------------- @INCLUDE:option-u-user@ .\"------------------------------------------------------------------------ .SH ARGUMENTS .TP .I search_query expunge duplicates found from messages matching the given search query. Typically a search query like \(aq\fBmailbox\fP \fImailbox_name\fP \fBOR mailbox\fP \fIother_box\fP\(aq will be sufficient. See .BR doveadm\-search\-query (7) for details. .\"------------------------------------------------------------------------ .SH EXAMPLE This example shows how to list and expunge duplicate messages from a mailbox. .sp .nf .B doveadm \-f table fetch \-u jane \(aqguid uid\(aq mailbox a_Box | sort guid uid 8aad0f0a30169f4bea620000ca356bad 18751 8aad0f0a30169f4bea620000ca356bad 18756 923e301ab9219b4b4f440000ca356bad 18748 923e301ab9219b4b4f440000ca356bad 18753 \&... .B doveadm deduplicate \-u jane mailbox a_Box .B doveadm \-f table fetch \-u jane \(aqguid uid\(aq mailbox a_Box | sort guid uid 8aad0f0a30169f4bea620000ca356bad 18751 923e301ab9219b4b4f440000ca356bad 18748 a7999e1530739c4bd26d0000ca356bad 18749 \&... .fi .\"------------------------------------------------------------------------ @INCLUDE:reporting-bugs@ .\"------------------------------------------------------------------------ .SH SEE ALSO .BR doveadm (1), .BR doveadm\-copy (1), .BR doveadm\-fetch (1), .BR doveadm\-import (1), .BR doveadm\-search\-query (7)dovecot-2.2.9/doc/securecoding.txt0000644000175000017500000001304312244400443014066 00000000000000Simplicity provides security. The more you have to remember to maintain security the easier it is to forget something. 
Use Multiple Layers of Security ------------------------------- Input validation is useful to prevent clients from consuming too many server resources. Add the restrictions only where they're useful. For example, a simple "maximum line length" will limit the length of pretty much all possible client input. Don't rely on input validation. Maybe you missed something. Maybe someone calls your function somewhere else where you didn't originally intend it. Maybe someone makes the input validation less restrictive for some reason. The point is, it's not an excuse to cause a security hole just because input wasn't what you expected it to be. Don't trust memory. If code somewhere overflowed a buffer, don't make it easier to exploit it. For example, if you have code: static char staticbuf[100]; .. char stackbuf[100]; strcpy(stackbuf, staticbuf); Just because staticbuf was declared as [100], it doesn't mean it couldn't contain more data. Overflowing static buffers can't be directly exploited, but the strcpy() overflowing stackbuf makes it possible. Always copy data with bounds checking. Prevent Buffer Overflows ------------------------ Avoid writing to buffers directly. Write everything through the buffer API (lib/buffer.h), which guarantees protection against buffer overflows. There are various safe string APIs as well (lib/str.h, lib/strfuncs.h). Dovecot also provides a type-safe array API (lib/array.h). If you do write to buffers directly, mark the code with /* @UNSAFE */ unless it's _obviously_ safe. The only obviously safe code is calling a function with (buffer, sizeof(buffer)) parameters. If you do _any_ calculations with the buffer size, mark it unsafe. Use const with buffers whenever you can. It guarantees that you can't accidentally modify them. Use "char *" only for NUL-terminated strings. Use "unsigned char *" if it's not guaranteed to be NUL-terminated. Avoid free() ------------ Accessing freed memory is the most difficult problem to solve with C code. The only real solution is to use a garbage collector, but it's not possible to write a portable GC without radical changes in how you write code. I've added support for Boehm GC, but it doesn't seem to be working very well currently. In any case I'd rather not make it required. There are a few ways to avoid most free() calls however: the data stack and memory pools. The data stack works in a somewhat similar way to C's control stack. alloca() is quite near to what it does, but there's one major difference: stack frames are explicitly defined, so functions can return values allocated from the data stack. The t_strdup_printf() call is an excellent example of why this is useful. Rather than creating some arbitrarily sized buffer and using snprintf(), which may truncate the value, you can just use t_strdup_printf() without worrying about buffer sizes being large enough. Try to keep the allocations from the data stack small, since the data stack's highest memory usage size is kept for the rest of the process's lifetime. The initial data stack size is 32kB and it should be enough in normal use. See lib/data-stack.h. Memory pools are useful when you have to construct an object from multiple pieces and you can free it all at once. Actually, Dovecot's Memory Pool API is just an abstract class for allocating memory. There's system_pool for allocating memory with calloc(), realloc() and free(), and you can also create a pool that allocates memory from the data stack.
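As a rough sketch of both approaches (the exact functions are declared in lib/data-stack.h, lib/strfuncs.h and lib/mempool.h; struct example, use_path(), dir and name below are only placeholders):

  /* Data stack: the string returned by t_strdup_printf() stays valid until
     the enclosing data stack frame is popped, so no free() is needed. */
  T_BEGIN {
    const char *path = t_strdup_printf("%s/%s", dir, name);
    use_path(path);
  } T_END;

  /* Memory pool: build an object from multiple pieces and release all of
     them with a single pool_unref(). */
  struct example { char *name; unsigned int count; };

  pool_t pool = pool_alloconly_create("example", 1024);
  struct example *ex = p_new(pool, struct example, 1);
  ex->name = p_strdup(pool, name);
  ex->count = 1;
  pool_unref(&pool); /* frees ex and ex->name together */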
If your function needs to allocate memory for multiple objects, you may want to take struct pool as parameter to allow caller to specify where the memory is allocated from. See lib/mempool.h Deinitialize safely ------------------- Whenever you free a pointer, set it to NULL. That way if you accidentally try to free it again, it's less likely to cause a security hole. Dovecot does this automatically with most of its free() calls, but you should also make it a habit of making all your _destroy() functions take a pointer-to-pointer parameter which you set to NULL. Don't Keep Secrets ------------------ We don't do anything special to protect ourself against read access buffer overflows, so don't store anything sensitive in memory. We use multiple processes to protect sensitive information between users. When dealing with passwords and such, erase them from memory after you don't need it anymore. Note that such memset() may be optimized away by compiler, use safe_memset(). Use GCC Extensions ------------------ GCC makes it easy to catch some potential errors: Format string vulnerabilities can be prevented by marking all functions using format strings with __attr_format__() and __attr_format_arg__() macros and using -Wformat=2 GCC option. -W option checks that you don't compare signed and unsigned variables. I hope GCC will later emit a warning whenever there's potential integer truncation. -Wconversion kind of does that, but it's not really meant for it and it gives too many other useless warnings. Use union Safely ---------------- Suppose there was code: union { unsigned int number; char *str; } u; If it was possible for user to set number arbitrarily, but access the union as string it'd be possible to read or write arbitrary memory locations. There's two ways to handle this. First would be to avoid union entirely and use a struct instead. You don't really need the extra few bytes of memory that union saves. Another way is to access the union only through macro that verifies that you're accessing it correctly. See IMAP_ARG_*() macros in lib-imap/imap-parser.h. dovecot-2.2.9/doc/Makefile.am0000644000175000017500000000045012244400443012705 00000000000000if BUILD_DOCS DOCDIRS = wiki example-config endif SUBDIRS = man $(DOCDIRS) docfiles = \ documentation.txt \ securecoding.txt \ thread-refs.txt \ mkcert.sh \ dovecot-openssl.cnf \ solr-schema.xml if BUILD_DOCS doc_DATA = $(docfiles) endif EXTRA_DIST = \ dovecot-initd.sh \ $(docfiles) dovecot-2.2.9/doc/solr-schema.xml0000644000175000017500000000561112244400443013614 00000000000000 id dovecot-2.2.9/doc/wiki/0002755000175000017500000000000012244505324011703 500000000000000dovecot-2.2.9/doc/wiki/Upgrading.1.2.txt0000644000175000017500000000510412244263664014611 00000000000000Upgrading Dovecot v1.1 to v1.2 ============================== * Relative home directory paths are giving errors now. They were never supported, but earlier they just didn't usually cause problems. * If you were using e.g. 'mail_location = maildir:/var/mail/%h', just change it to 'mail_location = maildir:%h' and add '/var/mail/' prefix to home dirs. * To get absolute home dir from relative path in LDAP, use something like: 'user_attrs = .., homeDirectory=home=/var/mail/%$' * SQL dictionary (quota, expire plugin) configuration file is different than in v1.1. See 'doc/dovecot-dict-sql-example.conf' or for the new format. * When creating files or directories to mailboxes, Dovecot now uses the mailbox directory's permissions and GID for them. 
Previous versions simply used 0600 mode always, so you should check the directories' permissions to make sure they're strict enough. For backwards compatibility 'dovecot-shared' file's permissions still override these with Maildir. Authentication: * system_user [UserDatabase.ExtraFields.txt] was renamed to system_groups_user to better describe its functionality. Settings: * Renamed 'ssl_disable=yes' to 'ssl=no'. * Renamed 'auth_ntlm_use_winbind' to 'auth_use_winbind', which also determines if GSS-SPNEGO is handled by GSSAPI or winbind. * Removed 'login_greeting_capability'. The capabilities are now always sent (LEMONADE [http://www.lemonadeformobiles.com/] requires this and it's not that much extra traffic). * Removed 'auth_worker_max_request_count'. It was useful only with PAM, so it can now be specified in 'passdb pam { args = max_requests=n } '. The default is 100. * Removed 'umask'. It wasn't really used anywhere anymore. ACL: * The global ACL file overrides per-mailbox ACL file. Sieve: * You should consider [Pigeonhole.Sieve.Configuration.txt] (see the link for instructions). [Pigeonhole.ManageSieve.txt]: * The 'sieve=' and 'sieve_storage=' settings need to be placed in the ' plugin {}' section now and 'sieve_storage=' needs to be renamed to 'sieve_dir='. This removes the duplication of these values with respect to the [Pigeonhole.Sieve.txt] for [LDA.txt]. So, since you are using the Sieve plugin, these settings should already be there and all that needs to be done is remove the 'sieve=' and 'sieve_storage=' settings from the ' protocol managesieve {} ' section. (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/Migration.UW.txt0000644000175000017500000001253312244263650014654 00000000000000UW-IMAP ======= *WARNING: Badly done migration will cause your IMAP and/or POP3 clients to re-download all mails. Read page first carefully.* Namespaces ---------- By default UW-IMAP allows access to whole home directory. Since the home directory may contain many other files as well, many people have chosen to store their mails in the 'mail/' directory. This usually means that IMAP clients have been configured to use 'mail/' as their "IMAP namespace prefix" (the clients use different names for this). This doesn't work with Dovecot, because Dovecot shows clients only the 'mail/' directory instead of the whole home directory. So if the IMAP namespace was kept as 'mail/', Dovecot would try to access the '~/mail/mail/' directory. There are three ways to fix this: 1. Remove the IMAP namespace prefix from the clients. 2. Use namespaces to allow users to keep using the prefix. See "Backwards Compatibility" in for an example configuration. 3. Configure Dovecot to use home directory ('mail_location = mbox:~/:INBOX=/var/mail/%u' and set 'full_filesystem_access=yes'). The latter is needed to make '~/mail' and '~user/mail' prefixes work. A typical mailbox location setting is: ---%<------------------------------------------------------------------------- mail_location = mbox:~/mail:INBOX=/var/mail/%u ---%<------------------------------------------------------------------------- ~/mbox file ----------- If a '~/mbox' file exists, UW-IMAP moves all the mails from '/var/mail/user' into the '~/mbox' file. Currently Dovecot doesn't support this feature. There are two possibilities to handle this: * Move everyone's mails to '~/mbox' and reconfigure your to deliver new mails there by default. * Move the existing mails from '~/mbox' back to '/var/mail/user'. 
Subscriptions ------------- UW-IMAP keeps the list of subscribed mailboxes in '~/.mailboxlist' file, while Dovecot keeps them in '~/mail/.subscriptions' file. UW-IMAP's subscriptions also contain the mailboxes with their prefixes, for example: ---%<------------------------------------------------------------------------- mail/box ~/mail/box2 ~user/mail/box3 ---%<------------------------------------------------------------------------- * If you removed the prefix from the IMAP clients, you'll also have to remove these prefixes. * You can use script to copy all the users' '.mailboxlist' files to '.subscription' files (without any prefix removal). * It's possible to keep using the '.mailboxlist' filename (as long as it's in the same directory) by modifying 'SUBSCRIPTION_FILE_NAME' define in 'src/lib-storage/index/mbox/mbox-storage.h' UIDs, flags and keywords ------------------------ Dovecot uses UW-IMAP compatible metadata headers in mboxes, so it's possible to migrate back and forth without losing any flags, keywords or cause IMAP UIDs to change. Locking ------- UW-IMAP uses dotlock + flock() as the default locking combination, while Dovecot uses dotlock + fcntl() by default. See to determine what are the correct lock settings for you. To use UW-IMAP compatible locking, use: ---%<------------------------------------------------------------------------- mbox_read_locks = flock mbox_write_locks = dotlock flock ---%<------------------------------------------------------------------------- DON'T DELETE THIS MESSAGE -- FOLDER INTERNAL DATA ------------------------------------------------- The first message in a mbox file may contain the subject "DON'T DELETE THIS MESSAGE -- FOLDER INTERNAL DATA". This system message has information about the mailbox and its state. Dovecot v1.0 uses these system messages. Keep them. Maildir conversion ------------------ If you're planning on migrating to Dovecot, you might also want to switch to Maildir format as well. However it might be easier to first migrate from UW-IMAP + mbox to Dovecot + mbox, and only then migrate the users to Maildir format. * http://people.redhat.com/rkeech/maildir-migration.txt describes how to migrate from UW-IMAP+mbox to Dovecot v0.99 + Maildir. Note that Dovecot v0.99 has slightly different configuration file settings. * has some migration scripts * Here is a tool ([attachment:uw2dovecot.pl]) that will convert mbox, mbx, and mix formatted UW-IMAP folders to Maildir/dovecot format. UW-POP3 (UW-IMAP's POP3 wrapper, ipop3d) ======================================== By default Dovecot generates POP3 UIDLs differently than UW-POP3, which causes POP3 clients to redownload them as new messages. 
You can avoid this by setting: ---%<------------------------------------------------------------------------- pop3_uidl_format = %08Xv%08Xu ---%<------------------------------------------------------------------------- To utilize the UW login conversion to lowecase, we recommend that you use the following option: ---%<------------------------------------------------------------------------- auth_username_format = %Lu ---%<------------------------------------------------------------------------- You can confirm that the old and the new UIDLs match: ---%<------------------------------------------------------------------------- telnet localhost 110 user test pass test uidl quit ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Upgrading.2.1.txt0000644000175000017500000000357112244263664014617 00000000000000Upgrading Dovecot v2.0 to v2.1 ============================== v2.1 is mostly compatible with v2.0 configuration, except: * 15-mailboxes.conf included in the default configuration now specifies a few default SPECIAL-USE [http://tools.ietf.org/html/rfc6154] mailboxes. This file assumes that you already have 'namespace inbox { .. } ' specified (in 10-mail.conf). If you don't, you'll get errors about namespaces. Note that the namespace's name must be "inbox" (as well as usually include inbox=yes setting). The solution is to either make sure that you have such a namespace defined, or you can simply delete the 15-mailboxes.conf if you don't care about SPECIAL-USE. * now use UTF-8 mailbox names rather than mUTF-7: acl, autocreate, expire, trash, virtual * Usernames in authentication are now lowercased by default. * Non-lowercase usernames in password/user database result in "unknown user" errors * To allow mixed case usernames again, set 'auth_username_format=' (i.e. to empty) * [Plugins.FTS.Solr.txt] full text search backend changed to use mailbox GUIDs instead of mailbox names, requiring reindexing everything. solr_old backend can be used with old indexes to avoid reindexing, but it doesn't support some newer features. * [Plugins.Expire.txt]: Only go through users listed by userdb iteration. Delete dict rows for nonexistent users, unless expire_keep_nonexistent_users=yes. * [Tools.Dsync.txt] was merged into doveadm. There is still "dsync" symlink pointing to "doveadm", which you can use the old way for now. The preferred ways to run dsync are "doveadm sync" (for old "dsync mirror") and "doveadm backup". * dsync protocol isn't compatible with v2.0's dsync, so you can't dsync between v2.0 and v2.1 servers. (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/Upgrading.2.2.txt0000644000175000017500000000425312244263664014616 00000000000000Upgrading Dovecot v2.1 to v2.2 ============================== v2.2 has a couple of changes to settings since v2.1: * doveadm_proxy_port setting renamed to doveadm_port (but the old exists still as an alias) * imapc_ssl_ca_dir and pop3c_ssl_ca_dir settings replaced by a common ssl_client_ca_dir There are also some changes you should be aware of: * fts-solr no longer does "hard commits" to the Solr index for performance reasons. [Plugins.FTS.Solr.txt]. * When creating home directories, the permissions are copied from the parent directory if it has setgid-bit set. For full details, see . * "doveadm auth" command was renamed to "doveadm auth test" * IMAP: ID command now advertises server name as Dovecot by default. 
It was already trivial to guess this from command replies. * LDA/LMTP: If saving a mail brings user from under quota to over quota, allow it based on quota_grace setting (default: 10% above quota limit). * pop3_lock_session=yes now uses a POP3-only dovecot-pop3-session.lock file instead of actually locking the mailbox (and causing IMAP/LDA/LMTP to wait for the POP3 session to close). * mail_shared_explicit_inbox setting's default switched to "no". * dsync isn't compatible with v2.1 protocol. (The new protocol will be compatible with future Dovecot versions.) * autocreate plugin is being deprecated and it will log warnings. Convert the configuration to instead. Downgrading can be done fully safely to v2.1.16. * v2.1.16 adds support for "attribute changes", which are used by URLAUTH command and dsync with ACLs and/or Sieve scripts. If none of these features are used, you can downgrade safely to v2.1.11. * The error message for these attribute changes is: 'Log synchronization error at seq=..,offset=.. for .../dbox-Mails/dovecot.index: Unknown transaction record type 0x0' * v2.1.11 adds support for cache file changes. Older versions may think that the 'dovecot.index.cache' files are corrupted and complain about "Invalid magic in hole header". (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/UserIds.txt0000644000175000017500000001456412244263664013762 00000000000000System users used by Dovecot ============================ Dovecot typically requires 3 or more system users: * /root/: Dovecot is started as root. * /dovenull/: Dovecot uses an unprivileged /dovenull/ user for untrusted login processes. * /dovecot/: Dovecot uses an unprivileged /dovecot/ user for internal processes. * auth user: Password and user database lookups are done as auth user. * mail user(s): Mails are accessed using yet another user. The mail user should not be /dovecot/ user. Using multiple users allows privilege separation, which makes it harder for attackers to compromise the whole system if a security hole is found from one component. However, if you really want to run everything under a single user, [HowTo.Rootless.txt]. Dovenull user ------------- /dovenull/ user is used internally for processing users' logins. It shouldn't have access to any files, authentication databases or anything else either. It should belong to its own private *dovenull* group where no one else belongs to, and which doesn't have access to any files either (other than what Dovecot internally creates). You can change the default /dovenull/ user to something else from 'default_login_user' setting. Dovecot user ------------ /dovecot/ user is used internally for unprivileged Dovecot processes. It should belong to its own private /dovecot/ group. Mail files are not accessed as /dovecot/ user, so you shouldn't give it access to mails. You can change the default /dovecot/ user to something else from 'default_internal_user' setting. Mail users ---------- You can use one or more system users for accessing users' mails. Most configurations can be placed to two categories: 1. [SystemUsers.txt] where each Dovecot user has their own system user in '/etc/passwd'. For system user setups you generally don't have to worry about UIDs or GIDs, they are returned by the [AuthDatabase.Passwd.txt] lookup. 2. [VirtualUsers.txt] where all Dovecot users run under a single system user. Typically you'd set this with 'mail_uid' setting (e.g. 'mail_uid=vmail'). 
Note that you most likely don't want the [UserDatabase.txt] to return any UID/GID, as they override the 'mail_uid' setting. However it's possible to use a setup that is anything between these two. For example use a separate system user for each domain. See below for more information about how UIDs can be used. UIDs ---- Dovecot's [UserDatabase.txt] configuration calls system users UIDs. There are a few things you should know about them: * Although UID normally means a numeric ID (as specified by '/etc/passwd'), it's anyway possible to use names as UID values and let Dovecot do the lookup (eg. uid=vmail). However depending on where you used it, it may slow down the authentication. * The UIDs don't really have to exist in '/etc/passwd' (the kernel doesn't care about that). For example you could decide to use UIDs 10000-59999 for 50000 virtual Dovecot users. You'll then just have to be careful that the UIDs aren't used unintentionally elsewhere. * The important thing to consider with your UID allocation policy is that if Dovecot has a security hole in its IMAP or POP3 implementation, the attacker can read mails of other people who are using the same UID. So clearly the most secure way is to allocate a different UID for each user. It can however be a bit of a pain and OSes don't always support more than 65536 UIDs. * By default Dovecot allows users to log in only with UID numbers 500 and above. This check tries to make sure that no-one can ever log in as daemons or other system users. If you're using an UID lower than 500, you'll need to change the 'first_valid_uid' setting. GIDs ---- System groups (GIDs) work very much the same way as UIDs described above: You can use names instead of numbers for GID values, and the used GIDs don't have to exist in '/etc/group'. System groups are useful for sharing mailboxes between users that have different UIDs but belong to a same group. Currently Dovecot doesn't try to do anything special with the groups, so if you're not sure how you should create them, you might as well place all the users into a single group or create a separate group for each user. If you use multiple UIDs and you wish to create [SharedMailboxes.txt], setting up the groups properly may make your configuration more secure. For example if you have two teams and their mailboxes are shared only to their team members, you could create a group for each team and set the shared mailbox's group to the team's group and permissions to 0660, so neither team can even accidentally see each others' shared mailboxes. Currently Dovecot supports specifying only the primary group, but if your userdb returns 'system_user' [UserDatabase.ExtraFields.txt], the non-primary groups are taken from '/etc/group' for that user. In a future version the whole GID list will be configurable without help from '/etc/group'. It's also possible to give all the users access to extra groups with 'mail_access_groups' setting. Authentication process user --------------------------- Depending on passdb and userdb configuration, the lookups are done either by auth process or auth worker process. They have different default users: ---%<------------------------------------------------------------------------- service auth { user = $default_internal_user } service auth-worker { user = root } ---%<------------------------------------------------------------------------- The user must have access to your [PasswordDatabase.txt] and [UserDatabase.txt]. It's not used for anything else. 
The default is to use root, because it's guaranteed to have access to all the password databases. If you don't need this, you should change it to $default_internal_user. [PasswordDatabase.PAM.txt] and [PasswordDatabase.Shadow.txt] passdbs are usually configured to read '/etc/shadow' file. Even this doesn't need root access if the file is readable by shadow group: ---%<------------------------------------------------------------------------- service auth-worker { user = $default_internal_user group = shadow } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/VirtualUsers.txt0000644000175000017500000001312512244263664015044 00000000000000Virtual Users ============= There are many ways to configure Dovecot to use virtual users. If you have no idea how you want your users to be configured, select some [HowTo.txt] and follow its instructions. Users are often categorized as being either system users (in '/etc/passwd') or virtual users (not in '/etc/passwd'). However from Dovecot's point of view there isn't much of a difference between them. If a [AuthDatabase.Passwd.txt] lookup and a [AuthDatabase.SQL.txt] lookup return the same [UserDatabase.txt] information, Dovecot's behavior is identical. Password and user databases --------------------------- Dovecot supports many different [PasswordDatabase.txt] and [UserDatabase.txt]. With virtual users the most commonly used ones are [AuthDatabase.LDAP.txt], [AuthDatabase.SQL.txt] and [AuthDatabase.PasswdFile.txt]. The databases usually contain the following information: * Username * Password * UNIX User ID (UID) and primary UNIX Group ID (GID) * Home directory and/or mail location Usernames and domains --------------------- Dovecot doesn't care much about domains in usernames. IMAP and POP3 protocols currently have no concept of "domain", so the username is just something that shows up in your logs and maybe in some configuration, but they have no direct functionality. So although Dovecot makes it easier to handle "user@domain" style usernames (eg. %n and %d [Variables.txt]), nothing breaks if you use for example "domain%user" style usernames instead. However some [Authentication.Mechanisms.txt] do have an explicit support for realms (pretty much the same as domains). If those mechanisms are used, the username is changed to be "user@realm". And of course there's no need to have domains at all in the usernames. Passwords --------- The password can be in [Authentication.PasswordSchemes.txt], but you need to tell the format to Dovecot because it won't try to guess it. The SQL and LDAP configuration files have the 'default_pass_scheme' setting for this. If you have passwords in multiple formats, or the passdb doesn't have such a setting, you'll need to prefix each password with "{}", for example "{PLAIN}plaintext-password" or "{PLAIN-MD5}1a1dc91c907325c69271ddf0c944bc72". UNIX UIDs --------- The most important thing you need to understand is that *Dovecot doesn't access the users' mails as the /dovecot/ user*! So *don't* put /dovecot/ into the /mail/ group, and don't make mails owned by the /dovecot/ user. That will only make your Dovecot installation less secure. So, if not the /dovecot/ user, what then? You can decide that yourself. You can create, for example, one /vmail/ user which owns all the mails, or you can assign a separate UID for each user. See [UserIds.txt] for more information about different ways to allocate UIDs for users. 
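For example, a single /vmail/ user could be created roughly like this (only a sketch for a typical Linux system; pick a free UID/GID and a mail directory that fit your installation):

---%<-------------------------------------------------------------------------
groupadd -g 5000 vmail
useradd -g vmail -u 5000 -d /var/mail/virtual -m vmail
---%<-------------------------------------------------------------------------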
UNIX GIDs --------- Unless you're using [SharedMailboxes.txt] and multiple UIDs, it doesn't really matter what GIDs you use. You can, for example, use a single GID for all users, or create a separate GID for each user. See [UserIds.txt] for more information. Home directories ---------------- Some people are opposed to the idea of virtual users having home directories, but no matter what you call it, it's a good idea to have a directory where user-specific configuration and other state is stored. See more information. Mail location ------------- The userdb can return the 'mail' [UserDatabase.txt] to override the default 'mail_location' setting. Usually you shouldn't need this. Examples -------- Dynamic passwd-file locations ----------------------------- ---%<------------------------------------------------------------------------- mail_location = maildir:/home/%d/%n/Maildir passdb { driver = passwd-file args = username_format=%n /home/%d/etc/shadow } userdb { driver = passwd-file args = username_format=%n /home/%d/etc/passwd } ---%<------------------------------------------------------------------------- In the above examples users are expected to log in as "user@domain". Their mail is kept in their home directory at '/home///Maildir'. The usernames in the passwd and shadow files are expected to contain only the user part, no domain. This is because the path itself already contained %d to specify the domain. If you want the files to contain full user@domain names, you can use 'username_format=%u'. static userdb ------------- Many people store only usernames and passwords in their database and don't want to deal with UIDs or GIDs. In that case the easiest way to get Dovecot running is to use the [UserDatabase.Static.txt]: ---%<------------------------------------------------------------------------- mail_location = maildir:~/Maildir passdb { driver = pam } userdb { driver = static args = uid=vmail gid=vmail home=/var/mail/virtual/%d/%n } ---%<------------------------------------------------------------------------- This makes Dovecot look up the mails from '/var/mail/virtual///Maildir/' directory, which should be owned by vmail user and vmail group. Existing virtual user management software ----------------------------------------- * VPopMail [http://www.inter7.com/vpopmail/]: Dovecot [AuthDatabase.VPopMail.txt]. * Linuxconf [http://www.solucorp.qc.ca/linuxconf/]: See * Also see the page (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/SocketUnavailable.txt0000644000175000017500000000615212244263661015767 00000000000000UNIX Socket Resource Temporarily Unavailable ============================================ Commonly visible as: ---%<------------------------------------------------------------------------- imap-login: Error: net_connect_unix(imap) failed: Resource temporarily unavailable ---%<------------------------------------------------------------------------- This means that there are more imap-login processes trying to connect to the "imap" UNIX socket than there are imap processes accepting the connections. The kernel's connection listener queue got full and it started rejecting further connections. So what can be done about it? Dovecot v2.2 bug ---------------- Dovecot v2.2.0 - v2.2.6 were attempting to optimize host.domain lookups by doing them only once in the master process. Unfortunately they were actually doing the lookup every time when creating a new process. 
In some configurations this lookup could have done a somewhat slow DNS lookup, causing the process creation to become very slow and triggering this message. The fix is in v2.2.7, and you can also work around it:

 * Add to dovecot.conf: 'import_environment = TZ DEBUG_OUTOFMEM DOVECOT_HOSTDOMAIN'
 * Before the "dovecot" binary is started, run: 'export DOVECOT_HOSTDOMAIN=mailserver.example.com' (of course changing the value)

Wrong service settings
----------------------

This can happen if 'service imap { client_limit }' is set to anything other than 1. IMAP (and POP3 and other mail) processes do disk I/O, lock waiting and such, so if all the available imap processes are stuck waiting on something, they can't accept new connections and the connections queue up in the kernel. For mail processes only 'client_limit=1' is recommended, except maybe for very tiny systems with a few users.

Master process busy
-------------------

The Dovecot master process forks all of the new processes. If it's using 100% CPU, it doesn't have time to fork enough new processes. Even if it's not constantly using 100% CPU, there may be fork bursts where it temporarily gets too busy. The solution is to make it do less work by forking fewer processes:

 * Most importantly, switch to [LoginProcess.txt]. This alone might be enough.
 * You can also switch (most of) the other commonly forked processes to being reused. For example 'service imap { service_count = 100 }' reuses the imap process for 100 different IMAP connections before it dies. This is useful mainly for the imap, pop3 and managesieve services. It's better to avoid 'service_count=0' (unlimited) in case there are memory leaks.
 * You can pre-fork some idling processes to handle bursts with 'service { process_min_avail }'. See before changing any service settings. Some services require specific values to work correctly.

Listener queue size
-------------------

Dovecot uses 'service { client_limit }' * 'service { process_limit }' as the listener queue size, but with a hardcoded upper limit of 511. Most OSes use an even lower limit, typically 128. On Linux you can increase this via '/proc/sys/net/core/somaxconn'.

(This file was created from the wiki on 2013-11-24 04:42)

dovecot-2.2.9/doc/wiki/AuthDatabase.CheckPassword.txt0000644000175000017500000002240412244263637017461 00000000000000CheckPassword
=============

Checkpassword is an authentication interface originally implemented by qmail [http://www.qmail.org/]. Checkpassword combines both the [PasswordDatabase.txt] and [UserDatabase.txt] lookups into a single checkpassword lookup, which makes the standard implementation unsuitable for a standalone userdb. With Dovecot extensions it's also possible to use checkpassword as a userdb.

Typically you'll use [UserDatabase.Prefetch.txt] as the userdb, but it's not required that you use the checkpassword script's userdb capabilities. You can still use for example [UserDatabase.Static.txt] if you're using only a single UID and GID, and your home directory fits into a template.

Security
--------

The standard checkpassword design is incompatible with Dovecot's security model. If the system has local users and the checkpassword script setuid()s into a local user, that user is able to ptrace the communication and change the authentication results. This is of course undesirable, so v2.2.7+ will simply refuse to run in such environments by default. The possibilities to solve this are:

1.
If possible, change the checkpassword to return 'userdb_uid' and 'userdb_gid' extra fields instead of using 'setuid()' and 'setgid()'. This also improves the performance. 2. If you can't change the script, you can make Dovecot's 'checkpassword-reply' binary setuid or setgid (e.g.'chgrp dovecot /usr/local/libexec/dovecot/checkpassword-reply; chmod g+s /usr/local/libexec/dovecot/checkpassword-reply') 3. If you don't have any untrusted local users and you just don't care about this check, you can set 'INSECURE_SETUID=1' environment e.g. with a wrapper checkpassword script. Deliver ------- If your checkpassword script doesn't support Dovecot extensions, you can't use it as a user database. This means that if you wish to use , you can't use the '-d' parameter to do userdb lookups. There are two ways to solve this: 1. Use another userdb which does the lookup for deliver, for example [AuthDatabase.SQL.txt] or [UserDatabase.Static.txt]. Add this userdb after the prefetch userdb. 2. Use a script to look up the user's home directory and run deliver without '-d' parameter. For example: ---%<------------------------------------------------------------------------- #!/bin/sh # <> # If users have different UIDs/GIDs, make sure to also change this process's UID and GID. # If you want to override any settings, use dovecot-lda's -o parameter # (e.g. dovecot-lda -o mail_location=maildir:~/Maildir). export HOME exec /usr/local/libexec/dovecot/dovecot-lda ---%<------------------------------------------------------------------------- Checkpassword Interface ----------------------- The interface is specified in http://cr.yp.to/checkpwd/interface.html. However here's a quick tutorial for writing a script: * Read ' NUL NUL' from fd 3. * Verify the username and password. * If the authentication fails, exit with code 1. This makes Dovecot give "Authentication failed" error to user. * This error is returned both for password mismatch and also if the user doesn't exist at all. Internally Dovecot maps this as password mismatch. * If you encounter an internal error, exit with code 111. This makes Dovecot give "Temporary authentication failure" error to user. * If the authentication succeeds, you'll need to: * Set user's home directory to '$HOME' environment. This isn't required, [VirtualUsers.txt]. * Set '$USER' environment variable. If the user name was changed (eg. if you lowercased "Username" to "username"), you can tell about it to Dovecot by setting '$USER' to the changed user name. * Return the user's [UserIds.txt] using 'userdb_uid' and 'userdb_gid' environments and add them to the 'EXTRA' environment (see below for Dovecot extensions). * This is recommended over actually changing the UID/GID using setuid()/setgid() as specified by the standard checkpassword interface, because it's [AuthDatabase.CheckPassword.txt]. * Your program received a path to 'checkpassword-reply' binary as the first parameter. Execute it. Qmail-LDAP ---------- Note that auth_imap that comes with qmail-ldap is not compatible with this interface. You can get a patch that adds auth_dovecot functionality to qmail-ldap here [http://japc.uncovering.org/dovecot/qmail-ldap-1.03-20060201-dovecot.patch]. Or you can use auth_pop instead, but you may need to pass /aliasempty/ to let auth_pop find the Maildir, so it is recommended to write a /var/qmail/bin/auth_dovecot wrapper (don't forget to chmod +x it) around auth_pop. 
---%<------------------------------------------------------------------------- #!/bin/sh QMAIL="/var/qmail" if [ -e $QMAIL/control/defaultdelivery ]; then ALIASEMPTY=`head -n 1 $QMAIL/control/defaultdelivery 2> /dev/null` else ALIASEMPTY=`head -n 1 $QMAIL/control/aliasempty 2> /dev/null` fi ALIASEMPTY=${ALIASEMPTY:-"./Maildir/"} exec $QMAIL/bin/auth_pop "$@" $ALIASEMPTY ---%<------------------------------------------------------------------------- you can also use this wrapper to pass LOGLEVEL environmental variable to auth_pop. Dovecot Extensions ------------------ If you wish to return [PasswordDatabase.ExtraFields.txt] for Dovecot, set them in environment variables and then list them in EXTRA environment variable. The [UserDatabase.ExtraFields.txt] can be returned by prefixing them with 'userdb_'. For example: ---%<------------------------------------------------------------------------- userdb_quota_rule=*:storage=10000 userdb_mail=mbox:$HOME/mboxes EXTRA=userdb_quota_rule userdb_mail ---%<------------------------------------------------------------------------- Dovecot also sets some environment variables that the script may use: * 'SERVICE': contains eg. imap, pop3 or smtp * 'TCPLOCALIP' and 'TCPREMOTEIP': Client socket's IP addresses if available * 'MASTER_USER': If master login is attempted. This means that the password contains the master user's password and the normal username contains the user who master wants to log in as. * 'AUTH_*': All of the [Variables.txt] are available as 'AUTH_' extra fields. For example '%{cert}' is in 'AUTH_CERT'. (v2.0.16+) Checkpassword as userdb ----------------------- Dovecot calls the script with 'AUTHORIZED=1' environment set when performing a userdb lookup. The script must acknowledge this by changing the environment to 'AUTHORIZED=2', otherwise the lookup fails. Other than that, the script works the same way as a passdb checkpassword script. If user doesn't exist, use exit code 3. Checkpassword with passdb lookups (v2.1.2+) ------------------------------------------- Normally checkpassword answers to questions "is user X's password Y?" This doesn't work with non-plaintext auth mechanisms, or when Dovecot wants to do a non-authenticating passdb lookup (e.g. for LMTP proxy). These passdb credentials lookups can be implemented the same way as a userdb lookup (i.e. change the 'AUTHORIZED' environment). * 'AUTHORIZED=1' is set, just like for userdb lookup * When doing a non-plaintext authentication: * 'CREDENTIALS_LOOKUP=1' environment is set * The password scheme that Dovecot wants is available in 'SCHEME' environment (e.g.'SCHEME=CRAM-MD5') * If a password is returned, it must be returned as 'password={SCHEME}secret'. * When doing a passdb lookup, e.g. a proxy which doesn't really want the password, just the passdb extra fields: * Neither 'CREDENTIALS_LOOKUP' nor 'SCHEME' is set. * FIXME: Unfortunately it looks like you currently can't easily differentiate a passdb lookup from userdb lookup! * If user doesn't exist, use exit code 3. 
* If you get an error about checkpassword exiting with code 0, you didn't execute the 'checkpassword-reply' binary as you should have (which exits with code 2 on success) Example ------- The standard way: ---%<------------------------------------------------------------------------- passdb { driver = checkpassword args = /usr/bin/checkpassword } userdb { driver = prefetch } # If you want to use deliver -d and your users are in SQL: userdb { driver = sql args = /etc/dovecot/dovecot-sql.conf.ext } ---%<------------------------------------------------------------------------- Using checkpassword only to verify the password: ---%<------------------------------------------------------------------------- passdb { driver = checkpassword args = /usr/bin/checkpassword } userdb { driver = static args = uid=vmail gid=vmail home=/home/%u } ---%<------------------------------------------------------------------------- Performance ----------- The backend is not suited for heavy traffic. Especially if the script spawned have to launch an entire language interpreter. If your user database is only accessible with custom code an alternative might be using the [AuthDatabase.Dict.txt]. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.Memory.txt0000644000175000017500000002236212244263644015055 00000000000000Memory Allocations ================== C language requires explicitly allocating and freeing memory. The main two problems with this are: 1. A lot of allocations and frees cause memory fragmentation. The longer a process runs, the more it could have leaked memory because there are tiny unused free spots all around in heap. 2. Freeing memory is easy to forget, causing memory leaks. Sometimes it can be accidentally done multiple times, causing a potential security hole. A lot of free() calls all around in the code also makes the code more difficult to read and write. The second problem could be solved with Boehm garbage collector, which Dovecot can use optionally, but it's not very efficient. It also doesn't help with the first problem. To reduce the problems caused by these issues, Dovecot has several ways to do memory management. Common Design Decisions ----------------------- All memory allocations (with some exceptions in data stack) return memory filled with NULs. This is also true for new memory when growing an allocated memory with realloc. The zeroing reduces accidental use of uninitialized memory and makes the code simpler since there is no need to explicitly set all fields in allocated structs to zero/NULL. (I guess assuming that this works correctly for NULLs isn't strictly ANSI-C compliant, but I don't see this assumption breaking in any system anyone would really use Dovecot.) The zeroing is cheap anyway. In out-of-memory situations memory allocation functions die internally by calling 'i_fatal_status(FATAL_OUTOFMEM, ..)'. There are several reasons for this: * Trying to handle malloc() failures explicitly would add a lot of error handling code paths and make the code much more complex than necessary. * In most systems malloc() rarely actually fails because the system has run out of memory. Instead the kernel will just start killing processes. * Typically when malloc() does fail, it's because the process's address space limit is reached. Dovecot enforces these limits by default. Reaching it could mean that the process was leaking memory and it should be killed. 
It could also mean that the process is doing more work than anticipated and that the limit should probably be increased. * Even with perfect out-of-memory handling, the result isn't much better anyway than the process dying. User isn't any happier by seeing "out of memory" error than "server disconnected". When freeing memory, most functions usually also change the pointer to NULL. This is also the reason why most APIs' deinit functions take pointer-to-pointer parameter, so that when they're done they can change the original pointer to NULL. malloc() Replacements --------------------- 'lib/imem.h' has replacements for all the common memory allocation functions: * 'malloc', 'calloc' -> 'i_malloc()' * 'realloc()' -> 'i_realloc()' * 'strdup()' -> 'i_strdup()' * 'free()' -> 'i_free' * etc. All memory allocation functions that begin with 'i_' prefix require that the memory is later freed with 'i_free()'. If you actually want the freed pointer to be set to NULL, use 'i_free_and_null()'. Currently 'i_free()' also changes the pointer to NULL, but in future it might change to something else. Memory Pools ------------ 'lib/mempool.h' defines API for allocating memory through memory pools. All memory allocations actually go through memory pools. Even the 'i_*()' functions get called through 'default_pool', which by default is 'system_pool' but can be changed to another pool if wanted. All memory allocation functions that begin with 'p_' prefix have a memory pool parameter, which it uses to allocate the memory. Dovecot has many APIs that require you to specify a memory pool. Usually (but not always) they don't bother freeing any memory from the pool, instead they assume that more memory can be just allocated from the pool and the whole pool is freed later. These pools are usually alloconly-pools, but can also be data stack pools. See below. Alloc-only Pools ---------------- 'pool_alloconly_create()' creates an allocate-only memory pool with a given initial size. As the name says, alloconly-pools only support allocating more memory. As a special case its last allocation can be freed.'p_realloc()' also tries to grow the existing allocation only if it's the last allocation, otherwise it'll just allocates new memory area and copies the data there. Initial memory pool sizes are often optimized in Dovecot to be set large enough that in most situations the pool doesn't need to be grown. To make this easier, when Dovecot is configured with --enable-devel-checks, it logs a warning each time a memory pool is grown. The initial pool size shouldn't of course be made too large, so usually I just pick some small initial guessed value and later if I get too many "growing memory pool" warnings I start growing the pool sizes. Sometimes there's just no good way to set the initial pool size and avoid the warnings, in that situation you can prefix the pool's name with MEMPOOL_GROWING and it doesn't log warnings. Alloconly-pools are commonly used for an object that builds its state from many memory allocations, but doesn't change (much of) its state. It's a lot easier when you can do a lot of small memory allocations and when destroying the object you'll just free the memory pool. Data Stack ---------- 'lib/data-stack.h' describes the low-level data stack functions. 
Data stack works a bit like C's control stack.'alloca()' is quite near to what it does, but there's one major difference: In data stack the stack frames are explicitly defined, so functions can return values allocated from data stack.'t_strdup_printf()' call is an excellent example of why this is useful. Rather than creating some arbitrary sized buffer and using snprintf(), which might truncate the value, you can just use t_strdup_printf() without worrying about buffer sizes being large enough. Try to keep the allocations from data stack small, since the data stack's highest memory usage size is kept for the rest of the process's lifetime. The initial data stack size is 32kB, which should be enough in normal use. If Dovecot is configured with --enable-devel-checks, it logs a warning each time the data stack needs to be grown. Stack frames are preferably created using T_BEGIN/T_END block, for example: ---%<------------------------------------------------------------------------- T_BEGIN { string_t *str1 = t_str_new(256); string_t *str2 = t_str_new(256); /* .. */ } T_END; ---%<------------------------------------------------------------------------- In the above example two strings are allocated from data stack. They get freed once the code goes past T_END, that's why the variables are preferably declared inside the T_BEGIN/T_END block so they won't accidentally be used after they're freed. T_BEGIN and T_END expand to 't_push()' and 't_pop()' calls and they must be synchronized. Returning from the block without going past T_END is going to cause Dovecot to panic in next T_END call with "Leaked t_pop() call" error. Memory allocations have similar disadvantages to alloc-only memory pools. Allocations can't be grown, so with the above example if str1 grows past 256 characters, it needs to be reallocated, which will cause it to forget about the original 256 bytes and allocate 512 bytes more. Memory allocations from data stack often begin with 't_' prefix, meaning "temporary". There are however many other functions that allocate memory from data stack without mentioning it. Memory allocated from data stack is usually returned as a const pointer, so that the caller doesn't try to free it (which would cause a compiler warning). When should T_BEGIN/T_END used and when not? This is kind of black magic. In general they shouldn't be used unless it's really necessary, because they make the code more complex. But if the code is going through loops with many iterations, where each iteration is allocating memory from data stack, running each iteration inside its own stack frame would be a good idea to avoid excessive memory usage. It's also difficult to guess how public APIs are being used, so I've tried to make such API functions use their own private stack frames. Dovecot's ioloop code also wraps all I/O callbacks and timeout callbacks into their own stack frames, so you don't need to worry about them. You can create temporary memory pools from data stack too. Usually you should be calling 'pool_datastack_create()' to generate a new pool, which also tries to track that it's not being used unsafely across different stack frames. Some low-level functions can also use 'unsafe_data_stack_pool' as the pool, which doesn't do such tracking. Data stack's advantages over malloc(): * FAST, most of the time allocating memory means only updating a couple of pointers and integers. Freeing memory all at once also is a fast operation. 
* No need to 'free()' each allocation resulting in prettier code * No memory leaks * No memory fragmentation It also has some disadvantages: * Allocating memory inside loops can accidentally allocate a lot of memory * Memory allocated from data stack can be accidentally stored into a permanent location and accessed after it's already been freed. * Debugging invalid memory usage may be difficult using existing tools (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/HowTo.Rootless.txt0000644000175000017500000000740112244263646015245 00000000000000Rootless Installation ===================== It's possible to make Dovecot run under a single system user without requiring root privileges at any point. This shouldn't be thought of as a security feature, but instead simply as a way for non-admins to run Dovecot in their favorite mail server. It's also useful if you just wish to test Dovecot without messing up your system. If you think of this as a good way to achieve security, ask yourself which is worse: a) * A very small possibility to get root privileges through Dovecot. * A small possibility without logging in to get into system as a non-privileged *dovecot* user, chrooted into an empty directory. * A small possibility to get user's privileges after logging in, but no possibility to read others' mails since they're saved with different UIDs (plus you might also be chrooted to your own mailbox). b) * Absolutely zero possibility to get root privileges through Dovecot. * A small possibility to get into system as a mail user, possibly even without logging in, and being able to read everyone's mail (and finally gaining roots by exploiting some just discovered local vulnerability, unless you bothered to set up a special chrooted environment). Installation ------------ Install somewhere under home directory: ---%<------------------------------------------------------------------------- ./configure --prefix=$HOME/dovecot make make install ---%<------------------------------------------------------------------------- Dovecot is then started by running '~/dovecot/sbin/dovecot'. The example configuration file exists in '~/dovecot/share/doc/dovecot/example-config/' and needs to be copied to '~/dovecot/etc/dovecot/'. 
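A sketch of that copy step, assuming the '--prefix=$HOME/dovecot' installation shown above:

---%<-------------------------------------------------------------------------
mkdir -p ~/dovecot/etc/dovecot
cp -r ~/dovecot/share/doc/dovecot/example-config/* ~/dovecot/etc/dovecot/
---%<-------------------------------------------------------------------------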
Configuration ------------- The important settings to change for rootless installation are: * Set usernames to the user which dovecot will be run under: ---%<---------------------------------------------------------------------- default_internal_user = user default_login_user = user ---%<---------------------------------------------------------------------- * Remove default chrooting from all services: ---%<---------------------------------------------------------------------- service anvil { chroot = } service imap-login { chroot = } service pop3-login { chroot = } ---%<---------------------------------------------------------------------- * Change listener ports: ---%<---------------------------------------------------------------------- service imap-login { inet_listener imap { port = 10143 } inet_listener imaps { port = 10993 } } service pop3-login { inet_listener pop3 { port = 10110 } inet_listener pop3s { port = 10995 } } ---%<---------------------------------------------------------------------- * Change logging destination: ---%<---------------------------------------------------------------------- log_path = /home/user/dovecot.log ---%<---------------------------------------------------------------------- * Instead of [PasswordDatabase.PAM.txt] use for example [AuthDatabase.PasswdFile.txt]: ---%<---------------------------------------------------------------------- passdb { driver = passwd-file args = /home/user/dovecot/etc/passwd } userdb { driver = passwd } ---%<---------------------------------------------------------------------- Where the 'passwd' file contains the username and password for your login user: ---%<---------------------------------------------------------------------- user:{PLAIN}pass ---%<---------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MailLocation.SharedDisk.txt0000644000175000017500000000575712244263647017004 00000000000000Mail storage on shared disks ============================ Dovecot supports keeping mails and index files in clustered filesystems, such as Red Hat GFS [http://www.redhat.com/gfs/], Oracle OCFS2 [http://oss.oracle.com/projects/ocfs2/] and HP Polyserve Matrix [http://h18006.www1.hp.com/storage/software/clusteredfs/index.html]. Dovecot also supports keeping mails and index files on NFS. Everything described in this page applies to NFS as well, but see for more NFS-specific problems and optimizations. Memory mapping -------------- By default Dovecot mmap()s the index files. This may not work with all clustered filesystems, and it most certainly won't work with NFS. Setting 'mmap_disable = yes' disables mmap() and Dovecot does its own internal caching. If mmap() is supported by your filesystem, it's still not certain that it gives better performance. Try benchmarking to make sure. Locking ------- Dovecot supports locking index files with fcntl (default), flock or dotlocks. Some clustered filesystems may not support fcntl, so you can change it to use flock instead. Fcntl locks may also cause problems with some NFS configurations, in which case you can try if switching to dotlocks helps. Note that dotlocks are the slowest locking method. You can change the locking method from 'lock_method' setting. Regardless of the 'lock_method' setting, Dovecot always uses dotlocks for some locks. Clock synchronization --------------------- Run ntpd in each computer to make sure their clocks are synchronized. 
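A quick way to verify that ntpd is actually keeping the clock synchronized, assuming the 'ntpq' utility from the ntp package is installed:

---%<-------------------------------------------------------------------------
ntpq -p
---%<-------------------------------------------------------------------------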
If the clocks are more than one second apart from each others and multiple computers access the same mailbox simultaneously, you may get errors. Caching ------- Your cluster will probably perform better if users are usually redirected to the same server. This is because the mailbox may already be cached in the memory and it may also reduce the traffic between the clusterfs nodes. You can use [Director.txt] service to do this easily automatically. Or at the very least make sure that your load balancer redirects connections from the same IP address to the same server. FUSE / GlusterFS ---------------- FUSE caches dentries and file attributes internally. If you're using multiple GlusterFS clients to access the same mailboxes, you're going to have problems. Worst of these problems can be avoided by using NFS cache flushes, which just happen to work with FUSE as well: ---%<------------------------------------------------------------------------- mail_nfs_index = yes mail_nfs_storage = yes ---%<------------------------------------------------------------------------- These probably don't work perfectly. Samba / CIFS ------------ Dovecot's temporary files may include a colon character ':' in their filename, which is not a permitted character when using cifs.Dovecot also renames the temporary files whilst holding a lock in them, which generates the error 'Text file is busy'. Cifs/smbfs is unlikely to work as a remote filesystem. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Migration.txt0000644000175000017500000001601712244263650014323 00000000000000Migration to Dovecot ==================== *WARNING: Badly done migration will cause your IMAP and/or POP3 clients to re-download all mails. Read this page carefully.* This page contains generic information related to migrating from another IMAP or POP3 server to Dovecot. You should read this page, and then look at the server-specific instructions: * [Migration.Dsync.txt] * [Migration.UW.txt] * [Migration.Linuxconf.txt] * [Migration.Courier.txt] * [Migration.Cyrus.txt] * [Migration.Vm-pop3d.txt] * [Migration.Teapop.txt] * [Migration.BincIMAP.txt] * [Migration.Gmail.txt] Dovecot is one of the easiest IMAP servers to migrate to because of its powerful configuration options. Dovecot can store email in both [MailboxFormat.mbox.txt] and [MailboxFormat.Maildir.txt] formats, making it compatible with many existing servers. Dovecot is also very flexible as to where it stores the email. It supports many different databases for storing [PasswordDatabase.txt] [UserDatabase.txt]. Migration involves several separate tasks. You either need to convert your data or make Dovecot read your existing data. Dovecot is very good at being compatible and configurable, so it is likely to read your existing mailboxes and user and password configurations. Tasks for conversion include: * [MailLocation.txt] * [PasswordDatabase.txt] * [UserDatabase.txt] (home directory structure, UID and GID) IMAP migration -------------- When migrating mails from another IMAP server, you should make sure that these are preserved: 1. Message flags * Lost flags can be really annoying, you most likely want to avoid it. 2. Message UIDs and UIDVALIDITY value * If UIDs are lost, at the minimum clients' message cache gets cleaned and messages are re-downloaded as new. * Some IMAP clients store metadata by assigning it to specific UID, if UIDs are changed these will be lost. 3. 
Mailbox subscription list * Users would be able to manually subscribe them again if you don't want to mess with it. POP3 migration -------------- When migrating mails from another POP3 server, you should try to preserve the old UIDLs. If POP3 client is configured to keep mails in the server and the messages' UIDLs change, all the messages are downloaded again as new messages. *Don't trust the migration scripts or anything you see in this wiki. Verify manually that the UIDLs are correct before exposing real clients to Dovecot.* You can do this by logging in using your old POP3 server, issuing UIDL command and saving the output. Then log in using Dovecot and save its UIDL output as well. Use e.g.'diff' command to verify that the lists are identical. Note that: * *If a client already saw changed UIDLs and decided to start re-downloading mails, it's unlikely there is anything you can do to stop it. Even going back to your old server is unlikely to help at that point.* * Some (many?) POP3 clients also require that the message ordering is preserved. * Some clients re-download all mails if you change the hostname in the client configuration. Be aware of this when testing. Some servers (UW, Cyrus) implementing both IMAP and POP3 protocols use the IMAP UID and UIDVALIDITY values for generating the POP3 UIDL values. To preserve the POP3 UIDL from such servers you'll need to preserve the IMAP UIDs and set 'pop3_uidl_format' properly. If the server doesn't use IMAP UIDs for the POP3 UIDL, you'll need to figure out another way to do it. One way is to put the UIDL value into X-UIDL: header in the mails and set 'pop3_reuse_xuidl=yes'. Some POP3 servers (QPopper) write the X-UIDL: header themselves, making the migration easy. Some POP3 servers using Maildir uses the maildir base filename as the UIDL. You can use 'pop3_uidl_format = %f' to do this. Here is a list of POP3 servers and how they generate their UIDs. Please update if you know more: * UW-POP3: 'pop3_uidl_format = %08Xv%08Xu' * qmail-pop3d: 'pop3_uidl_format = %f' * Cyrus <= 2.1.3: 'pop3_uidl_format = %u' * Cyrus >= 2.1.4: 'pop3_uidl_format = %v.%u' * Citadel [http://www.citadel.org] (all versions): 'pop3_uidl_format = %u' * Dovecot 0.99: 'pop3_uidl_format = %v.%u' * tpop3d [http://www.ex-parrot.com/~chris/tpop3d/]: * Maildir: 'pop3_uidl_format = %Mf' (MD5 sum of the maildir base filename in hex) * mbox: MD5 sum in hex of first 512 bytes of the message (or of the full message if it's less than 512 bytes). * popa3d [http://www.openwall.com/popa3d/] Generates MD5 sum from a couple of headers. Dovecot uses compatible MD5 sums internally, but converts them into UIDL strings in a bit different way. * teapop [http://www.toontown.org/teapop/] 0.3.8: * Maildir: 'pop3_uidl_format = %Mf' (MD5 sum of the maildir base filename in hex) * mbox: MD5 sum of the message without the following lines: Status,X-Status,Lines and Content-Length. * Cucipop mbox: v1.31 uses its own homebrew checksum based on headers and body. Injection of X-UIDL: headers and pop3_reuse_xuidl=yes is the way to go. * qpopper: 'pop3_reuse_xuidl=yes' IMAP <-> IMAP copying --------------------- If you don't care about preserving messages' UIDs, or if there isn't a simple way to preserve them, you can always migrate from another IMAP server to Dovecot by downloading the messages via IMAP from the old server and then uploading them to Dovecot via IMAP. 
There are several different tools for this, for example UW-IMAP [http://www.washington.edu/imap/]'s mailutil, imapsync [http://freshmeat.net/projects/imapsync], YippieMove [http://www.yippiemove.com] and Larch [https://github.com/rgrove/larch]. imapsync -------- Here's an example of how to run imapsync for a single user: ---%<------------------------------------------------------------------------- imapsync --syncinternaldates \ --host1 192.168.1.57 --authmech1 LOGIN --user1 leah@example.com --password1 secret \ --host2 127.0.0.1 --authmech2 LOGIN --user2 leah@example.com --password2 secret ---%<------------------------------------------------------------------------- It is quite easy to script this for a number of users, assuming you have their passwords. Even if you do not, imapsync also supports logging in as an admin user that has the ability to copy message for sub users, and a variety of other authentication options. Larch ----- Here's an example of how to run Larch: ---%<------------------------------------------------------------------------- larch --from imap://mail1.example.com --to imap://mail2.example.com ---%<------------------------------------------------------------------------- When run it will ask you for usernames and passwords that will be used for logging into servers, but you can also specify them on the command line. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Debugging.Rawlog.txt0000644000175000017500000000623512244263643015522 00000000000000Rawlog ====== Dovecot supports logging post-login IMAP/POP3 traffic (also TLS/SSL encrypted) using 'rawlog' binary. It works by checking if 'dovecot.rawlog/' directory exists in the logged in user's home directory, and writing the traffic to 'yyyymmdd-HHMMSS-pid.in' and '.out' files. Each connection gets their own in/out files. Rawlog will simply skip users who don't have the 'dovecot.rawlog/' directory and the performance impact for those users is minimal. Home directory -------------- Note that for rawlog to work, your [UserDatabase.txt] must have returned a home directory for the user. If you can't get rawlog to work, you should verify that the home directory really is where you expected it to be by setting 'mail_debug=yes' and checking the logs. You should see a line such as: ---%<------------------------------------------------------------------------- Effective uid=1000, gid=1000, home=/home/user ---%<------------------------------------------------------------------------- In above configuration rawlog would expect to find '/home/user/dovecot.rawlog/' directory writable by uid 1000. If your userdb doesn't have a home directory, with v2.1+ you can add: ---%<------------------------------------------------------------------------- userdb { # ... default_fields = home=/home/%u # or temporarily even e.g. default_fields = home=/tmp/temp-home } ---%<------------------------------------------------------------------------- Configuration ------------- To enable rawlog, you must use rawlog as a [PostLoginScripting.txt]: ---%<------------------------------------------------------------------------- service imap { executable = imap postlogin } service pop3 { executable = pop3 postlogin } service postlogin { executable = script-login -d rawlog unix_listener postlogin { } } ---%<------------------------------------------------------------------------- You can also give parameters to rawlog: * -b: Write IP packet boundaries (or whatever read() sees anyway) to the log files. 
The packet is written between<<< and >>>. * -t: Log a microsecond resolution timestamp at the beginning of each line. * v2.1 and newer: * -f in: Log only to *.in files * -f out: Log only to *.out files * v2.0 and older: * -i: Log only to *.in files * -o: Log only to *.out files Pre-login rawlog (v2.1+) ------------------------ You can enable pre-login rawlog for all users by telling the login processes to log to a rawlog directory, for example: ---%<------------------------------------------------------------------------- service imap-login { executable = imap-login -R rawlogs } ---%<------------------------------------------------------------------------- This tries to write the rawlogs under $base_dir/rawlogs directory. You need to create it first with enough write permissions, e.g.: ---%<------------------------------------------------------------------------- mkdir /var/run/dovecot/login/rawlogs chown dovenull /var/run/dovecot/login/rawlogs chmod 0700 /var/run/dovecot/login/rawlogs ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/SharedMailboxes.Permissions.txt0000644000175000017500000002010112244263660017744 00000000000000Filesystem permissions in shared mailboxes ========================================== IMAP processes need filesystem level permissions to access shared/public mailboxes. This means that: * If you use more than one [UserIds.txt] for your mail users (e.g. you use system users), you'll need to make sure that all users can access the mailboxes on filesystem level. ( [ACL.txt] won't help you with this.) * You can remove write permissions on purpose from public namespace root directory to prevent users from creating new mailboxes under it. Dovecot never modifies permissions for existing mail files or directories. When users share mailboxes between each others, the system must have been set up in a way that filesystem permissions don't get in the way. The easiest way to do that is to use only a single UID. Another possibility would be to use one or more groups for all the mail files that may be shared to other users belonging to the same group. For example if you host multiple domains, you might create a group for each domain and allow mailbox sharing (only) between users in the same domain. System user UNIX groups ----------------------- There's no requirement to use UNIX groups (i.e. typically defined in '/etc/group') for anything. If you don't care about them, you can safely ignore this section. If you use [AuthDatabase.Passwd.txt] userdb, the IMAP process has access to all the UNIX groups defined for that user. You may use these groups when granting filesystem permissions. If you wish to use UNIX groups defined in '/etc/group' but don't use passwd userdb, you can still do this by returning 'system_groups_user' [UserDatabase.ExtraFields.txt], which contains the UNIX user name whose groups are read from the group file. You can also set up extra UNIX groups by listing them in 'mail_access_groups' setting. To have per-user UNIX groups, return 'mail_access_groups' as userdb extra field. The advantage of using this method is that only Dovecot mail processes have access to the group, but nothing else, such as user's SSH session. For example a simple way to set up shared mailbox access for all system users is to make all mail dirs/files 0770/0660 mode and owned by group "sharedmail" and then set 'mail_access_groups=sharedmail'. 
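A minimal sketch of that "sharedmail" setup, assuming the group already exists and the mails live under '/var/mail' (both only examples):

---%<-------------------------------------------------------------------------
chgrp -R sharedmail /var/mail
chmod -R u+rwX,g+rwX,o-rwx /var/mail
---%<-------------------------------------------------------------------------

and in dovecot.conf:

---%<-------------------------------------------------------------------------
mail_access_groups = sharedmail
---%<-------------------------------------------------------------------------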
Using more fine grained groups of course leaks less mail data in case there's a security hole in Dovecot. Permissions for new mailboxes ----------------------------- When creating a new mailbox, Dovecot copies the permissions from the mailbox root directory. For example with mboxes if you have directories: ---%<------------------------------------------------------------------------- drwx--xr-x 8 user group 4096 2009-02-21 18:31 /home/user/mail/ drwxrwxrwx 2 user group 4096 2009-02-21 18:32 /home/user/mail/foo/ ---%<------------------------------------------------------------------------- When creating a new foo/bar/ directory, Dovecot gives it permissions: ---%<------------------------------------------------------------------------- drwx--xr-x 2 user group 4096 2009-02-21 18:33 /home/user/mail/foo/bar/ ---%<------------------------------------------------------------------------- As you can see, the file mode was copied from mail/ directory, not mail/foo/. The group is also preserved. If this causes problems (e.g. different users having different groups create mailboxes, causing permission denied errors when trying to preserve the group) you can set the setgid bit for the root directory: ---%<------------------------------------------------------------------------- chmod g+s /home/user/mail ---%<------------------------------------------------------------------------- This will cause the group to be automatically copied by the OS for all created files/directories under it, even if the user doesn't belong to the group. Permissions for new files in mailboxes -------------------------------------- When creating new files inside a mailbox, Dovecot copies the read/write permissions from the mailbox's directory. For example if you have: ---%<------------------------------------------------------------------------- drwx--xr-x 5 user group 4096 2009-02-21 18:53 /home/user/Maildir/.foo/ ---%<------------------------------------------------------------------------- Dovecot creates files under it with modes: ---%<------------------------------------------------------------------------- drwx--xr-x 2 user group 4096 2009-02-21 18:54 cur/ drwx--xr-x 2 user group 4096 2009-02-21 18:54 new/ drwx--xr-x 2 user group 4096 2009-02-21 18:54 tmp/ -rw----r-- 1 user group 156 2009-02-21 18:54 dovecot.index.log -rw----r-- 1 user group 17 2009-02-21 18:54 dovecot-uidlist ---%<------------------------------------------------------------------------- Note how the g+x gets copied to directories, but for files it's simply ignored. The group is copied the same way as explained in the previous section. When mails are copied between Maildirs, it's usually done by hard linking. If the source and destination directory permissions are different, Dovecot create a new file and copies data the slow way so that it can assign the wanted destination permissions. The source and destination permission lookups are done only by looking at the mailbox root directories' permissions, not individual mail files. This may become a problem if the mail files' permissions aren't as Dovecot expects. Permissions to new /domain/user directories ------------------------------------------- If each user has different UIDs and you have '/var/mail/domain/user/' style directories, you run into a bit of trouble. The problem is that the first user who creates '/var/mail/domain/' will create it as 0700 mode, and other users can't create their own user/ directories under it anymore. 
The solution is to use a common group for the users and set '/var/mail/' directory's permissions properly (group-suid is required): ---%<------------------------------------------------------------------------- chgrp dovemail /var/mail chmod 02770 /var/mail # or perhaps 03770 for extra security ---%<------------------------------------------------------------------------- and in dovecot.conf: ---%<------------------------------------------------------------------------- mail_location = maildir:/var/vmail/%d/%n/Maildir mail_access_groups = dovemail ---%<------------------------------------------------------------------------- The end result should look like this: ---%<------------------------------------------------------------------------- drwxrwsr-x 3 user dovemail 60 Oct 24 12:04 domain.example.com/ drwx--S--- 3 user user 60 Oct 24 12:04 domain.example.com/user/ ---%<------------------------------------------------------------------------- Note that this requires that the mail_location setting is in its explicit format with %variables. Using 'maildir:~/Maildir' won't work, because Dovecot can't really know how far down it should copy the permissions from. Permissions to new user home directories (v2.2+) ------------------------------------------------ When mail_location begins with '%h' or '~/', its permissions are copied from the first existing parent directory if it has setgid-bit set. This isn't done when the path contains any other %variables. Mail Delivery Agent permissions ------------------------------- When using Dovecot , it uses all the same configuration files as IMAP/POP3, so you don't need to worry about it. When using an external MDA to deliver to a shared mailbox, you need to make sure that the resulting files have proper permissions. For example with Procmail + Maildir, set 'UMASK=007' in '.procmailrc' to make the delivered mail files group-readable. To get the file to use the proper group, set the group to the Maildir's 'tmp/' directory and also set its setgid bit ('chmod g+s'). Dictionary files ---------------- Created dictionary files (e.g. 'acl_shared_dict = file:...') also base their initial permissions on parent directory's permissions. After the initial creation, the permissions are permanently preserved. So if you want to use different permissions, just chown/chmod the file. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Authentication.MasterUsers.txt0000644000175000017500000002727512244263637017642 00000000000000Master users/passwords ====================== It's possible to configure master users who are able to log in as other users. It's also possible to directly log in as any user using a master password, although this isn't recommended. Master users ------------ There are two ways for master users to log in as other users: 1. Give the login username in the [Authentication.Mechanisms.txt] authorization ID field. 2. Specify both the master username and the login username in the same username field. The usernames are separated by a string configured by the 'auth_master_user_separator' setting. UW-IMAP uses "*" as the separator, so that could be a good choice. Using "*" as the separator, the master user would log in as "login_user*master_user". Master users are configured by adding a new [PasswordDatabase.txt] with 'master=yes' setting. The users in the master passdb cannot log in as themselves, only as other people. 
That means they don't need to exist in the [UserDatabase.txt], because the userdb lookup is done only for the user they're logging in as. You should also add the 'pass=yes' setting to the master passdb if possible. It means that Dovecot verifies that the login user really exists before allowing the master user to log in. Without the setting if a nonexistent login username is given, depending on the configuration, it could either return an internal login error (the userdb lookup failed) or create a whole new user (with eg. [UserDatabase.Static.txt]). 'pass=yes' doesn't work with PAM or LDAP with 'auth_bind=yes', because both of them require knowing the user's password. 'pass=yes' is especially useful with a [PasswordDatabase.CheckPassword.txt] passdb because the script gets both the login and the master username as environment variables. Other passdbs see only the login username in '%u'. In the future there will probably be another setting to make the user verification to be done from userdb. If you want master users to be able to log in as themselves, you'll need to either add the user to the normal passdb or add the passdb to 'dovecot.conf' twice, with and without 'master=yes'. Note that if the passdbs point to different locations, the user can have a different password when logging in as other users than when logging in as himself. This is a good idea since it can avoid accidentally logging in as someone else. Usually it's better to have only a few special master users that are used *only* to log in as other people. One example could be a special "spam" master user that trains the users' spam filters by reading the messages from the user's spam mailbox. ACLs ---- If plugin is enabled, the Master user is still subject to ACLs just like any other user, which means that by default the master user has no access to any mailboxes of the user. The options for handling this are: 1. Adding a global for the master user. Unfortunately currently you can only create per-mailbox ACLs, there is no "default ACL" that applies to all mailboxes. 2. Assigning 'MASTER_USER=$USER' in a [PostLoginScripting.txt]. The result is that the login username's ACL is used, rather than the master username's. 3. Assigning 'master_user=%u' in [UserDatabase.txt], which basically does the same as the post-login script. 
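A minimal sketch of option 2 above, assuming the usual post-login scripting convention where the script ends by exec'ing the remaining arguments; the script itself is hypothetical:

---%<-------------------------------------------------------------------------
#!/bin/sh
# Make the login user's ACLs apply instead of the master user's
MASTER_USER=$USER
export MASTER_USER
exec "$@"
---%<-------------------------------------------------------------------------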
Example configuration --------------------- ---%<------------------------------------------------------------------------- auth_master_user_separator = * passdb { driver = passwd-file args = /etc/dovecot/passwd.masterusers master = yes pass = yes } passdb { driver = shadow } userdb { driver = passwd } ---%<------------------------------------------------------------------------- Where the 'passwd.masterusers' file would contain the master usernames and passwords: ---%<------------------------------------------------------------------------- admin:{SHA1}nU4eI71bcnBGqeO0t9tXvY1u5oQ= admin2:{SHA1}i+UhJqb95FCnFio2UdWJu1HpV50= ---%<------------------------------------------------------------------------- One way to create this master file is to use the htaccess program as follows: ---%<------------------------------------------------------------------------- htpasswd -b -c -s passwd.masterusers user password ---%<------------------------------------------------------------------------- SQL Example ----------- The master passdb doesn't have to be passwd-file, it could be an SQL query as well: ---%<------------------------------------------------------------------------- auth_master_user_separator = * passdb { driver = sql args = /etc/dovecot/dovecot-sql-master.conf.ext master = yes pass = yes } passdb { driver = sql args = /etc/dovecot/dovecot-sql.conf.ext } userdb { driver = sql args = /etc/dovecot/dovecot-sql.conf.ext } } ---%<------------------------------------------------------------------------- 'dovecot-sql-master.conf.ext' would contain all the normal connection settings and a 'password_query': ---%<------------------------------------------------------------------------- password_query = SELECT password FROM users WHERE userid = '%u' and master_user = true ---%<------------------------------------------------------------------------- Testing ------- ---%<------------------------------------------------------------------------- # telnet localhost 143 * OK Dovecot ready. 1 login loginuser*masteruser masterpass 1 OK Logged in. ---%<------------------------------------------------------------------------- If you had any problems, set 'auth_debug=yes' and look at the logs. Master passwords ---------------- The easiest way to implement this is with SQL: ---%<------------------------------------------------------------------------- password_query = SELECT user, 'master-password' AS password FROM users WHERE userid = '%u' ---%<------------------------------------------------------------------------- If you don't have the users in SQL database, you can still fake it: ---%<------------------------------------------------------------------------- password_query = SELECT '%u' AS user, 'master-password' AS password ---%<------------------------------------------------------------------------- However note that the above will allow logins for any username using the master password, even those that don't really exist. Then in your dovecot.conf, have something like: ---%<------------------------------------------------------------------------- passdb { driver = pam } passdb { driver = sql args = /etc/dovecot-sql-master.conf.ext } ---%<------------------------------------------------------------------------- One way to do this without SQL is to create a [AuthDatabase.PasswdFile.txt] containing every user: ---%<------------------------------------------------------------------------- user1:{plain}master-password user2:{plain}master-password ..etc.. 
---%<------------------------------------------------------------------------- Advanced SQL Examples --------------------- In these example we will create 3 kinds of master users. The first will be users who can read all email for all domains. The next example will be users who can read all email for their domain only. The third example will be users who can read email of domains listed in a separate ownership table. We will use MySQL and create 2 tables with the following structure. ---%<------------------------------------------------------------------------- CREATE TABLE `users` ( `uid` int(4) NOT NULL AUTO_INCREMENT, `user_name` varchar(80) NOT NULL, `domain_name` varchar(80) NOT NULL, `password` varchar(60) DEFAULT NULL, `last_login` datetime DEFAULT NULL, `masteradmin` tinyint(1) NOT NULL DEFAULT '0', `owns_domain` tinyint(1) NOT NULL DEFAULT '0', UNIQUE KEY `emaillookup` (`domain_name`,`user_name`), UNIQUE KEY `uid` (`uid`) ) ENGINE=MyISAM AUTO_INCREMENT=995 DEFAULT CHARSET=latin CREATE TABLE `ownership` ( `login_id` varchar(128) NOT NULL, `owned_object` varchar(128) NOT NULL, UNIQUE KEY `login_id_full` (`login_id`,`owned_object`), KEY `login_id` (`login_id`), KEY `owned_object` (`owned_object`), KEY `login_id_index` (`login_id`), KEY `owned_object_index` (`owned_object`) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 ---%<------------------------------------------------------------------------- The dovecot.conf file for all 3 master user configurations will be as follows: ---%<------------------------------------------------------------------------- passdb { driver = sql args = /etc/dovecot/ownership-sql.conf master = yes pass = yes } passdb { driver = sql args = /etc/dovecot/domain-owner-sql.conf master = yes pass = yes } passdb { driver = sql args = /etc/dovecot/masteradmin-sql.conf master = yes pass = yes } passdb { args = /etc/dovecot/sql.conf driver = sql } ---%<------------------------------------------------------------------------- Before we get into the master user tricks, we start with normal email authentication. The query for that is as follows: ---%<------------------------------------------------------------------------- password_query = SELECT user_name, domain_name, password FROM users WHERE user_name = '%n' AND domain_name = '%d' ---%<------------------------------------------------------------------------- In this first example master admin suppose you want to allow a few people to be master users over all domains. These users will have the "masteradmin" field set to 1. The query would be: ---%<------------------------------------------------------------------------- password_query = SELECT user_name, domain_name, password FROM users WHERE user_name = '%n' AND domain_name = '%d' AND masteradmin='1' ---%<------------------------------------------------------------------------- In the second example suppose you are hosting multiple domains and you want to allow a few users to become master users of their domain only. Your query would be as follows: ---%<------------------------------------------------------------------------- password_query = SELECT user_name, domain_name, password FROM users WHERE user_name = '%n' \ AND domain_name = '%d' AND owns_domain='1' AND '%d'='%{login_domain}' ---%<------------------------------------------------------------------------- This will allow you to log in using the following to read Joe's email if master@dovecot.org is flagged as the domain_owner. 
---%<------------------------------------------------------------------------- joe@dovecot.org*master@dovecot.org ---%<------------------------------------------------------------------------- In this third example we have a table of owners. There are a list of pairs between owner email addresses and domains that are owned. That way if a person controls a lot of domains then they can view all the users in all the domains they control. The query would be as follows: ---%<------------------------------------------------------------------------- password_query = SELECT user_name, domain_name, password FROM users, ownership WHERE \ user_name = '%n' AND domain_name = '%d' AND login_id='%u' AND owned_object='%{login_domain}' ---%<------------------------------------------------------------------------- If you really want to get tricky and efficient you can combine all 3 queries into one giant query that does everything. ---%<------------------------------------------------------------------------- password_query = SELECT user_name, domain_name, password FROM users, ownership WHERE \ user_name = '%n' AND domain_name = '%d' AND ( \ (masteradmin='1') OR \ (owns_domain='1' AND '%d'='%{login_domain}') OR \ (login_id='%u' and owned_object='%{login_domain}')) \ group by uid ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/mutt.txt0000644000175000017500000000613612244263672013370 00000000000000Contents 1. Using mutt with IMAP 1. configuration 2. problems 1. Move read messages to mbox 2. New mail in a folder just left 3. Authentication realm 2. References Using mutt with IMAP ==================== configuration ------------- First, mutt needs to be told to use IMAP. To achieve this, edit '~/.muttrc' (or a file it 'source's) to contain: ---%<------------------------------------------------------------------------- set spoolfile=imap://user@hostname/INBOX set folder=imap://user@hostname/ ---%<------------------------------------------------------------------------- problems -------- Move read messages to mbox -------------------------- If mutt asks 'Move read messages to /home/$user/mbox? ([no]/yes):' Your alternatives (to set in '~/.muttrc') are (pick one and ignore the others): 1. don't ask about moving messages, just do it: ---%<--------------------------------------------------------------------- set move=yes ---%<--------------------------------------------------------------------- 2. don't ask about moving messages and _don't_ do it: ---%<--------------------------------------------------------------------- set move=no ---%<--------------------------------------------------------------------- 3. ask about moving message, default answer 'yes': ---%<--------------------------------------------------------------------- set move=ask-yes ---%<--------------------------------------------------------------------- 4. ask about moving message, default answer 'no': ---%<--------------------------------------------------------------------- set move=ask-no ---%<--------------------------------------------------------------------- New mail in a folder just left ------------------------------ There is a bug with dovecot in <=0.99. 
It can sometimes make mutt think there is new email in a mailbox you just 'left'...http://www.dovecot.org/list/dovecot/2004-February/002950.html Authentication realm -------------------- If you are using IMAP with SASL and get error messages like 'Invalid realm' in your dovecot log files, try putting this in your '.muttrc' or '/etc/Muttrc]': ---%<------------------------------------------------------------------------- set imap_authenticators="plain" ---%<------------------------------------------------------------------------- It might be easier however to just put the following in your 'dovecot.conf': For Dovecot 1.0: ---%<----------------------------------------------------------------------- auth default { mechanisms = plain } ---%<----------------------------------------------------------------------- For Dovecot 0.99: ---%<----------------------------------------------------------------------- auth_mechanisms = plain ---%<----------------------------------------------------------------------- References ========== * Official mutt homepage [http://www.mutt.org/] * mutt with IMAP documentation [http://mutt.sourceforge.net/imap/] * http://jamespo.org.uk/blog/archives/000271.html (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/SecurityTuning.txt0000644000175000017500000000165612244263660015372 00000000000000Security tuning =============== Dovecot is pretty secure out-of-the box. It uses multiple processes and privilege separation to isolate different parts from each others in case a security hole is found from one part. Some things you can do more: * Allocate each user their own UID and GID (see ) * Use a separate /dovecot-auth/ user for authentication process (see ) * You can chroot authentication and mail processes (see ) * Compiling Dovecot with garbage collection ('--with-gc' configure option) fixes at least in theory any security holes caused by double free()s. However this hasn't been tested much and there may be problems. * There are some security related SSL settings (see ) * Set 'first/last_valid_uid/gid' settings to contain only the range actually used by mail processes (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/SharedMailboxes.Symlinks.txt0000644000175000017500000000334112244263661017252 00000000000000Mailbox sharing with symlinks ============================= It's possible to share mailboxes simply by symlinking them among user's private mailboxes. See for issues related to filesystem permissions. Maildir ------- ---%<------------------------------------------------------------------------- ln -s /home/user2/Maildir/.Work /home/user1/Maildir/.shared.user2 ln -s /home/user3/Maildir/.Work /home/user1/Maildir/.shared.user3 ---%<------------------------------------------------------------------------- Now user1 has a "shared" directory containing "user2" and "user3" child mailboxes, which point to those users' "Work" mailbox. With Maildir++ layout it's not possible to automatically share "mailbox and its children". You'll need to symlink each mailbox separately. With v1.1+ you could do this by using "fs" layout for mailboxes (requires converting existing maildirs from Maildir++). 
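For reference, the "fs" layout mentioned above is selected through the 'mail_location' setting; a minimal sketch (the maildir path here is only an example, adjust it to your setup):

---%<-------------------------------------------------------------------------
mail_location = maildir:~/Maildir:LAYOUT=fs
---%<-------------------------------------------------------------------------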
mbox ---- Doing the same as in the above Maildir example: ---%<------------------------------------------------------------------------- mkdir /home/user1/mail/shared ln -s /home/user2/mail/Work /home/user1/mail/shared/user2 ln -s /home/user3/mail/Work /home/user1/mail/shared/user3 ---%<------------------------------------------------------------------------- One additional problem with mbox format is the creation of dotlock files. The dotlocks would be created under user1's directory, which makes them useless. Make sure the locking works properly with only fcntl or flock locking (See ) and just disable dotlocks. Alternatively instead of symlinking an mbox file, put the shared mailboxes inside a directory and symlink the entire directory. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Migration.Courier.txt0000644000175000017500000000664012244263650015733 00000000000000Courier IMAP/POP3 ================= *WARNING: Badly done migration will cause your IMAP and/or POP3 clients to re-download all mails. Read page first carefully.* Courier v0.43 and later to Dovecot v1.1+ ---------------------------------------- courier-dovecot-migrate.pl [http://www.dovecot.org/tools/courier-dovecot-migrate.pl] does a perfect migration from Courier IMAP and POP3, preserving IMAP UIDs and POP3 UIDLs. It reads Courier's 'courierimapuiddb' and 'courierpop3dsizelist' files and produces 'dovecot-uidlist' file from it. Before doing the actual conversion you can run the script and see if it complains about any errors and such, for example: ---%<------------------------------------------------------------------------- # ./courier-dovecot-migrate.pl --to-dovecot --recursive /home Finding maildirs under /home /home/user/Maildir/dovecot-uidlist already exists, not overwritten /home/user/Maildir2: No imap/pop3 uidlist files Total: 69 mailboxes / 6 users 0 errors No actual conversion done, use --convert parameter ---%<------------------------------------------------------------------------- The actual conversion can be done for all users at once by running the script with '--convert --recursive' parameters. Make sure the conversion worked by checking that 'dovecot-uidlist' files were created to all maildirs (including to subfolders). The '--recursive' option goes through only one level down in directory hierarchies. This means that if you have some kind of a directory hashing scheme (or even domain/username/), it won't convert all of the files. You can also convert each user as they log in for the first time, using with a script something like: ---%<------------------------------------------------------------------------- #!/bin/sh # WARNING: Be sure to use mail_drop_priv_before_exec=yes, # otherwise the files are created as root! courier-dovecot-migrate.pl --quiet --to-dovecot --convert Maildir # This is for imap, create a similar script for pop3 too exec /usr/local/libexec/dovecot/imap ---%<------------------------------------------------------------------------- FIXME: The script should rename also folder names that aren't valid mUTF-7. Dovecot can't otherwise access such folders. Dovecot configuration --------------------- Courier by default uses "INBOX." as the IMAP namespace for private mailboxes. If you want a transparent migration, you'll need to configure Dovecot to use a namespace with "INBOX." prefix as well. ---%<------------------------------------------------------------------------- mail_location = maildir:~/Maildir namespace { prefix = INBOX. separator = . 
inbox = yes } ---%<------------------------------------------------------------------------- Manual conversion ----------------- * Courier's 'courierimapsubscribed' file is compatible with Dovecot's 'subscriptions' file, but you need to remove the "INBOX." prefixes from the mailboxes./This is true even if you set namespace prefix to "INBOX." as described above./ * Courier's 'courierimapuiddb' file is compatible with Dovecot's 'dovecot-uidlist' file, just rename it. * Courier's message flags are compatible with Dovecot (as they are specified by the Maildir specification) * Courier's message keywords implementation isn't Dovecot compatible. There doesn't exist a simple way to convert the keywords manually. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Plugins.MailLog.txt0000644000175000017500000000310012244263657015332 00000000000000Mail logger plugin ================== This plugin can be used to log several actions done in a mail session: * Setting and removing \Deleted flag * Expunging * Copying mails to another mailbox * Mailbox creations * Mailbox deletions * Mailbox renames * Any flag changes * Saves Messages' UID and Message-ID header is logged for each action. Here's an example: ---%<------------------------------------------------------------------------- imap(user): copy -> Trash: uid=908, msgid=<123.foo@bar> imap(user): deleted: uid=908, msgid=<123.foo@bar> imap(user): expunged: uid=908, msgid=<123.foo@bar> ---%<------------------------------------------------------------------------- You can enable the plugin globally for all services by setting: ---%<------------------------------------------------------------------------- mail_plugins = $mail_plugins mail_log notify ---%<------------------------------------------------------------------------- The notify plugin is required for the mail_log plugin's operation, so be certain it's also enabled. Configuration ------------- You can configure what and how mail_log plugin logs: ---%<------------------------------------------------------------------------- plugin { # Events to log. Also available: flag_change save mailbox_create mail_log_events = delete undelete expunge copy mailbox_delete mailbox_rename # Also available: flags vsize from subject mail_log_fields = uid box msgid size } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.Storage.Mailbox.txt0000644000175000017500000000315412244263644016601 00000000000000Mailbox ======= 'src/lib-storage/mail-storage.h' and 'mail-storage-private.h' describes mailbox API, among others. Mailbox life cycle often goes like: * 'mailbox_alloc()' allocates memory for the mailbox and initializes some internal settings, but doesn't actually try to open it. * 'mailbox_open()' opens the mailbox. Instead of opening a mailbox, you can also create it with 'mailbox_create()'. * If you're immediately syncing the mailbox, you don't need to open it, because it's done implicitly. This reduces your code and error handling a bit. * 'mailbox_close()' closes the mailbox, so that it needs to be opened again if it's wanted to be accessed. This is rarely needed. * 'mailbox_free()' closes and frees the mailbox. There are a lot of functions to deal with mailboxes. The most important ones are: * 'mailbox_get_status()' to get a summary of mailbox, such as number of messages in it. 
* [Design.Storage.Mailbox.Sync.txt]: 'mailbox_sync_*()' to synchronize changes from the backend to memory. * [Design.Storage.Mailbox.Transaction.txt]: 'mailbox_transaction_*()' for transaction handling. All message reads and writes are done in a transaction. * [Design.Storage.Mailbox.Search.txt]: 'mailbox_search_*()' is used for searching messages. Even simple operations like "get all messages" go through this API, it'll then simply do "search all". * [Design.Storage.Mailbox.Save.txt]: 'mailbox_save_*()' and 'mailbox_copy()' is used for saving/copying new messages to mailbox. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Migration.Vm-pop3d.txt0000644000175000017500000000337412244263650015731 00000000000000Vm-pop3d ======== *WARNING: Badly done migration will cause your IMAP and/or POP3 clients to re-download all mails. Read page first carefully.* Vm-pop3d uses the Message-ID: header data for UIDL, Dovecot does not support this as it is not unique enough. The following Perl script will take the Message-ID: data from all mails in a mbox and put the data into the X-UIDL: header which Dovecot can use with the 'pop3_reuse_xuidl' setting: ---%<------------------------------------------------------------------------- #!/usr/bin/env perl use Email::Simple; my @totalmail=; my $mail = join("",@totalmail); my $email = Email::Simple->new($mail); my $msg_id = $email->header("Message-Id"); $msg_id =~ s#<##g; $msg_id =~ s#>##g; $email->header_set("X-UIDL", $msg_id); print $email->as_string; ---%<------------------------------------------------------------------------- Requires email::simple, though the default setting in email::simple is to wrap headers at 77 characters, which then causes problems when Outlook clients issue UIDL, the workaround for this is to edit the Perl module, on Debian Etch this involves editing '/usr/share/perl5/Email/Simple.pm', find "sub _fold" and change {0,77} to a suitably higher value. On more recent versions of email::simple, you may need to edit 'Headers.pm' instead, with the line to look for being "_default_fold_at" You then run the script like so: ---%<------------------------------------------------------------------------- formail -q- -s perl script.pl < inbox > newinbox ---%<------------------------------------------------------------------------- When Dovecot now looks at newinbox, it will use the X-UIDL: header and clients will not redownload mail. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PasswordDatabase.PAM.txt0000644000175000017500000002001012244263656016227 00000000000000PAM - Pluggable Authentication Modules ====================================== This is the most common way to authenticate system users nowadays. PAM is not itself a password database, but rather its configuration tells the system how exactly to do the authentication. Usually this means using the 'pam_unix.so' module, which authenticates user from the system's shadow password file. Because PAM is not an actual database, only plaintext authentication mechanisms can be used with PAM. PAM cannot be used as a user database either (although static user templates could be used to provide the same effect). Usually PAM is used with [AuthDatabase.Passwd.txt] (NSS) or [UserDatabase.Static.txt] user databases. Dovecot should work with Linux PAM, Solaris PAM, OpenPAM (FreeBSD) and ApplePAM (Mac OS X). 
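A minimal PAM setup in 'dovecot.conf' usually pairs the pam passdb with a passwd (NSS) userdb. This is only a baseline sketch; the sections below cover the PAM-specific options:

---%<-------------------------------------------------------------------------
passdb {
  driver = pam
}

userdb {
  driver = passwd
}
---%<-------------------------------------------------------------------------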
Service name ------------ The PAM configuration is usually in the '/etc/pam.d/' directory, but some systems may use a single file,'/etc/pam.conf'. By default Dovecot uses 'dovecot' as the PAM service name, so the configuration is read from '/etc/pam.d/dovecot'. You can change this by giving the wanted service name in the 'args' parameter. You can also set the service to '%s' in which case Dovecot automatically uses either 'imap' or 'pop3' as the service, depending on the actual service the user is logging in to. Here are a few examples: * Use '/etc/pam.d/imap' and '/etc/pam.d/pop3': ---%<---------------------------------------------------------------------- passdb { driver = pam args = %s } ---%<---------------------------------------------------------------------- * Use '/etc/pam.d/mail': ---%<---------------------------------------------------------------------- passdb { driver = pam args = mail } ---%<---------------------------------------------------------------------- PAM sessions ------------ By giving a 'session=yes' parameter, you can make Dovecot open a PAM session and close it immediately. Some PAM plugins need this, for instance 'pam_mkhomedir'. With this parameter, 'dovecot.conf' might look something like this: ---%<------------------------------------------------------------------------- passdb { driver = pam args = session=yes dovecot } ---%<------------------------------------------------------------------------- PAM credentials --------------- By giving a 'setcred=yes' parameter, you can make Dovecot create PAM credentials. Some PAM plugins need this. The credentials are never deleted however, so using this might cause problems with other PAM plugins. Limiting the number of PAM lookups ---------------------------------- Usually in other software PAM is used to do only a single lookup in a process, so PAM plugin writers haven't done much testing on what happens when multiple lookups are done. Because of this, many PAM plugins leak memory and possibly have some other problems when doing multiple lookups. If you notice that PAM authentication stops working after some time, you can limit the number of lookups done by the auth worker process before it dies: ---%<------------------------------------------------------------------------- passdb { driver = pam args = max_requests=100 } ---%<------------------------------------------------------------------------- The default max_requests value is 100. Username changing ----------------- A PAM module can change the username. Making PAM plugin failure messages visible ------------------------------------------ You can replace the default "Authentication failed" reply with PAM's failure reply by setting: ---%<------------------------------------------------------------------------- passdb { driver = pam args = failure_show_msg=yes } ---%<------------------------------------------------------------------------- This can be useful with e.g. pam_opie to find out which one time password you're supposed to give: ---%<------------------------------------------------------------------------- 1 LOGIN username otp 1 NO otp-md5 324 0x1578 ext, Response: ---%<------------------------------------------------------------------------- Caching ------- Dovecot supports caching password lookups by setting 'auth_cache_size' to non-zero value. For this to work with PAM, you'll also have to give 'cache_key' parameter. 
Usually the user is authenticated only based on the username and password, but PAM plugins may do all kinds of other checks as well, so this can't be relied on. For this reason the 'cache_key' must contain all the [Variables.txt] that may affect authentication. The commonly used variables are: * '%u' - Username. You'll most likely want to use this. * '%s' - Service. If you use '*' as the service name you'll most likely want to use this. * '%r' - Remote IP address. Use this if you do any IP related checks. * '%l' - Local IP address. Use this if you do any checks based on the local IP address that was connected to. Examples: ---%<------------------------------------------------------------------------- # 1MB auth cache size auth_cache_size = 1024 passdb { driver = pam # username and service args = cache_key=%u%s * } ---%<------------------------------------------------------------------------- ---%<------------------------------------------------------------------------- # 1MB auth cache size auth_cache_size = 1024 passdb { driver = pam # username, remote IP and local IP args = cache_key=%u%r%l dovecot } ---%<------------------------------------------------------------------------- Examples -------- Linux ----- Here is an example '/etc/pam.d/dovecot' configuration file which uses standard UNIX authentication: ---%<------------------------------------------------------------------------- auth required pam_unix.so nullok account required pam_unix.so ---%<------------------------------------------------------------------------- Solaris ------- For Solaris you will have to edit '/etc/pam.conf'. Here is a working Solaris example (using 'args = *' instead of the default 'dovecot' service): ---%<------------------------------------------------------------------------- imap auth requisite pam_authtok_get.so.1 imap auth required pam_unix_auth.so.1 imap account requisite pam_roles.so.1 imap account required pam_unix_account.so.1 imap session required pam_unix_session.so.1 pop3 auth requisite pam_authtok_get.so.1 pop3 auth required pam_unix_auth.so.1 pop3 account requisite pam_roles.so.1 pop3 account required pam_unix_account.so.1 pop3 session required pam_unix_session.so.1 ---%<------------------------------------------------------------------------- Mac OS X -------- On Mac OS X, the '/etc/pam.d/dovecot' file should look like this: ---%<------------------------------------------------------------------------- auth required pam_nologin.so auth sufficient pam_securityserver.so auth sufficient pam_unix.so auth required pam_deny.so account required pam_permit.so password required pam_deny.so session required pam_uwtmp.so ---%<------------------------------------------------------------------------- ...which, as the equivalent of '/etc/pam.d/login' on OS X 10.4, can be represented as the following on that OS: ---%<------------------------------------------------------------------------- passdb { driver = pam args = login } ---%<------------------------------------------------------------------------- On Mac OS X, "passwd" can be used as a userdb to fill in UID, GID, and homedir information after PAM was used as a passdb, even though Directory Services prevents "passdb passwd" from working as a username/password authenticator. 
This will provide full system user authentication with true homedir mail storage, without resorting to a single virtual mail user or LDAP: ---%<------------------------------------------------------------------------- userdb { driver = passwd } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Pigeonhole.Sieve.Extensions.SpamtestVirustest.txt0000644000175000017500000001324112244263656023425 00000000000000Pigeonhole Sieve: Spamtest and Virustest Extensions =================================================== Using the *spamtest* and *virustest* extensions (RFC 5235 [http://tools.ietf.org/html/rfc5235/]), the Sieve language provides a uniform and standardized command interface for evaluating spam and virus tests performed on the message. Users no longer need to know what headers need to be checked and how the scanner's verdict is represented in the header field value. They only need to know how to use the *spamtest* (*spamtestplus*) and *virustest* extensions. This also gives GUI-based Sieve editors the means to provide a portable and easy to install interface for spam and virus filter configuration. The burden of specifying which headers need to be checked and how the scanner output is represented falls onto the Sieve administrator. Configuration ------------- The *spamtest*, *spamtestplus* and *virustest* extensions are not enabled by default and thus need to be enabled explicitly using the 'sieve_extensions' setting. The following settings need to be configured for using the *spamtest* and *spamtestplus* extensions. The *virustest* extension has identical configuration settings, but with a ''sieve_virustest_'' prefix instead of a ''sieve_spamtest_'' prefix: sieve_spamtest_status_type = "score" / "strlen" / "text": This specifies the type of status result that the spam/virus scanner produces. This can either be a numeric score ('score'), a string of identical characters ('strlen'), e.g. ''*******'', or a textual description ('text'), e.g.'{{{Spam}}}' or ''Not Spam''. sieve_spamtest_status_header = [ ":" ]: This specifies the header field that contains the result information of the spam scanner and it may express the syntax of the content of the header. If no matching header is found in the message, the spamtest command will match against "0". : This is a structured setting. The first part specifies the header field name. Optionally, a POSIX regular expression follows the header field name, separated by a colon. Any white space directly following the colon is not part of the regular expression. If the regular expression is omitted, any header content is accepted and the full header value is used. When a regular expression is used, it must specify one match value (inside brackets) that yields the desired spam scanner result. If the header does not match the regular expression or if no value match is found, the 'spamtest' test will match against "0" during Sieve script execution. sieve_spamtest_max_value =: This statically specifies the maximum value a numeric spam score can have. sieve_spamtest_max_header = [ ":" ]: Some spam scanners include the maximum score value in one of their status headers. Using this setting, this maximum can be extracted from the message itself in stead of specifying the maximum manually using the setting 'sieve_spamtest_max_value' explained above. The syntax is identical to the 'sieve_spamtext_status_header' setting. 
sieve_spamtest_text_valueX =: When the 'sieve_spamtest_status_type' setting is set to 'text', these settings specify that the 'spamtest' test will match against the value "'X'" when the specified string is equal to the text (extracted) from the status header. For *spamtest* and *spamtestplus*, values of X between 0 and 10 are recognized, while *virustest* only uses values between 0 and 5. Examples -------- This section shows several configuration examples. Each example shows a specimen of valid virus/spam test headers that the given configuration willwork on. Example 1 --------- Spam header: 'X-Spam-Score: No, score=-3.2' ---%<------------------------------------------------------------------------- plugin { sieve_extensions = +spamtest +spamtestplus sieve_spamtest_status_type = score sieve_spamtest_status_header = \ X-Spam-Score: [[:alnum:]]+, score=(-?[[:digit:]]+\.[[:digit:]]) sieve_spamtest_max_value = 5.0 } ---%<------------------------------------------------------------------------- Example 2 --------- Spam header: 'X-Spam-Status: Yes' ---%<------------------------------------------------------------------------- plugin { sieve_extensions = +spamtest +spamtestplus sieve_spamtest_status_type = text sieve_spamtest_status_header = X-Spam-Status sieve_spamtest_text_value1 = No sieve_spamtest_text_value10 = Yes } ---%<------------------------------------------------------------------------- Example 3 --------- Spam header: 'X-Spam-Score: sssssss' ---%<------------------------------------------------------------------------- plugin { sieve_extensions = +spamtest +spamtestplus sieve_spamtest_status_header = X-Spam-Score sieve_spamtest_status_type = strlen sieve_spamtest_max_value = 5 } ---%<------------------------------------------------------------------------- Example 4 --------- Spam header: 'X-Spam-Score: status=3.2 required=5.0' Virus header: 'X-Virus-Scan: Found to be clean.' ---%<------------------------------------------------------------------------- plugin { sieve_extensions = +spamtest +spamtestplus +virustest sieve_spamtest_status_type = score sieve_spamtest_status_header = \ X-Spam-Score: score=(-?[[:digit:]]+\.[[:digit:]]).* sieve_spamtest_max_header = \ X-Spam-Score: score=-?[[:digit:]]+\.[[:digit:]] required=([[:digit:]]+\.[[:digit:]]) sieve_virustest_status_type = text sieve_virustest_status_header = X-Virus-Scan: Found to be (.+)\. sieve_virustest_text_value1 = clean sieve_virustest_text_value5 = infected } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/NFS.txt0000644000175000017500000001516412244263650013022 00000000000000NFS === NFS is commonly used in one of these ways: 1. Dovecot is run in a single computer. 2. Dovecot is run in multiple computers, users are redirected more or less randomly to different computers. 3. Dovecot is run in multiple computers, each user is assigned a specific computer which is used whenever possible. 
*The only way to reliably implement the 2nd setup is with the [Director.txt] service.* Dovecot configuration --------------------- Single-server setup or [Director.txt] setup: ---%<------------------------------------------------------------------------- mmap_disable = yes dotlock_use_excl = no # only needed with NFSv2, NFSv3+ supports O_EXCL and it's faster mail_fsync = always mail_nfs_storage = no mail_nfs_index = no ---%<------------------------------------------------------------------------- Multi-server setup that *tries* to flush NFS caches (increases NFS operations, and *isn't fully reliable*): ---%<------------------------------------------------------------------------- mmap_disable = yes dotlock_use_excl = no # only needed with NFSv2, NFSv3+ supports O_EXCL and it's faster mail_fsync = always mail_nfs_storage = yes mail_nfs_index = yes ---%<------------------------------------------------------------------------- Common issues ------------- Clock synchronization --------------------- Run ntpd in the NFS server and all the NFS clients to make sure their clocks are synchronized. If the clocks are more than one second apart from each others and multiple computers access the same mailbox simultaneously, you may get errors. NFS caching problems -------------------- NFS caching is a big problem when multiple computers are accessing the same mailbox simultaneously. The best fix for this is to prevent it from happening. Configure your setup so that a user always gets redirected to the same server (unless it's down). This also means that mail deliveries must be done by the same server, or alternatively it shouldn't update index files. Dovecot flushes NFS caches when needed if you set 'mail_nfs_storage=yes', but unfortunately this doesn't work 100%, so you can get random errors. Disabling NFS attribute cache helps a lot in getting rid of caching related errors, but this makes the performance MUCH worse and increases the load on NFS server. This can usually be done by giving 'actimeo=0' or 'noac' mount option. Index files ----------- If you keep the index files stored on NFS, you'll need to set 'mmap_disable=yes'. If you're not running lockd you'll have to set 'lock_method=dotlock', but this degrades performance. Note that some NFS installations have problems with lockd. If you're beginning to get all kinds of locking related errors, try if the problems go away with dotlocking. With mbox/Maildir format it's also possible to store index files on local disk instead of on NFS. If the user gets redirected to different servers, the local indexes are automatically created/updated. If the user is (nearly) always redirected to the same server this should be fine and you would likely get higher performance than indexes stored on NFS, but if the server changes it can be slow to recreate the index/cache files. Single computer setup --------------------- This doesn't really differ from keeping mails stored locally. For better performance you should keep index files stored in a local disk. Random redirects to multiple servers ------------------------------------ You should avoid this setup whenever possible. Besides the NFS cache problems described above, mailbox contents can't be cached as well in the memory either. This is more problematic with mbox than with maildir, but in both cases if a client is redirected to a different server when reconnecting, the new server will have to read some data via the NFS into memory, while the original server might have had the data already cached. 
If you choose to use this setup, at the very least try to make connections from a single IP redirected into the same server. This avoids the biggest problems with clients that use multiple connections. Per-user redirects to multiple servers -------------------------------------- This method performs a lot better than random redirects. It maximizes the caching possibilities and prevents the problems caused by simultaneous mailbox access. New mail deliveries are often still handled by different computers. This isn't a problem with maildir as long as you're not using (i.e. dovecot-uidlist file or index files shouldn't get updated). It shouldn't be a problem with mboxes either as long as you're using fcntl locking. This problem can be fully solved by using LMTP protocol to deliver the mails to the correct server (possibly using Dovecot's LMTP proxy). NFS clients =========== Here's a list of kernels that have been tried as NFS clients: * FreeBSD has a caching bug [http://www.freebsd.org/cgi/query-pr.cgi?pr=123755] which causes problems when mailbox is being accessed from different computers at the same time * Linux 2.6.16: 'utime()' is buggy, fix in here [http://client.linux-nfs.org/Linux-2.6.x/2.6.16/linux-2.6.16-007-fix_setattr_clobber.dif]. With the fix applied, utime() seems to work perfectly. High-volume systems may experience VFS lock sync issues and for these the complete patchset at http://www.linux-nfs.org/Linux-2.6.x/2.6.16/linux-2.6.16-NFS_ALL.dif is suggested and appears to work well in production. * Linux 2.6.18: Seems to have intermittent caching issues. The same .config with 2.6.20.1 has been tested and appears to work well. * Linux 2.4.8: Has caching problems, don't know if they can be solved * Solaris: If it's completely broken, see http://dovecot.org/list/dovecot/2006-December/018145.html * The Connectathon test suite is very useful to verify a healthy NFS setup, see http://www.connectathon.org/nfstests.html Misc notes ========== * readdirplus isn't really needed by Dovecot and it can slow down some NFS servers. Use "nordirplus" mount option to disable it. * Dovecot doesn't care about root_squash setting, all the root-owned files are in /var/run typically which is not in NFS * In an environment using Debian (2.6.18) clients with Isilon NFS cluster nodes - the following mount options were found to be the most successful:'rsize=32768,wsize=32768,hard,fg,lock,nfsvers=3,tcp,retrans=0,actimeo=0 0 0' * As explained above, actimeo=0 will make the performance bad. With v1.1 use mail_nfs_* settings instead. * To learn more about NFS caching and other issues, mostly from a programmer's point of view, see NFS Coding HOWTO [http://iki.fi/tss/nfs-coding-howto.html] (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Clients.txt0000644000175000017500000001251712244263643013776 00000000000000Client issues and configuration =============================== It seems to be quite difficult to implement a working IMAP client. Best Practices for Implementing an IMAP Client [http://www.imapwiki.org/ClientImplementation] tries to help you with it. Contents 1. Client issues and configuration 1. Apple Mail.app 2. Outlook 3. Outlook Express 6 4. Netscape Mail 5. Evolution 6. Mulberry 7. Thunderbird 8. Mutt 9. Pine 10. SquirrelMail 11. Horde IMP 12. RoundCube Webmail 13. 
@Mail Webmail Apple Mail.app -------------- On Mac OS X Leopard 10.5 Mail.app appears to support subscribe/unsubscribe by right clicking on a mailbox, selecting 'Get Account Info' and selecting 'Subscription List' from tabs. This however doesn't really work with any IMAP server. Apple Mail 3.6 (that comes with OS X 10.5 Leopard) supports subscribing/unsubscribing to folders in the public namespace. Outlook ------- * You should enable 'outlook-no-nuls' workaround with POP3. * Outlook 2003 has problems with older Dovecot's default POP3 UIDL format, which causes it to download the same mails over and over again if "leave mails to server" option is enabled. See 'pop3_uidl_format' setting. * Outlook might not hide or purge deleted items by default. Microsoft has a how-to that shows how to fix this [http://office.microsoft.com/en-us/outlook/HP100804201033.aspx] (Outlook 2007, not Outlook 2003). * If some Outlook users don't see new or sent mails in the appropriate folders after a migration from UW IMAPd even if they are visible in other clients (e.g. Roundcube, Thunderbird, or on the disk itself), and you get the error message "BAD Error in IMAP command UID: Invalid UID messageset" in the log or rawlog: It helps to remove the problematic IMAP account completely from Outlook and recreating it again there. It speaks a different IMAP afterwards, so there are reasons to believe it caches the details of some server on the first connect and doesn't refresh them even if you change the server's hostname in the account settings. Outlook Express 6 ----------------- * Using "Headers only" synchronization is buggy and can cause "Message is no longer available on this server" error when opening a mail. This isn't Dovecot specific problem, and I'm not aware of any possible workarounds at the moment for this in server side. * You should enable 'delay-newmail' workarounds for IMAP. * You should enable 'outlook-no-nuls' and 'oe-ns-eoh' workarounds for POP3. Netscape Mail ------------- I'm not actually sure what version exactly this refers to. * You should enable 'oe-ns-eoh' workaround for POP3. Evolution --------- * Some versions don't support creating subfolders with mbox format. Evolution in Ubuntu Gutsy, 2.12.0-0ubuntu5, does support creating subfolders, at least when the parent folder is empty. Mulberry -------- Seems to be OK. Thunderbird ----------- * If you're using [MailboxFormat.mbox.txt], [MailboxFormat.dbox.txt] or [MailLocation.Maildir.txt] with ':LAYOUT=fs' , * You should enable 'tb-extra-mailbox-sep' workaround for IMAP. Bug report [https://bugzilla.mozilla.org/show_bug.cgi?id=29926]. * If you're using [MailboxFormat.mbox.txt]: * If you are not using a technique to allow folders that contain both sub-folders and messages (e.g. see ) then you will have to disable "Server supports folders that contain sub-folders and messages" setting from Thunderbird.Enhancement request [https://bugzilla.mozilla.org/show_bug.cgi?id=284933]. * Versions of Thunderbird from at least 17 (possibly earlier) up to 24.0 display incorrect new mail counts in the New Mail notification box. This is due to a bug in Thunderbird's handling of the CONDSTORE extension. See Bug Report [https://bugzilla.mozilla.org/show_bug.cgi?id=885220] for details and a client-side workaround. Mutt ---- * New mutt versions supporting IDLE command will hang with Dovecot versions earlier than v1.0beta3. Upgrade Dovecot or disable IDLE by setting imap_idle=no in .muttrc. * [mutt.txt] Pine ---- Seems to be OK. 
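The IMAP and POP3 workarounds mentioned in the client sections above are enabled with the 'imap_client_workarounds' and 'pop3_client_workarounds' settings; a sketch that enables the ones discussed here (enable only what your clients actually need):

---%<-------------------------------------------------------------------------
protocol imap {
  imap_client_workarounds = delay-newmail tb-extra-mailbox-sep
}

protocol pop3 {
  pop3_client_workarounds = outlook-no-nuls oe-ns-eoh
}
---%<-------------------------------------------------------------------------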
SquirrelMail ------------ * Configuration asks IMAP server name for some workarounds. There has been a Dovecot option since 1.4.6 and 1.5.1. For olderSquirrelMail versions, select the "other" option and remove the default INBOX-prefix. Horde IMP --------- Dovecot namespace detection works automatically with any recent version of IMP (4.1+). Quota support is now integrated into the 'imap' driver (as of horde-groupware V1.2), an example config of /imp/config/servers.php is: ---%<------------------------------------------------------------------------- $servers['imap'] = array( 'name' => 'IMAP Server', 'server' => 'localhost', 'hordeauth' => false, 'protocol' => 'imap/notls', 'port' => 143, 'quota' => array('driver'=>'imap'), ); ---%<------------------------------------------------------------------------- RoundCube Webmail ----------------- Works fine. @Mail Webmail ------------- Uses the namespace returned via Dovecot, full support via IMAP/POP3 using @Mail [http://atmail.com/]. Can also read mailbox quota via the getquotaroot IMAP command. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PasswordDatabase.txt0000644000175000017500000001506212244263655015625 00000000000000Password Databases ================== Dovecot authenticates users against password databases. It can also be used to configure things like [PasswordDatabase.ExtraFields.Proxy.txt]. You can use multiple databases, so if the password doesn't match in the first database, Dovecot checks the next one. This can be useful if you want to easily support having both virtual users and also local system users (see ). Success/failure databases ------------------------- These databases simply verify if the given password is correct for the user. Dovecot doesn't get the correct password from the database, it only gets a "success" or a "failure" reply. This means that these databases can't be used with non-plaintext [Authentication.Mechanisms.txt]. Databases that belong to this category are: * [PasswordDatabase.PAM.txt]: Pluggable Authentication Modules. * [PasswordDatabase.BSDAuth.txt]: BSD authentication. * [AuthDatabase.CheckPassword.txt]: External checkpassword program without Dovecot extensions. * [PasswordDatabase.IMAP.txt]: Authenticate against remote IMAP server. Lookup databases ---------------- Dovecot does a lookup based on the username and possibly other information (e.g. IP address) and verifies the password validity itself. Fields that the lookup can return: * [Authentication.PasswordSchemes.txt]: User's password. * password_noscheme: Like "password", but if a password begins with "{", assume it belongs to the password itself instead of treating it as a [Authentication.PasswordSchemes.txt] prefix. This is usually needed only if you use plaintext passwords. * [PasswordDatabase.ExtraFields.User.txt]: Returning a user field can be used to change the username. Typically used only for case changes (e.g. "UseR" -> "user"). * username: Like user, but doesn't drop existing domain name (e.g. "username=foo" for "user@domain" gives "foo@domain"). * domain: Updates the domain part of the username. * Other special [PasswordDatabase.ExtraFields.txt]. Databases that support looking up only passwords, but no user or extra fields: * [AuthDatabase.Passwd.txt]: System users (NSS, '/etc/passwd', or similiar). * [PasswordDatabase.Shadow.txt]: Shadow passwords for system users (NSS,'/etc/shadow' or similiar). 
* Dovecot supports reading all [Authentication.PasswordSchemes.txt] from passwd and shadow databases (if prefix is specified), but that is of course incompatible with all other tools using/modifying the passwords. * [AuthDatabase.VPopMail.txt]: External software used to handle virtual domains. Databases that support looking up everything: * [AuthDatabase.PasswdFile.txt]: '/etc/passwd'-like file in specified location. * [AuthDatabase.LDAP.txt]: Lightweight Directory Access Protocol. * [AuthDatabase.SQL.txt]: SQL database (PostgreSQL, MySQL, SQLite). * [AuthDatabase.Dict.txt]: Dict key-value database (Redis, memcached, etc.) * [AuthDatabase.CheckPassword.txt]: External checkpassword program when used with Dovecot extensions. * [PasswordDatabase.Static.txt]: Static passdb for simple configurations Passdb settings --------------- An example passdb passwd-file with its default settings: ---%<------------------------------------------------------------------------- passdb { driver = passwd-file args = scheme=ssha256 /usr/local/etc/passwd.replica default_fields = override_fields = deny = no master = no pass = no skip = never result_failure = continue result_internalfail = continue result_success = return-ok } ---%<------------------------------------------------------------------------- First we have the settings that provide content for the passdb lookup: * driver: The passdb backend name * args: Arguments for the passdb backend. The format of this value depends on the passdb driver. Each one uses different args. * default_fields: Passdb fields (and [PasswordDatabase.ExtraFields.txt]) that are used, unless overwritten by the passdb backend. They are in format 'key=value key2=value2 ...'. The values can contain <%variables> [Variables.txt]. * override_fields: Same as default_fields, but instead of providing the default values, these values override what the passdb backend returned. Then we have the settings which specify when the passdb is used: * deny: If "yes", used to provide "denied users database". If the user is found from the passdb, the authentication will fail. * master: If "yes", used to provide [Authentication.MasterUsers.txt]. The users listed in the master passdb can log in as other users. * pass: This is an alias for 'result_success = continue' as described below. This was commonly used together with master passdb to specify that even after a successful master user authentication, the authentication should continue to the actual non-master passdb to lookup the user. * skip: Do we sometimes want to skip over this passdb? * never * authenticated: Skip if an earlier passdb already authenticated the user successfully. * unauthenticated: Skip if user hasn't yet been successfully authenticated by the previous passdbs. And finally we can control what happens when we're finished with this passdb: * result_success: What to do if the authentication succeeded (default: return-ok) * result_failure: What to do if authentication failed (default: continue) * result_internalfail: What to do if the passdb lookup had an internal failure (default: continue). If any of the passdbs had an internal failure and the final passdb also returns "continue", the authentication will fail with "internal error". The result values that can be used: * return-ok: Return success, don't continue to the next passdb. * return-fail: Return failure, don't continue to the next passdb. * return: Return earlier passdb's success or failure, don't continue to the next passdb. If this was the first passdb, return failure. 
* continue-ok: Set the current authentication state to success, and continue to the next passdb. * continue-fail: Set the current authentication state to failure, and continue to the next passdb. * continue: Continue to the next passdb without changing the authentication state. The initial state is failure. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MailboxFormat.mbox.txt0000644000175000017500000003160012244263650016075 00000000000000Mbox Mailbox Format =================== Contents 1. Mbox Mailbox Format 1. Locking 1. Dotlock 2. Deadlocks 2. Directory Structure 3. Dovecot's Metadata 4. Dovecot's Speed Optimizations 5. From Escaping 6. Mbox Variants 7. References Usually UNIX systems are configured by default to deliver mails to '/var/mail/username' or '/var/spool/mail/username' mboxes. In IMAP world these files are called INBOX mailboxes. IMAP protocol supports multiple mailboxes however, so there needs to be a place for them as well. Typically they're stored in '~/mail/' or '~/Mail/' directories. The mbox file contains all the messages of a single mailbox. Because of this, the mbox format is typically thought of as a slow format. However with Dovecot's indexing this isn't true. Only expunging messages from the beginning of a large mbox file is slow with Dovecot, most other operations should be fast. Also because all the mails are in a single file, searching is much faster than with maildir. Modifications to mbox may require moving data around within the file, so interruptions (eg. power failures) can cause the mbox to break more or less badly. Although Dovecot tries to minimize the damage by moving the data in a way that data should never get lost (only duplicated), mboxes still aren't recommended to be used for important data. Locking ------- Locking is a mess with mboxes. There are multiple different ways to lock a mbox, and software often uses incompatible locking. See for how to check what locking methods some commonly used programs use. There are at least four different ways to lock a mbox: * *dotlock*: 'mailboxname.lock' file created by almost all software when writing to mboxes. This grants the writer an exclusive lock over the mbox, so it's usually not used while reading the mbox so that other processes can also read it at the same time. So while using a dotlock typically prevents actual mailbox corruption, it doesn't protect against read errors if mailbox is modified while a process is reading. * *flock*: 'flock()' system call is quite commonly used for both read and write locking. The read lock allows multiple processes to obtain a read lock for the mbox, so it works well for reading as well. The one downside to it is that it doesn't work if mailboxes are stored in NFS. * *fcntl*: Very similar to *flock*, also commonly used by software. In some systems this 'fcntl()' system call is compatible with 'flock()', but in other systems it's not, so you shouldn't rely on it.*fcntl* works with NFS if you're using lockd daemon in both NFS server and client. * *lockf*: POSIX 'lockf()' locking. Because it allows creating only exclusive locks, it's somewhat useless so Dovecot doesn't support it. With Linux 'lockf()' is internally compatible with 'fcntl()' locks, but again you shouldn't rely on this. Dotlock ------- Another problem with dotlocks is that if the mailboxes exist in '/var/mail/', the user may not have write access to the directory, so the dotlock file can't be created. 
There are a couple of ways to work around this: * Give a mail group write access to the directory and then make sure that all software requiring access to the directory runs with the group's privileges. This may mean making the binary itself setgid-mail, or using a separate dotlock helper program which is setgid-mail. With Dovecot this can be done by setting 'mail_privileged_group = mail'. * Set sticky bit to the directory ('chmod +t /var/mail'). This makes it somewhat safe to use, because users can't delete each others mailboxes, but they can still create new files (the dotlock files). The downside to this is that users can create whatever files they wish in there, such as a mbox for newly created user who hadn't yet received mail. Deadlocks --------- If multiple lock methods are used, which is usually the case since dotlocks aren't typically used for read locking, the order in which the locking is done is important. Consider if two programs were running at the same time, both use dotlock and fcntl locking but in different order: * Program A: fcntl locks the mbox * Program B at the same time: dotlocks the mbox * Program A continues: tries to dotlock the mbox, but since it's already dotlocked by B, it starts waiting * Program B continues: tries to fcntl lock the mbox, but since it's already fcntl locked by A, it starts waiting Now both of them are waiting for each others locks. Finally after a couple of minutes they time out and fail the operation. Directory Structure ------------------- By default, when listing mailboxes, Dovecot simply assumes that all files it sees are mboxes and all directories mean that they contain sub-mailboxes. There are two special cases however which aren't listed: * '.subscriptions' file contains IMAP's mailbox subscriptions. * '.imap/' directory contains Dovecot's index files. Because it's not possible to have a file which is also a directory, it's not normally possible to create a mailbox and child mailboxes under it. However if you really want to be able to have mailboxes containing both messages and child mailboxes under mbox, then Dovecot can be configured to do this, subject to certain provisos; see . Dovecot's Metadata ------------------ Dovecot uses C-Client (ie. UW-IMAP, Pine) compatible headers in mbox messages to store metadata. These headers are: * X-IMAPbase: Contains UIDVALIDITY, last used UID and list of used keywords * X-IMAP: Same as X-IMAPbase but also specifies that the message is a "pseudo message" * X-UID: Message's allocated UID * Status: R (\Seen) and O (non-\Recent) flags * X-Status: A (\Answered), F (\Flagged), T (\Draft) and D (\Deleted) flags * X-Keywords: Message's keywords * Content-Length: Length of the message body in bytes Whenever any of these headers exist, Dovecot treats them as its own private metadata. It does sanity checks for them, so the headers may also be modified or removed completely. None of these headers are sent to IMAP/POP3 clients when they read the mail. *The MTA, MDA or LDA should strip all these headers _case-insensitively_ before writing the mail to the mbox.* Only the first message contains the X-IMAP or X-IMAPbase header. The difference is that when all the messages are deleted from mbox file, a "pseudo message" is written to the mbox which contains X-IMAP header. This is the "DON'T DELETE THIS MESSAGE -- FOLDER INTERNAL DATA" message which you hate seeing when using non-C-client and non-Dovecot software. 
This is however important to prevent abuse, otherwise the first mail which is received could contain faked X-IMAPbase header which could cause trouble. If message contains X-Keywords header, it contains a space-separated list of keywords for the mail. Since the same header can come from the mail's sender, only the keywords are listed in X-IMAP header are used. The UID for a new message is calculated from "last used UID" in X-IMAP header + 1. This is done always, so fake X-UID headers don't really matter. This is also why the pseudo message is important. Otherwise the UIDs could easily grow over 2^31 which some clients start treating as negative numbers, which then cause all kinds of problems. Also when 2^32 is exceeded, Dovecot will also start having some problems. Content-Length is used as long as another valid mail starts after that many bytes. Because the byte count must be exact, it's quite unlikely that abusing it can cause messages to be skipped (or rather appended to the previous message's body). Status and X-Status headers are trusted completely, so it's pretty good idea to filter them in LDA if possible. Dovecot's Speed Optimizations ----------------------------- Updating messages' flags and keywords can be a slow operation since you may have to insert a new header (Status, X-Status, X-Keywords) or at least insert data in the header's value. Some mbox MUAs do this simply by rewriting all of the mbox after the inserted data. If the mbox is large, this can be very slow. Dovecot optimizes this by always leaving some space characters after some of its internal headers. It can use this space to move only minimal amount of data necessary to get the necessary data inserted. Also if data is removed, it just grows these spaces areas. 'mbox_lazy_writes' setting works by adding and/or updating Dovecot's metadata headers only after closing the mailbox or when messages are expunged from the mailbox. C-Client works the same way. The upside of this is that it reduces writes because multiple flag updates to same message can be grouped, and sometimes the writes don't have to be done at all if the whole message is expunged. The downside is that other processes don't notice the changes immediately (but other Dovecot processes do notice because the changes are in index files). 'mbox_dirty_syncs' setting tries to avoid re-reading the mbox every time something changes. Whenever the mbox changes (ie. timestamp or size), it first checks if the mailbox's size changed. If it didn't, it most likely meant that only message flags were changed so it does a full mbox read to find it. If the mailbox shrunk, it means that mails were expunged and again Dovecot does a full sync. Usually however the only thing besides Dovecot that modifies the mbox is the LDA which appends new mails to the mbox. So if the mbox size was grown, Dovecot first checks if the last known message is still where it was last time. If it is, Dovecot reads only the newly added messages and goes into a "dirty mode". As long as Dovecot is in dirty mode, it can't be certain that mails are where it expects them to be, so whenever accessing some mail, it first verifies that it really is the correct mail by finding its X-UID header. If the X-UID header is different, it fallbacks to a full sync to find the mail's correct position. The dirty mode goes away after a full sync. If 'mbox_lazy_writes' was enabled and the mail didn't yet have X-UID header, Dovecot uses MD5 sum of a couple of headers to compare the mails. 
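These optimizations are controlled from 'dovecot.conf'; the sketch below shows the usual defaults ('mbox_very_dirty_syncs' is described next):

---%<-------------------------------------------------------------------------
mbox_lazy_writes = yes
mbox_dirty_syncs = yes
mbox_very_dirty_syncs = no
---%<-------------------------------------------------------------------------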
'mbox_very_dirty_syncs' does the same as 'mbox_dirty_syncs', but the dirty
state is kept also when opening the mailbox. Normally, opening the mailbox
does a full sync if it has been changed outside Dovecot.

From Escaping
-------------

In mboxes a new mail always begins with a "From " line, commonly referred to
as the From_-line. To avoid confusion, lines beginning with "From " in message
bodies are usually prefixed with a '>' character while the message is being
written to the mbox. However, Dovecot doesn't currently do this escaping.
Instead it prevents this confusion by adding Content-Length headers, so it
knows later where the next message begins. Dovecot also doesn't remove the '>'
characters before sending the data to clients. Both of these will probably be
implemented later.

Mbox Variants
-------------

There are a few minor variants of this format:

*mboxo* is the name of the original mbox format, which originated with Unix
System V. Messages are stored in a single file, with each message beginning
with a line containing "From SENDER DATE". If "From " (case-sensitive, with
the space) occurs at the beginning of a line anywhere in the email, it is
escaped with a greater-than sign (to ">From "). Lines already quoted as such,
for example ">From " or ">>>From ", are *not* quoted again, which leads to
irrecoverable corruption of the message content.

*mboxrd* was named for Rahul Dhesi in June 1995, though several people came up
with the same idea around the same time. An issue with the mboxo format was
that if the text ">From " appeared in the body of an email (such as from a
reply quote), it was not possible to distinguish this from the mailbox
format's quoted ">From ". mboxrd fixes this by always quoting already quoted
"From " lines (e.g. ">From ", ">>From ", ">>>From ", etc.) as well, so readers
can just remove the first ">" character. This format is used by qmail and
getmail (>=4.35.0).

The *mboxcl* format originated with Unix System V Release 4 mail tools. It
adds a Content-Length field which indicates the number of bytes in the
message. This is used to determine message boundaries. It still quotes "From "
as the original mboxo format does (and *not* as mboxrd does it).

*mboxcl2* is like mboxcl but does away with the "From " quoting.

*MMDF* (Multi-channel Memorandum Distribution Facility mailbox format)
originated with the MMDF daemon. The format surrounds each message with lines
containing four control-A's. This eliminates the need to escape "From " lines.

Dovecot currently uses the mboxcl2 format internally, but it's planned to move
to a combination of mboxrd and mboxcl.

References
----------

 * Wikipedia [http://en.wikipedia.org/wiki/Mbox]
 * Qmail mbox [http://www.qmail.org/man/man5/mbox.html]
 * Mbox family
   [http://homepages.tesco.net/~J.deBoynePollard/FGA/mail-mbox-formats.html]
 * CommuniGatePro mbox
   [http://www.communigate.com/CommuniGatePro/Mailboxes.html#mbox]

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Quota.Maildir.txt0000644000175000017500000000447312244263660015047 00000000000000Maildir++ quota
===============

Maildir++ is the most commonly used quota backend with Maildir format. Note
that *Maildir++ quota works only with Maildir format*. With other mailbox
formats you should use [Quota.Dict.txt].

Dovecot implements the standard Maildir++ specification
[http://www.inter7.com/courierimap/README.maildirquota.html], so it's
compatible with Courier [http://www.courier-mta.org/], maildrop
[http://www.courier-mta.org/maildrop/], Exim [http://www.exim.org], etc.
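As a minimal, hedged sketch of selecting this backend (assuming the quota
plugin is loaded for your protocols; the quota root name "User quota" is just
an example, and the actual limits are discussed next):

---%<-------------------------------------------------------------------------
mail_plugins = $mail_plugins quota
plugin {
  quota = maildir:User quota
}
---%<-------------------------------------------------------------------------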
There are two ways to configure Maildir++ quota limits:

1. Configure the limits in Dovecot. You most likely want to do this. See
   [Quota.txt] for how to do this configuration.
2. Make Dovecot get the limits from existing 'maildirsize' files.

Only Maildir++-specific settings are described below. See for more generic
configuration.

Maildir++ quota relies on the 'maildirsize' file having correct information,
so if your users can modify the file in some way (e.g. shell access), you're
relying on the goodwill of your users for the quota to work. You can't rely on
Dovecot noticing external changes to the Maildir and updating maildirsize
accordingly. This happens eventually when the quota is being recalculated, but
it may take a while. Quota recalculation also currently doesn't trigger quota
warning executions.

Maildirsize file
----------------

The 'maildirsize' file in the Maildir root directory contains both the quota
limit information and the current quota status. It contains a header in the
format:

---%<-------------------------------------------------------------------------
<storage limit in bytes>S,<number of messages>C
---%<-------------------------------------------------------------------------

If you don't configure any quota limits in Dovecot ('quota=maildir' with no
other settings), Dovecot takes the limits from the header. If the file does
not exist, quota isn't enforced.

If you configure quota limits in Dovecot, Dovecot makes sure that this header
is kept up to date. If the file does not exist, it's simply rebuilt. Once the
'maildirsize' reaches 5120 bytes, the quota is recalculated and the file is
recreated. This makes sure that if the quota happens to be broken (e.g.
externally deleted files) it won't stay that way forever.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Plugins.FTS.Solr.txt0000644000175000017500000001526512244263657015367 00000000000000Solr Full Text Search Indexing
==============================

Solr [http://lucene.apache.org/solr/] is a Lucene indexing server. Dovecot
communicates to it using HTTP/XML queries.

Compiling
---------

Give the '--with-solr' parameter to the 'configure' script. You'll also need
to have libexpat and libcurl installed (if these packages do not exist,
Dovecot will compile without Solr support). YUM packages: expat-devel,
curl-devel

Configuration
-------------

Replace Solr's existing 'solr/conf/schema.xml' using 'doc/solr-schema.xml'
from Dovecot. You may want to check if the file contains something you want to
modify. See the Solr wiki [http://wiki.apache.org/solr/SchemaXml] for how to
configure it.

On Dovecot's side add:

Into 10-mail.conf (note: add your existing plugins to the same string)

---%<-------------------------------------------------------------------------
mail_plugins = fts fts_solr
---%<-------------------------------------------------------------------------

Into 90-plugins.conf

---%<-------------------------------------------------------------------------
plugin {
  fts = solr
  fts_solr = url=http://solr.example.org:8983/solr/
}
---%<-------------------------------------------------------------------------

Fields listed in the 'fts_solr' plugin setting are space separated. They can
contain:

 * url=<solr url> : Required base URL for Solr.
 * debug : Enable HTTP debugging. Writes to error log.
 * break-imap-search : Use Solr also for indexing TEXT and BODY searches. This
   makes your server non-IMAP-compliant. (This is always enabled in v2.1+)

Important notes:

 * Some mail clients will not submit any search requests for certain fields if
   they index things locally, e.g.
   Thunderbird will not send any requests for fields such as
   sender/recipients/subject when Body is not included, as this data is
   contained within the local index.

Solr commits & optimization
---------------------------

Solr indexes should be optimized once in a while to make searches faster and
to remove space used by deleted mails. Dovecot never asks Solr to optimize, so
you should do this yourself. Perhaps a cronjob that sends the optimize command
to Solr every n hours.

With v2.2.3+ Dovecot only does soft commits to the Solr index to improve
performance. You must run a hard commit once in a while or Solr will keep
increasing its transaction log sizes. For example, send the commit command to
Solr every few minutes.

---%<-------------------------------------------------------------------------
# Optimize should be run somewhat rarely, e.g. once a day
curl http://<hostname>:<port>/solr/update?optimize=true
# Commit should be run pretty often, e.g. every minute
curl http://<hostname>:<port>/solr/update?commit=true
---%<-------------------------------------------------------------------------

Re-index mailbox
----------------

If you need to force Dovecot to reindex a whole mailbox, you can run the
command shown below. It takes effect only when a search is done, and it
applies to the whole mailbox.

---%<-------------------------------------------------------------------------
doveadm fts rescan -u <username>
---%<-------------------------------------------------------------------------

If you want to index a single mailbox or all mailboxes, you can run the
command shown below. This happens immediately and blocks until the action is
completed.

---%<-------------------------------------------------------------------------
doveadm index [-u <username>|-A] [-S <socket_path>] [-q] [-n <max recent>] <mailbox mask>
---%<-------------------------------------------------------------------------

Sorting by relevancy
--------------------

Solr/Lucene supports returning a relevancy score for search results. If you
want to sort the search results by the score, use Dovecot's non-standard
X-SCORE sort key:

---%<-------------------------------------------------------------------------
1 SORT (X-SCORE) UTF-8 <search parameters>
---%<-------------------------------------------------------------------------

Indexes
-------

Dovecot creates the following fields:

 * id: Unique ID consisting of uid/uidv/user/box.
   * Note that your user names really shouldn't contain the '/' character.
 * uid: Message's IMAP UID.
 * uidv: Mailbox's UIDVALIDITY. This changes if the mailbox gets recreated.
 * box: Mailbox name
 * user: User name who owns the mailbox (indexing shared/public mailboxes is
   probably broken currently)
 * hdr: Indexed message headers
 * body: Indexed message body
 * any: "Copy field" from hdr and body, i.e. searching based on this will
   search from both headers and bodies.

Lucene does duplicate suppression based on the "id" field, so even if Dovecot
sends the same message multiple times to Solr it gets indexed only once. This
might happen currently if multiple searches are started at the same time.

You might want to build a cronjob to go through the Lucene indexes once in a
while to delete indexed messages (or entire mailboxes) that no longer exist on
the filesystem. It shouldn't normally find any such messages though.
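As a sketch of what such a cleanup could do, a deleted mailbox can be removed
from the index with a standard Solr delete-by-query request. The URL, user and
mailbox values below are purely illustrative assumptions:

---%<-------------------------------------------------------------------------
# Hypothetical example: drop everything indexed for user "bob" in
# mailbox "Trash", then commit the change.
curl 'http://solr.example.org:8983/solr/update?commit=true' \
  -H 'Content-Type: text/xml' \
  --data-binary '<delete><query>user:"bob" AND box:"Trash"</query></delete>'
---%<-------------------------------------------------------------------------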
Testing ------- ---%<------------------------------------------------------------------------- # telnet localhost imap * OK [CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE SORT SORT=DISPLAY THREAD=REFERENCES THREAD=REFS MULTIAPPEND UNSELECT CHILDREN NAMESPACE UIDPLUS LIST-EXTENDED I18NLEVEL=1 ESEARCH ESORT SEARCHRES WITHIN CONTEXT=SEARCH LIST-STATUS STARTTLS AUTH=PLAIN AUTH=LOGIN] I am ready. 1 login username password 2 select Inbox 3 SEARCH text "test" ---%<------------------------------------------------------------------------- Sharding -------- If you have more users than fit into a single Solr box, you can split users off to different servers. A couple of different ways you could do it are: * Have some HTTP proxy redirecting the connections based on the URL * Configure Dovecot's userdb lookup to return a different host for 'fts_solr' setting using [UserDatabase.ExtraFields.txt]. * LDAP: 'user_attrs = ..., solrHost=fts_solr=url=http://%$:8983/solr/' * MySQL: 'user_query = SELECT concat('url=http://', solr_host, ':8983/solr/') AS fts_solr, ...' External Tutorials ------------------ External sites with tutorials on using Solr under Dovecot * Installing Apache Solr with Dovecot for fulltext search results [http://atmail.com/kb/2009/installing-apache-solr-with-dovecot-for-fulltext-search-results/] - Guide for installing all the dependencies for Solr to work under CentOS/Fedora. Step by step tutorial. * Installing Apache Solr with Dovecot for fulltext search results (ATmail support guide) [http://support.atmail.com/display/AKB/Installing+Apache+Solr+with+Dovecot+for+fulltext+search+results] (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MboxChildFolders.txt0000644000175000017500000002230712244263650015561 00000000000000Mail folders containing both messages and sub-folders ----------------------------------------------------- Under mbox, it is not normally possible to have a mail folder which contains both messages and sub-folders. This is because there would be a filesystem name collision between the name of the mbox file containing the messages and the name of the directory containing the sub-folders, for example: * Mail folder "foo" containing messages would be stored in a file at '~/mail/foo'. * Mail folder "foo/bar" containing messages would be stored in a file at '~/mail/foo/bar', but this cannot happen because this relies on the existence of a directory '~/mail/foo/' which can't exist because there is already a file with that name. If however there is a requirement to be able to have a mail folder which contains both messages and sub-folders, then there are a couple of ways to do it: 1. Maildir++ layout 2. Messages in named file These approaches are described in more detail below. Maildir++ layout ---------------- Under mbox, Dovecot normally stores mail folders in "filesystem" layout. In this layout, mail folders are stored in mbox files (potentially under subdirectories) with the same relative path as the mail folder path, for example: * '~/mail/foo' - mbox file containing mail for mail folder "foo"; can not create any mail sub-folders of "foo" * '~/mail/bar/baz' - mbox file containing mail for mail folder "bar/baz"; can not create any mail sub-folders of "bar/baz" * ('~/mail/inbox' - mbox file containing mail for INBOX) However, Dovecot can be configured to keep mbox mail in a Maildir++-like layout. 
This makes Dovecot keep mail in mbox files where all the mailbox folder naming
levels are separated with dots (with a leading dot), for example:

 * '~/mail/.foo' - mbox file containing mail for mail folder "foo"
 * '~/mail/.foo.bar' - mbox file containing mail for mail folder "foo/bar". We
   can now do this.
 * '~/mail/.bar.baz' - mbox file containing mail for mail folder "bar/baz"
 * ('~/mail/inbox' - mbox file containing mail for INBOX)

This can be enabled by adding ':LAYOUT=maildir++' to the mail location, for
example:

---%<-------------------------------------------------------------------------
# Incomplete example. Do not use!
mail_location = mbox:~/mail:LAYOUT=maildir++
---%<-------------------------------------------------------------------------

However there is a problem. Under mbox, setting 'LAYOUT=maildir++' alone
leaves Dovecot unable to place index files, which would likely result in
performance issues. So when using 'LAYOUT=maildir++' with mbox, it is
advisable to also configure 'INDEX'.

Now, mail files (other than 'inbox') all have names beginning with a dot, so
if we like we can store other things in the '~/mail' directory by using names
which do not begin with a dot. So we could think to use 'INDEX' to put indexes
at '~/mail/index/', for example:

---%<-------------------------------------------------------------------------
# Incomplete example. Do not use!
mail_location = mbox:~/mail:LAYOUT=maildir++:INDEX=~/mail/index
---%<-------------------------------------------------------------------------

If we do this, then indexes will be kept at '~/mail/index/' and this will not
clash with any names used for mail folders.

There is one more thing we may want to consider though. By default Dovecot
will maintain a list of subscribed folders in a file '.subscriptions' under
the mail location root. In this case that means it would end up at
'~/mail/.subscriptions'. This would then mean that it would be impossible to
create a mail folder called "subscriptions". We can get around this by using
the 'CONTROL' parameter to move the '.subscriptions' file somewhere else, for
example into the directory '~/mail/control' (again choosing a name which
doesn't begin with a dot so we don't collide with the names of mbox files
storing mail folders). That gives us:

---%<-------------------------------------------------------------------------
# Trick mbox configuration which allows a mail folder which contains both
# messages and sub-folders
mail_location = mbox:~/mail:LAYOUT=maildir++:INDEX=~/mail/index:CONTROL=~/mail/control
---%<-------------------------------------------------------------------------

This then allows mail folders which contain both messages and sub-folders
without the possibility of naming collisions between mail folders and other
data.

There is one further wrinkle. Specifying ':LAYOUT=maildir++' for mbox changes
the default hierarchy separator from a slash to a dot. This should not be a
problem for IMAP clients, as the hierarchy separator is exposed through IMAP.
However, anything which expects to just "know" that the hierarchy separator is
a slash may get confused. This can be worked around by using a namespace (see
[Namespaces.txt]) to set the folder separator back to a slash again, as
sketched below.
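A minimal, hedged sketch of such a namespace (assuming an otherwise default
inbox namespace; the 'separator' only changes what clients see, while the
on-disk Maildir++-style names keep using dots):

---%<-------------------------------------------------------------------------
namespace inbox {
  inbox = yes
  separator = /
  prefix =
}
---%<-------------------------------------------------------------------------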
Messages in named file
----------------------

Under mbox, Dovecot normally stores mail folders in "filesystem" layout. In
this layout, mail folders are stored in mbox files (potentially under
subdirectories) with the same relative path as the mail folder path, for
example:

 * '~/mail/foo' - mbox file containing mail for mail folder "foo"; can not
   create any mail sub-folders of "foo"
 * '~/mail/bar/baz' - mbox file containing mail for mail folder "bar/baz"; can
   not create any mail sub-folders of "bar/baz"
 * ('~/mail/inbox' - mbox file containing mail for INBOX)

In the example above, we can't create any sub-folders of "foo" because there
is a file 'foo' in the way. So we could think to get rid of that file and put
a directory there instead. But if we do that, then we need somewhere to put
the messages for folder "foo". We could think to put them in a specially-named
file in the directory 'foo/'. Then, if we wanted to create a sub-folder of
"foo", we would be fine, because we could then do that. The rule would then be
that messages go into the specially-named file in the directory corresponding
to the mail folder name. We would want to choose a special name which would be
unlikely to collide with a folder name. We could think to use something like
'mBoX-MeSsAgEs'.

Now, it turns out that you can configure Dovecot to do this using the
'DIRNAME' parameter. For example, using a configuration of:

---%<-------------------------------------------------------------------------
# Incomplete example. Do not use!
mail_location = mbox:~/mail:DIRNAME=mBoX-MeSsAgEs
---%<-------------------------------------------------------------------------

we would get a layout like this:

 * '~/mail/inbox' - mbox file containing mail for INBOX
 * '~/mail/foo/mBoX-MeSsAgEs' - mbox file containing mail for mail folder
   "foo"
 * '~/mail/foo/bar/mBoX-MeSsAgEs' - mbox file containing mail for mail folder
   "foo/bar"

However there is a problem. Under mbox, setting 'DIRNAME' alone leaves Dovecot
unable to place index files, which would likely result in performance issues,
or worse: if the index directory gets created first, it will obstruct the
creation of the mbox file. So when using 'DIRNAME' with mbox, it is also
necessary to configure 'INDEX'.

The question then arises where to put index files. Any directory under the
'~/mail' directory could be considered as a mail folder. We could think to use
a name beginning with a dot, for example '~/mail/.index', but that would then
mean that it would not be possible to create a mail folder called ".index";
unlikely, but it would be nice to have as few implementation-specific
restrictions as possible.

In addition, by default, Dovecot will create a file '.subscriptions' at the
mail location root to hold the list of mailbox subscriptions. This would make
it impossible to create a mail folder called ".subscriptions". But we can move
the '.subscriptions' file to another directory by using the 'CONTROL'
parameter.

To get around these issues, we can add another directory layer which separates
these purposes.
For example, using the configuration:

---%<-------------------------------------------------------------------------
# Trick mbox configuration which allows a mail folder which contains both
# messages and sub-folders
mail_location = mbox:~/mail/mailboxes:DIRNAME=mBoX-MeSsAgEs:INDEX=~/mail/index:CONTROL=~/mail/control
---%<-------------------------------------------------------------------------

would result in the following layout:

 * '~/mail/mailboxes/foo/mBoX-MeSsAgEs' - mbox file containing messages for
   mail folder "foo"
 * '~/mail/mailboxes/foo/bar/mBoX-MeSsAgEs' - mbox file containing messages
   for mail folder "foo/bar"
 * '~/mail/mailboxes/inbox' - mbox file containing messages for INBOX
 * '~/mail/control/.subscriptions' - file containing the list of subscribed
   mailboxes
 * '~/mail/index/INBOX/dovecot.index.*' - index files for INBOX
 * '~/mail/index/foo/dovecot.index.*' - index files for mail folder "foo"
 * '~/mail/index/foo/bar/dovecot.index.*' - index files for mail folder
   "foo/bar"
 * '~/mail/index/dovecot.mailbox.log' - other index files

Restrictions on mail folder names are then minimised; we can't have mail
folders with the names "mBoX-MeSsAgEs", "dovecot.index.*", or
"dovecot.mailbox.log". Unlike the Maildir++ layout approach above, because we
are still using "filesystem" layout, the hierarchy separator remains as a
slash.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/UserDatabase.ExtraFields.txt0000644000175000017500000000737512244263664017156 00000000000000User database extra fields
==========================

A user database lookup typically returns [UserDatabase.txt] fields. Other
possibilities are:

 * *nice*: Set the mail process's priority to be the given value.
 * *chroot*: Chroot to the given directory. Overrides the 'mail_chroot'
   setting in 'dovecot.conf'.
 * *system_groups_user*: Specifies the username whose groups are read from
   '/etc/group' (or wherever NSS is configured to take them from). The logged
   in user has access to those groups. This may be useful for shared
   mailboxes.
 * *userdb_import*: This allows returning multiple extra fields in one
   TAB-separated field. It's useful for userdbs which are a bit less flexible
   about returning a variable number of fields (e.g. SQL).
 * *uidgid_file*: Get uid and gid for the user based on the given filename.
 * It's possible to override settings from 'dovecot.conf' (most commonly
   quota_rule to set per-user quota limits).
 * The extra fields are also passed to [PostLoginScripting.txt].

These fields can be returned the exact same way as uid, gid, home and mail
fields. Below are examples for some user databases.

Overriding settings
-------------------

Most commonly, settings are overridden from the plugin section. For example,
if your plugin section has a 'quota=maildir:storage=1024' value and the userdb
lookup returns 'quota=maildir:storage=2048', the original quota setting gets
overridden. In fact, if the lookup always returns a quota field, there's no
point in having the quota setting in the plugin section at all, because it
always gets overridden anyway.

To understand how imap and pop3 processes see their settings, it may be
helpful to know how Dovecot internally passes them:

1. First, all actual settings are read into memory.
2. Next, if [PostLoginScripting.txt] is used, it may modify the settings if
   wanted.
3. Last, all the extra fields returned by the userdb lookup are used to
   override the settings. Any unknown setting is placed into the plugin {}
   section (e.g. foo=bar will be parsed as if it were plugin { foo=bar }).
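To illustrate step 3 with a purely hypothetical sketch: a userdb lookup that
returns the extra fields 'quota_rule=*:storage=2G' and 'foo=bar' is seen by
the mail process roughly as if 'dovecot.conf' contained:

---%<-------------------------------------------------------------------------
plugin {
  quota_rule = *:storage=2G
  foo = bar
}
---%<-------------------------------------------------------------------------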
If you want to override settings inside sections, you can separate the section
name and key with '/'. For example:

---%<-------------------------------------------------------------------------
namespace default {
  inbox = yes
}
---%<-------------------------------------------------------------------------

The inbox setting can be overridden by returning a
'namespace/default/inbox=yes' extra field.

Examples
--------

SQL
---

dovecot-sql.conf:

---%<-------------------------------------------------------------------------
user_query = SELECT home, uid, gid, \
  '*:storage=100M' as quota_rule, mail_plugins \
  FROM users WHERE userid = '%u'
---%<-------------------------------------------------------------------------

LDAP
----

dovecot-ldap.conf:

---%<-------------------------------------------------------------------------
user_attrs = homeDirectory=home,uidNumber=uid,gidNumber=gid,quotaDovecot=quota,mail_plugins
---%<-------------------------------------------------------------------------

passwd-file
-----------

Below are examples that show how to give two userdb extra fields ("mail" and
"quota"). Note that all userdb extra fields must be prefixed with "userdb_",
otherwise they're treated as [PasswordDatabase.ExtraFields.txt].

---%<-------------------------------------------------------------------------
user:{plain}pass:1000:1000::/home/user::userdb_mail=mbox:~/mail userdb_quota_rule=*:storage=100M
user2:{plain}pass2:1001:1001::/home/user2::userdb_mail=maildir:~/Maildir userdb_quota_rule=*:storage=200M
---%<-------------------------------------------------------------------------

(This file was created from the wiki on 2013-11-24 04:43)
dovecot-2.2.9/doc/wiki/Debugging.Authentication.txt0000644000175000017500000000645312244263643017250 00000000000000Debugging Authentication
========================

The most important thing to do is to set 'auth_debug=yes', and preferably also
'auth_debug_passwords=yes'. After that you'll see in the logs exactly what
dovecot-auth is doing, and that should help you to fix the problem.

PLAIN SASL mechanism
--------------------

With IMAP and POP3 it's easy to log in manually using the IMAP LOGIN command
or POP3's USER and PASS commands (see and for details), but with SMTP AUTH
you'll need to use the PLAIN authentication mechanism, which requires you to
build a base64-encoded string in the correct format. The PLAIN authentication
is also used internally by both IMAP and POP3 to authenticate to dovecot-auth,
so you see it in the debug logs.

The PLAIN mechanism's authentication format is: <authorization ID> NUL
<authentication ID> NUL <password>. The authorization ID is the username you
want to log in as, and the authentication ID is the username whose password
you're giving. If you're not planning on doing a master user login
([Authentication.MasterUsers.txt]), you can either set both of these fields to
the same username, or leave the authorization ID empty.

Encoding with mmencode
----------------------

printf(1) and mmencode(1) should be available on most Unix or GNU/Linux
systems. (If not, check with your distribution. GNU coreutils includes
printf(1), and metamail includes mmencode(1). In Debian, mmencode is called
mimencode(1).)

---%<-------------------------------------------------------------------------
$ printf 'username\0username\0password' | mmencode
dXNlcm5hbWUAdXNlcm5hbWUAcGFzc3dvcmQ=
---%<-------------------------------------------------------------------------

This string is what a client would use to attempt PLAIN authentication as user
"username" with password "password."
With 'auth_debug_passwords=yes', it would appear in your logs.

Decoding with mmencode
----------------------

You can use mmencode -u to interpret the encoded string pasted into stdin as
follows:

---%<-------------------------------------------------------------------------
# mmencode -u
bXl1c2VybmFtZUBkb21haW4udGxkAG15dXNlcm5hbWVAZG9tYWluLnRsZABteXBhc3N3b3Jk
myusername@domain.tldmyusername@domain.tldmypassword
#
---%<-------------------------------------------------------------------------

You should see the correct user address (twice) and password. The null bytes
won't display.

Encoding with Perl
------------------

Unfortunately, mmencode on FreeBSD chokes on "\0". As an alternative, if you
have MIME::Base64 on your system, you can use a perl statement to do the same
thing:

---%<-------------------------------------------------------------------------
perl -MMIME::Base64 -e 'print encode_base64("myusername\@domain.tld\0myusername\@domain.tld\0mypassword");'
---%<-------------------------------------------------------------------------

As mmencode -u doesn't encounter any "\0", you can still do:

---%<-------------------------------------------------------------------------
perl -MMIME::Base64 -e 'print encode_base64("myusername\@domain.tld\0myusername\@domain.tld\0mypassword");' | mmencode -u
---%<-------------------------------------------------------------------------

to check that you have encoded correctly.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Pigeonhole.ManageSieve.Clients.txt0000644000175000017500000000606712244263656020250 00000000000000ManageSieve Client Issues
=========================

Although this ManageSieve server should comply with the draft specification of
the ManageSieve protocol, quite a few clients don't. This page lists the known
client problems.

 * *The TLS problem*:
   * The core of the TLS problem is that a ManageSieve server is required to
     send an unsolicited CAPABILITY response right after successful TLS
     negotiation. Older Cyrus servers did not do this and many clients
     incorporated this protocol error as the standard, meaning that these do
     not expect the CAPABILITY response and thus fail with subsequent
     commands. However, now that Cyrus' Timsieved has changed its behaviour
     towards protocol compliance, all those clients will follow eventually.
     The following clients are known to have this TLS issue:
     * Thunderbird Sieve add-on: TLS broken for old versions. Starting with
       version 0.1.5 the Thunderbird Sieve add-on properly supports TLS.
     * KMail + kio_sieve: TLS broken for old versions. This issue is fixed at
       least in kmail 1.9.9 / kde 3.5.9.
     * SquirrelMail/AvelSieve: For some users the Avelsieve client stores
       scripts but fails to retrieve them later. This problem is directly
       caused by AvelSieve's TLS support. A quick way to fix this is not to
       enable TLS for ManageSieve. AvelSieve stable (v1.0.1) does not have TLS
       support at all, so you will see this happen only with the development
       or SVN versions. Another issue is that (at least with avelsieve-1.9.7)
       it is impossible to delete the last rule of a script. For
       avelsieve-1.9.7 you find a patch that fixes these two issues here
       [http://www.rename-it.nl/dovecot/client-patches/avelsieve-1.9.7-dovecot.patch].
 * *Smartsieve, Websieve*:
   * These clients are specifically written for Cyrus timsieved and fail on
     multiple stages of the protocol when connected to Pigeonhole ManageSieve.
   * /(Stephan Bosch)/: I intend to look at these in the future, but currently
     these are very much unavailable for use with Dovecot. But, feel free to
     fix these yourself :)
   * /(Steffen "Stefreak" Neubauer)/: I've fixed the problems for smartsieve.
     Just replace your lib/Managesieve.php with this here:
     https://www.commail.org/sieve/lib/Managesieve.phps - HAVE FUN!
   * /(Mark Titorenko)/: Stephan provides an updated link to Steffen's
     ManageSieve patch in this Dovecot mailing list post dated 2009-09-01
     [http://www.mail-archive.com/dovecot@dovecot.org/msg21862.html]
 * *Ruby/Managesieve*: Ruby command line client and library for ManageSieve;
   works fine: Ruby/Managesieve [http://managesieve.rubyforge.org/]
 * *Ruby/Sieve-Parser*: Ruby library for Sieve parsing: Sieve-Parser
   [http://rubygems.org/gems/sieve-parser/]

*NOTE*: If you add new issues to this list, notify the author or send an
e-mail to the Dovecot mailing list [http://dovecot.org/mailinglists.html]. In
any case, you must make sure that the issue is properly explained and that the
author can contact you for more information.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Design.Storage.ErrorHandling.txt0000644000175000017500000000604312244263644017744 00000000000000Lib-storage Error Handling
==========================

'src/lib-storage/mail-error.h' describes different types of errors and has
some other error-related functions and macros. Only functions returning "int"
can actually return a failure.

 * Functions that return a pointer will never return failure with NULL. Only
   "find" type of functions can return NULL, which means "not found".
 * Iterators usually work by init() returning an iterator pointer and next()
   returning a boolean. If there were any errors in either init() or next(),
   deinit() finally returns a failure.

Getting lib-storage errors
--------------------------

 * 'mailbox_list_*()' functions set their errors to the given mailbox_list
   structure. You can get these errors with 'mailbox_list_get_last_error()'.
 * All other functions that have some way of accessing mail_storage (mailbox,
   mail, transactions, etc.) set their errors to the storage. You can get
   these errors with 'mail_storage_get_last_error()'.
 * Mail user and namespace functions have their own error handling, typically
   by returning error strings as parameters.
 * Both '*_get_last_error()' functions should be called soon after the error
   is noticed, before other failing lib-storage calls overwrite the error.
 * In deinit failures it usually doesn't matter if you get the first or the
   last error, so it's easier to just call all the different deinit functions
   and finally look up what the last failure was.

Setting lib-storage errors
--------------------------

Errors can be set with two calls:

 * 'mail_storage_set_error()' and 'mailbox_list_set_error()' should be used
   when the error is the user's fault in some way. For example an invalid
   mailbox name, out of quota, etc. The error string will be shown to the
   user. It won't be written to a log file.
 * 'mail_storage_set_critical()' and 'mailbox_list_set_critical()' should be
   used when the error is a problem in the system and the sysadmin should be
   notified. For example out of disk space, or just in general an unexpected
   syscall failure. The error string that will be shown to the user is
   "Internal error occurred", but it will be logged as an error.
 * The reason for the separation of these two is:
   1. Only log errors that the sysadmin can do something about.
   2.
Never show user anything even potentially sensitive about the system, such as path names. There are also a few other calls that aren't used as often, but can be helpful: * 'mail_storage_set_internal_error()' and 'mailbox_list_set_internal_error()' simply set the user-visible error message to "Internal error occurred". These can be used if the actual error was already logged. * 'mail_storage_set_error_from_errno()' and 'mailbox_list_set_error_from_errno()' set the user-visible error message based on some common 'errno' values. Currently: * EACCESS, EPERM, EROFS: Permission denied * ENOSPC, EDQUOT: Not enough disk space * ENOENT, ENOTDIR: Not found * ELOOP: Directory structure is broken (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PasswordDatabase.BSDAuth.txt0000644000175000017500000000031512244263655017051 00000000000000BSDAuth ======= This is similar to [PasswordDatabase.PAM.txt], but used by OpenBSD. It supports 'cache_key' parameter the same way as PAM. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.Indexes.MailIndexApi.txt0000644000175000017500000000410612244263644017503 00000000000000Mail Index API ============== 'lib-index/mail-index.h' contains the functions to access the index files. 'mail-cache.h' contains the functions to access the cache file. The purpose of the main structures are: * 'struct mail_index': Global state of the index. * 'struct mail_index_view': You can have multiple views to the index. The views see new messages come and expunged messages go only when it's being explicitly synchronized. With mmaped indexes you can't really trust the record data (flags, keywords, extensions) not to change. This doesn't matter with IMAP. * 'struct mail_index_map': Index file is accessed via maps. Views can point to one or more maps. Maps can be shared by different views. Maps can contain either mmap()ed memory areas pointing to the index file, or a in-memory copy of it. * 'struct mail_index_transaction': In-memory list of changes to be written to the transaction log. The writing is done only when the transaction is committed. Views and maps -------------- In general you access all the data in the index files via views. The mails are accessed using sequence numbers, which change only when the view is synchronized. For accessing messages with their UIDs, you'll first need to convert them to sequences with either 'mail_index_lookup_uid()' or 'mail_index_lookup_uid_range()'. 'mail_index_lookup()' can be used to look up a single record's UID and flags. The returned record points to the latest map, so that it contains the latest flag changes. If the message was already expunged from the latest map, it returns 0. 'mail_index_lookup_full()' can be used to get also the map where the message was found. This can be important with extensions. If extension record's state depends on the extension header, they must be looked up from the same map. For this reason there exists 'mail_index_map_get_header_ext()' and 'mail_index_lookup_ext_full()' functions which take the map as parameter. The non-map versions return the data from the latest map if the message hasn't been expunged. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/HowTo.PopBSMTPAndDovecot.txt0000644000175000017500000003427012244263646016752 00000000000000Contents 1. POP3 (IMAP) before SMTP 1. Are you sure you want this? 1. Problems with POP-before-SMTP 2. Advantages of POP-before-SMTP over SMTP AUTH 2. Pop-before-smtp.pl 3. 
DRAC 4. SQL 1. Example for postgresql, postfix 2. Example for MySQL, postfix 5. relay-ctrl POP3 (IMAP) before SMTP ======================= /sometimes also called SMTP-after-POP3 or SMTP-after-IMAP/ Are you sure you want this? --------------------------- POP-before-SMTP is generally considered a kludge, originally invented to make up for the lack of authentication in the original SMTP [http://en.wikipedia.org/wiki/Smtp] specification for clients on dynamic IP addresses.ESMTP [http://en.wikipedia.org/wiki/Extended_SMTP] resolved that shortcoming long ago, and all modern mail clients and servers support it by now. You should consider implementing ESMTP AUTH [http://en.wikipedia.org/wiki/SMTP-AUTH] in your mail transport/submission agent, and using it in your clients, rather than using POP-before-SMTP. See also or . Problems with POP-before-SMTP ----------------------------- * *Shared IP addresses* are in widespread use. You are opening your server not only to your user, but to anyone else who might be sharing the same IP address, other users, other computers in the same NAT. If you lose the connection, the next one who is assigned your IP also inherits your relay permit. This might include virus-infected spambot machines. Or consider a public wireless hotspot or an Internet cafe: both types of establishments are known to be frequented by spammers. * *Not properly implemented* in all mail clients: it only works right if the client checks for new mail immediately before attempting to send. And it can be very unsafe if longer timeouts are used, such that the user has time to write an email. * Probably others. [RobMcGee.txt] ( [RobMcGee.txt]) just thought it was wrong to have a HOWTO page here without a warning about why /not/ to. Know what you are doing. If you are setting up a new mail service from scratch, by all means, do it right! Advantages of POP-before-SMTP over SMTP AUTH -------------------------------------------- * Likely to be relatively easier to implement in your mail submission agent. What's easier is a matter of opinion, and it varies, of course, but probably all MTA/MSA servers support some form of access lists without patching or recompiling. * Simple non-technical instructions for users: /"Remember to check for new mail before you try to send mail."/ Pop-before-smtp.pl ================== If you want to use pop-before-smtp.pl (from http://popbsmtp.sourceforge.net/) together with Dovecot, you can use this regular expression to match successful POP3 and IMAP logins: ---%<------------------------------------------------------------------------- $pat = '^(... .. ..:..:..) \S+ (?:pop3|imap)-login: Login: .+ \[(\d+\.\d+\.\d+\.\d+)\]'; ---%<------------------------------------------------------------------------- v1.0RC2 seems to need this format to work properly: ---%<------------------------------------------------------------------------- $pat = '^dovecot: (... .. ..:..:..) \S+ (?:pop3|imap)-login: Login: \S+ \S+ \S+ lip=(\d+\.\d+\.\d+\.\d+)'; ---%<------------------------------------------------------------------------- Note: This only works with IPv4, anyone who wants to fix it for IPv6, please do so:) worked for me on Fedora: ---%<------------------------------------------------------------------------- $pat = '(?:pop3|imap)-login: (... .. ..:..:..) 
Info: Login: \S+ \[(\d+\.\d+\.\d+\.\d+)\]'; ---%<------------------------------------------------------------------------- With v1.0 Alpha 4, the following pattern works: ---%<------------------------------------------------------------------------- $pat = '^(... .. ..:..:..) \S+ (?:dovecot: )?(?:imap|pop3)-login: Login: \S+ \S+ rip=(\d+\.\d+\.\d+\.\d+)' ---%<------------------------------------------------------------------------- This works with RHEL 4.3 (at least until IPv6 really catches): ---%<------------------------------------------------------------------------- $pat = '(?:pop3|imap)-login: (... .. ..:..:..) Info: Login: \S+ \[::ffff:(\d+\.\d+\.\d+\.\d+)\]'; ---%<------------------------------------------------------------------------- DRAC ==== The DRAC historical plugin for Dovecot 1.x, located here [http://mail.cc.umanitoba.ca/drac/], doesn't work with Dovecot 2.x, since it relies on the "IP" environment variable, not set anymore by Dovecot 2.x a more recent version of this plugin is available here: DRAC Plugin for Dovecot 2.x [http://sourceforge.jp/projects/dovecot2-drac/]. The README file explains how to compile it. Change the path to your Dovecot 2.x source code into the Makefile to compile it. DRAC runs as a separate daemon, maintaining a BerkeleyDB database of IPs that have successfully authenticated via POP3 or IMAP, expiring them after 30 minutes. Installing it therefore requires that both your POP3/IMAP server and your SMTP daemon (Postfix/Sendmail/qmail) be set up to support it. DRAC-PLUGIN.c is a small C program, and accessing BerkeleyDB databases is efficient so it works pretty well. By following the instructions you will install a file drac_plugin.so in your dovecot 'lib/' directories for IMAP and/or POP3 loadable modules. To turn on the new DRAC plugin in dovecot, you must set up these lines in your dovecot.conf. There is a separate section for ''protocol imap'' and another under ''protocol pop3''; make sure you enable both. ---%<------------------------------------------------------------------------- # Support for dynamically loadable modules mail_plugin_dir = /usr/lib/dovecot/imap # not mandatory mail_plugins = drac # provide a list of all plugins you want to load here ---%<------------------------------------------------------------------------- Permissions note: the directory containing the drac_plugin.so file has to be readable by ordinary users. Check your Dovecot error log for help. To get DRAC working on your machine, download the main DRAC [http://mail.cc.umanitoba.ca/drac/] daemon, edit the makefile as directed in the instructions, and make and install it. You will also want to ensure that you register the rpcs by executing rpcgen. See the Makefile for more details. SQL === Advantage: you do not have a multi-megabyte Perl daemon reading your logs Disadvantage: for each login you need the time and space to execute this script 1. tell your MTA to look up IPs authorized to relay in an SQL table 2. delete old IPs from the table regularly (cron job for example, or a modification to the script below) 3. tell dovecot to update the SQL table upon successful login Dovecot 1.0 (and probably 0.99) can update a SQL table with the script below. /!\ *Note* that *you* must set up a script that deletes old IPs separately, and *you* also must configure your MTA properly. The script *only* performs the 'update on successful login' step, which alone is insecure without expiring older IPs!/Add your working examples to this section. 
This Wiki depends on your help!/ ---%<------------------------------------------------------------------------- #!/bin/sh # This script created 2005-08-21 by Lorens Kockum # Released into the Public Domain # Changes: # 2006-06-06 Matthias Andree # - changed $* to "$@" for more robust argument quoting # Action: when called by dovecot 1.0 as described below, updates an SQL table # with logged-in IP and current time, and then executes the relevant process. # Output: normally nothing # dovecot.conf should be modified with these lines (where # /usr/lib/dovecot/popbsmtp.sh represents this script): # protocol pop3 { # mail_executable = /usr/lib/dovecot/popbsmtp.sh /usr/lib/dovecot/pop3 # } # protocol imap { # mail_executable = /usr/lib/dovecot/popbsmtp.sh /usr/lib/dovecot/imap # } # The HOME= lines are necessary to find $HOME/.my.cnf containing login info, # because mail_executable is executed as root, but without a home directory. # Of course this script must not be writable by anyone else than root. ( # drop out IPs from local networks that can relay anyway IP=`echo $IP | grep -v '^192\.168\.'` if [ -n "$IP" ] then export HOME=/root/ echo "replace into popbsmtp VALUES('$IP',now());" | mysql mail export HOME=/ fi ) >> /var/log/dovecot3 2>&1 exec "$@" ---%<------------------------------------------------------------------------- Example for postgresql, postfix ------------------------------- /usr/lib/dovecot/popbsmtp.sh ---%<------------------------------------------------------------------------- #!/bin/sh ( if [ -n "$IP" ] then /usr/bin/psql -U popbsmtp -d popbsmtp -c "begin;update auth set accessed=now() where host=substring('$IP' from 8);commit;insert into auth(host, accessed) values(substring('$IP' from 8),now());" fi ) >> /var/log/dovecot3 2>&1 exec "$@" ---%<------------------------------------------------------------------------- The substring call was necessary because $IP has '::ffff:' or something like that in front of the IP address on my system. The update followed by an insert, with the update in a transaction is necessary to replicate mysql's REPLACE INTO functionality. The INSERT will produce an error if the IP already exists but it doesn't matter as the UPDATE will have committed by then. /etc/postfix/main.cf ---%<------------------------------------------------------------------------- smtpd_recipient_restrictions = permit_mynetworks permit_sasl_authenticated permit_tls_clientcerts check_client_access pgsql:/etc/postfix/popbsmtp.cf reject_unauth_destination check_policy_service unix:private/policy ---%<------------------------------------------------------------------------- /etc/postfix/popbsmtp.cf ---%<------------------------------------------------------------------------- hosts = localhost user = username password = secret dbname = popbsmtp query = SELECT 'OK' as result FROM auth WHERE host = '%s' ---%<------------------------------------------------------------------------- /etc/cron.hourly/popbsmtp_purge ---%<------------------------------------------------------------------------- #!/bin/bash /usr/bin/psql -U popbsmtp -d popbsmtp -c "DELETE FROM auth WHERE (now() - accessed) > '30 minutes'::interval" ---%<------------------------------------------------------------------------- Example for MySQL, postfix -------------------------- Note that you can use this even if pop/imap and smtp are not on the same host as it is the case in my setup. 
First you have to create a table (in this example named "popbsmtp") with 2 fields: * address (varchar 39, primary) * last_seen (datetime) varchar size 39 is for IPv6 addresses.You should definitely consider adding IPv6 support to your popbsmtp solution because postfix and dovecot do well with IPv6. /!\ *address field* must be *primary* for "REPLACE into" to work. /opt/dovecot-popbsmtp.sh ---%<------------------------------------------------------------------------- #!/bin/sh ( if [ -n "$IP" ] then echo "REPLACE INTO virtual_mail.popbsmtp (address,last_seen) VALUES ('$IP', NOW( ))" \ | mysql -u user -p secret -h host > /dev/null 2>&1 fi ) exec "$@" ---%<------------------------------------------------------------------------- mail_executable in dovecot.conf looks something like this: ---%<------------------------------------------------------------------------- mail_executable = /opt/dovecot-popbsmtp.sh /usr/libexec/dovecot/imap ---%<------------------------------------------------------------------------- postfix map (/etc/postfix/mysql_popbsmtp_access_maps.cf): ---%<------------------------------------------------------------------------- hosts = mysqlhost user = user password = secret dbname = virtual_mail query = SELECT 'OK' FROM popbsmtp WHERE last_seen >= DATE_SUB(NOW(),INTERVAL 30 MINUTE) AND address = '%s' ---%<------------------------------------------------------------------------- In postfix main.cf add the following access map to your recipient restrictions (/!\ *before* "reject_unauth_destination"): ---%<------------------------------------------------------------------------- check_client_access mysql:$config_directory/mysql_popbsmtp_access_maps.cf ---%<------------------------------------------------------------------------- The 30 minute relay access period is handled by the INTERVAL in DATE_SUB. So it's safe anyway, but you should definitely run a cron job daily that deletes older records. That's to keep the table clean and speed up lookups. You might also need to run "OPTIMIZE TABLE" via the cron job to free up allocated space. relay-ctrl ========== relay-ctrl [http://untroubled.org/relay-ctrl/] consists of a few small programs designed to fit in qmail-like command chains. The most important: * 'relay-ctrl-allow' runs after a successful POP/IMAP login, recording the client IP and timestamp * 'relay-ctrl-check' runs before the SMTP server, enabling relaying if the client IP has authenticated recently 'relay-ctrl-allow' expects to find the client IP in the environment as '$TCPREMOTEIP'. Dovecot provides it as '$IP', so you'll need this tiny 'dovecot-settcpremoteip' wrapper script: ---%<------------------------------------------------------------------------- #!/bin/sh # # Wrapper for relay-ctrl-allow that sets TCPREMOTEIP. TCPREMOTEIP="${IP}"; export TCPREMOTEIP exec "$@" ---%<------------------------------------------------------------------------- Edit 'dovecot.conf' and set 'mail_executable' appropriately, e.g., for IMAP (this is one long line): ---%<------------------------------------------------------------------------- mail_executable = /usr/local/bin/envdir /etc/relay-ctrl /usr/local/bin/relay-ctrl-chdir /usr/local/bin/dovecot-settcpremoteip /usr/local/bin/relay-ctrl-allow /usr/local/libexec/dovecot/imap Dove ---%<------------------------------------------------------------------------- Restart Dovecot. Verify that your IMAP client still works. Verify that relay-ctrl has recorded your client IP. 
Hook 'relay-ctrl-check' into your SMTP service, as documented in the
relay-ctrl README, and you're done.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/TestInstallation.txt0000644000175000017500000001717012244263661015676 00000000000000Contents

 1. Check that it's running
 2. Check that it's listening
 3. Check that it's allowing logins
 4. Check that it's allowing remote logins
 5. Check that it finds INBOX
 6. Check that it finds other mailboxes
 7. Check that real mail clients work
 8. Make a graceful exit

For testing POP3 installation, see .

Check that it's running
-----------------------

First check with 'ps' that the 'dovecot' process is actually running. If it's
not, you had an error in 'dovecot.conf' and the error message was written to
the log. Go back to and if you can't find it.

Check that it's listening
-------------------------

Next check that Dovecot is listening for connections:

---%<-------------------------------------------------------------------------
# telnet localhost 143
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
* OK [CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE STARTTLS AUTH=PLAIN] Dovecot ready.
---%<-------------------------------------------------------------------------

If you got "connection refused", make sure that Dovecot is configured to serve
the imap protocol and is listening on the expected interfaces/addresses. The
simplest way to do that would be using [Tools.Doveconf.txt]:

---%<-------------------------------------------------------------------------
# doveconf protocols listen
protocols = imap pop3 lmtp sieve
listen = *, ::
---%<-------------------------------------------------------------------------

If the protocols setting doesn't contain 'imap', add it. Also make sure that
the relevant '!include' or '!try_include' configuration lines are not
commented out.

Next check that it also works from a remote host:

---%<-------------------------------------------------------------------------
# telnet imap.example.com 143
Trying 1.2.3.4...
Connected to imap.example.com.
Escape character is '^]'.
* OK [CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE STARTTLS AUTH=PLAIN] Dovecot ready.
---%<-------------------------------------------------------------------------

If that didn't work, check all possible firewalls in between, and check that
the 'listen' setting is '*' in 'dovecot.conf'. If you have only imaps enabled,
see the "remote login" section below for how to test using openssl s_client.

Check that it's allowing logins
-------------------------------

---%<-------------------------------------------------------------------------
# telnet localhost 143
a login "username" "password"
---%<-------------------------------------------------------------------------

Replace the username and password with the ones you added to 'passwd.dovecot'
in . Note that all IMAP commands begin with a tag, which is basically any
string you want, but it must be there. So don't leave out the "a" in the above
example. If the password contains a '"' character, escape it with '\'
(e.g. '"foo\"bar"').

You should get an "a OK Logged in." reply. If you get an "Authentication
failed" error, set 'auth_verbose = yes' and 'auth_debug = yes' in
'dovecot.conf', restart Dovecot and try again. The log file should now show
enough information to help you fix the problem.
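You can also test the passdb directly with doveadm, without going through IMAP
at all. As a sketch (assuming your Dovecot 2.2 build provides the 'doveadm
auth test' subcommand; on some older 2.x versions the equivalent is plain
'doveadm auth'):

---%<-------------------------------------------------------------------------
# doveadm auth test username password
---%<-------------------------------------------------------------------------

This reports whether the passdb accepted the credentials, which helps separate
authentication problems from IMAP-level ones.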
Check that it's allowing remote logins -------------------------------------- You'll need to try this from another computer, since all local IPs are treated as secure: ---%<------------------------------------------------------------------------- # telnet imap.example.com 143 a login "username" "password" ---%<------------------------------------------------------------------------- If the connection is hanging instead of giving '* Dovecot ready', you have a firewall that's preventing the connections. Otherwise, the only difference here compared to step above is that you might get: ---%<------------------------------------------------------------------------- * BAD [ALERT] Plaintext authentication is disabled, but your client sent password in plaintext anyway. If anyone was listening, the password was exposed. a NO Plaintext authentication disabled. ---%<------------------------------------------------------------------------- If this is the case, you didn't set 'disable_plaintext_auth = no'. You could alternatively use OpenSSL to test that the server works with SSL: * Test using imaps port (assuming you haven't disabled imaps port): ---%<---------------------------------------------------------------------- # openssl s_client -connect imap.example.com:993 * OK Dovecot ready. ---%<---------------------------------------------------------------------- * Test using imap port and STARTTLS command (works also with imap port): ---%<---------------------------------------------------------------------- # openssl s_client -connect imap.example.com:143 -starttls imap * OK Dovecot ready. ---%<---------------------------------------------------------------------- Check that it finds INBOX ------------------------- After logging in, check that the INBOX is found: ---%<------------------------------------------------------------------------- b select inbox * FLAGS (\Answered \Flagged \Deleted \Seen \Draft) * OK [PERMANENTFLAGS (\Answered \Flagged \Deleted \Seen \Draft \*)] Flags permitted. * 1 EXISTS * 1 RECENT * OK [UIDVALIDITY 1106186941] UIDs valid * OK [UIDNEXT 2] Predicted next UID b OK [READ-WRITE] Select completed. ---%<------------------------------------------------------------------------- It should contain the mail that you sent to yourself in step. If anything goes wrong, set 'mail_debug = yes' and try again. The log file should now contain debugging information of where Dovecot is trying to find the mails. Fix 'mail_location' setting and try again. Check that it finds other mailboxes ----------------------------------- If you already have other mailboxes created, you can check that Dovecot finds them: ---%<------------------------------------------------------------------------- c list "" * * LIST (\NoInferiors) "/" "test" * LIST (\NoInferiors) "/" "INBOX" c OK List completed. ---%<------------------------------------------------------------------------- If they weren't found, set 'mail_debug = yes' and look at the debugging information. Fix 'mail_location' setting and try again. Check that real mail clients work --------------------------------- Since mail clients can be configured in various ways, please check first if the problem is with Dovecot configuration or with the client's configuration. You can rule out it being Dovecot's problem with the "telnet" methods described above. If you can't log in, * Make sure SSL/TLS settings are correct. * Make sure the client uses plaintext authentication method, unless you've specifically configured Dovecot to accept others. 
If you can see only INBOX, * Clear out any "IMAP namespace prefix" or similar settings from clients. * Check if client is configured to show only "subscribed mailboxes". If so, you'll have to subscribe to the mailboxes you wish to see. You can see a list of subscribed mailboxes with: ---%<---------------------------------------------------------------------- d lsub "" * * LSUB () "/" "INBOX" d OK Lsub completed. ---%<---------------------------------------------------------------------- Most IMAP clients have been tested with Dovecot and they work. Make a graceful exit -------------------- To close the connection to Dovecot issue a logout: ---%<------------------------------------------------------------------------- e logout * BYE Logging out e OK Logout completed. ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Migration.Cyrus.txt0000644000175000017500000000650112244263650015424 00000000000000Cyrus ===== *WARNING: Badly done migration will cause your IMAP and/or POP3 clients to re-download all mails. Read page first carefully.* For POP3 UIDL compatibility, use one of: Cyrus versions up to v2.1.3: ---%<----------------------------------------------------------------------- # Cyrus (old format - up to v2.1.3) pop3_uidl_format = %u ---%<----------------------------------------------------------------------- Cyrus versions v2.1.4 and newer: ---%<----------------------------------------------------------------------- # Cyrus (new format - v2.1.4 and above) pop3_uidl_format = %v.%u ---%<----------------------------------------------------------------------- Mail storage migration ---------------------- There exists several scripts which can be used to convert Cyrus mail storages to Maildir. They all read the Cyrus mail directories directly, so they don't need a running Cyrus installation. * cyrus2dovecot [http://cyrus2dovecot.sw.fu-berlin.de/] (by Freie Universität Berlin) allows you to perform a server transition which is fully transparent to both POP and IMAP users, as virtually all available metadata is preserved during the conversion. This includes message UIDs, INTERNALDATEs, IMAP folder subscriptions, the UIDVALIDITY and UIDNEXT values for each folder, as well as all IMAP flags (including the first 26 user-defined keywords).Cyrus2Dovecot is supposed to work with all Cyrus releases up to (at least) version 2.3.x. So far, it has been tested with Cyrus 1.4, 2.1.18, 2.2.12, and 2.3.12p2. * cyrus2courier [http://madness.at/projects/] is Dovecot-compatible. A non-official v1.6ts release [http://dovecot.org/tools/] works up to Cyrus v2.3.9. It should be able to preserve message UIDs, INTERNALDATEs, flags and the first 26 keywords. It works only with the supported Cyrus versions, so if Cyrus once again changes its internal formats this tool might break again. * cyrus2maildir.py [http://www.majid.info/mylos/weblog/2006/03/08-1.html] (for Cyrus v2.2) preserves (only) INTERNALDATEs and \Seen flags. * cyrus2dovecot [http://trukenmueller.de/cyrus2dovecot] (by Trukenmüller) doesn't preserve timestamps or flags. You can also do the [Migration.Dsync.txt]. Migration of passwords ---------------------- Some installations of Cyrus store passwords using /Cyrus/ SASL (not to be confused with other SASL implementations). Passwords are stored in '/etc/sasldb2', in Berkeley DB format. On Debian, the command 'db4.2_dump -p /etc/sasldb2' may allow you access to the passwords. 
This could be incorporated into a script to copy the passwords to an LDAP directory for use with other mail servers (e.g. Dovecot). For Fedora Core 3 (and probably other versions) the command is just 'db_dump -p /etc/sasldb2'. Namespaces ---------- Cyrus uses one of the following namespace configurations depending on the altnamespace and unixhierarchysep options: ---%<------------------------------------------------------------------------- namespace inbox { prefix = INBOX. # no altnamespace #prefix = "" # altnamespace separator = . # no unixhierarchysep #separator = / # unixhierarchysep } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Plugins.FTS.Lucene.txt0000644000175000017500000000344012244263657015663 00000000000000Lucene Full Text Search Indexing ================================ Requires Dovecot v2.1+ to work properly. The CLucene version must be v2.3 (not v0.9). Dovecot builds only a single Lucene index for all mailboxes. The Lucene indexes are stored in 'lucene-indexes/' directory under the mail root index directory (e.g.'~/Maildir/lucene-indexes/'). Configuration ------------- ---%<------------------------------------------------------------------------- mail_plugins = $mail_plugins fts fts_lucene plugin { fts = lucene # Lucene-specific settings, good ones are: fts_lucene = whitespace_chars=@. } ---%<------------------------------------------------------------------------- The fts-lucene settings include: * whitespace_chars=: List of characters that are translated to whitespace. You may want to use "@." so that e.g. in "'first.last@example.org'" it won't be treated as a single word, but rather you can search separately for "first", "last" and "example". * default_language=: Default stemming language to use for mails. The default is english. Requires that Dovecot is built with libstemmer, which also limits the languages that are supported. * textcat_conf= textcat_dir=: If specified, enable guessing the stemming language for emails and search keywords. This is a little bit problematic in practice, since indexing and searching languages may differ and may not find even exact words because they stem differently. Libraries --------- * CLucene [http://sourceforge.net/projects/clucene/files/]: Get v2.3.3.4 (not v0.9) * libstemmer [http://snowball.tartarus.org/download.php]: Builds libstemmer.o, which you can rename to libstemmer.a * textcat [http://textcat.sourceforge.net/] (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Plugins.Autocreate.txt0000644000175000017500000000160612244263656016112 00000000000000Autocreate plugin (v2.0 and older) ================================== With v2.1+ you don't need this plugin. Use [MailboxSettings.txt] instead. This plugin allows administrator to specify mailboxes that must always exist for all users. They can optionally also be subscribed. The mailboxes are created and subscribed always after user logs in. Namespaces are fully supported, so namespace prefixes need to be used where necessary. Example: ---%<------------------------------------------------------------------------- protocol imap { mail_plugins = $mail_plugins autocreate } plugin { autocreate = Trash autocreate2 = Spam #autocreate3 = ..etc.. autosubscribe = Trash autosubscribe2 = Spam #autosubscribe3 = ..etc.. 
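  # Namespace prefixes must be used where necessary (see above); with e.g. an
  # "INBOX." prefix namespace these would presumably be "INBOX.Trash",
  # "INBOX.Spam" and so on.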
} ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/LoginProcess.txt0000644000175000017500000001075712244263647015014 00000000000000Login processes =============== The main purpose of login processes is to handle the IMAP and POP3 connections before the user has logged in. The login processes don't need to be able to do anything else than let the user log in, so they can run in highly restricted environment. By default they are run as a non-privileged "dovenull" user chrooted into a non-writable directory containing only authentication UNIX sockets. Login processes also handle proxying the SSL and TLS connections even after the user has logged in. This way all the SSL code runs in the same restricted environment, which means that a security hole in the SSL library gives the attacker access only to the restricted chroot, rather than possibly all the users' mails. The default login settings should be good enough for small sites. There are two ways to run the login processes: the high-security mode and the high-performance mode. Both are discussed separately below. High-security mode ------------------ You can enable high-security mode with: ---%<------------------------------------------------------------------------- service imap-login { service_count = 1 #process_min_avail = 0 #process_limit = $default_process_limit #vsz_limit = 64M } service pop3-login { service_count = 1 } ---%<------------------------------------------------------------------------- This is the default. It works by using a new imap-login or pop3-login process for each incoming connection. Since the processes run in a highly restricted chroot, running each connection in a separate process means that in case there is a security hole in Dovecot's pre-authentication code or in the SSL library, the attacker can't see other users' connections and can't really do anything destructive. The only way out of it is to find and exploit a kernel security hole. Since one login process can handle only one connection, the service's 'process_limit' setting limits the number of users that can be logging in at the same time (defaults to 'default_process_limit=100'). SSL/TLS proxying processes are also counted here, so if you're using SSL/TLS you'll need to make sure this count is higher than the maximum number of users that can be logged in simultaneously. * If the maximum login process count is reached, the oldest process in logging-in state (ie. non-proxying) is destroyed. * To avoid startup latency for new client connections, set 'process_min_avail' to higher than zero. That many idling processes are always kept around waiting for new connections. * 'vsz_limit' should be fine at its default 64MB value. High-performance mode --------------------- You can enable high-performance mode with: ---%<------------------------------------------------------------------------- service imap-login { service_count = 0 #client_limit = $default_client_limit #process_min_avail = 0 #vsz_limit = 64M } service pop3-login { service_count = 0 } ---%<------------------------------------------------------------------------- It works by using a number of long running login processes, each handling a number of connections. 
This loses much of the security benefits of the login process design, because in case of a security hole (in Dovecot or SSL library) the attacker is now able to see other users logging in and steal their passwords, read their mails, etc. * 'process_min_avail' should be set to be at least the number of CPU cores in the system, so that all of them will be used. * Otherwise new processes are created only once an existing one's connection count reaches client_limit * Default client_limit * process_limit = 1000*100 = 100k connections * vsz_limit should be increased to avoid out of memory errors, especially if you're using SSL/TLS. Login access check sockets -------------------------- Dovecot login processes can check via UNIX socket if the incoming connection should be allowed to log in. This is most importantly implemented to enable TCP wrappers support for Dovecot. TCP wrappers support -------------------- You must have built Dovecot with support for TCP wrappers. You can do this by giving '--with-libwrap' parameter to 'configure'. Add to dovecot.conf: ---%<------------------------------------------------------------------------- login_access_sockets = tcpwrap service tcpwrap { unix_listener login/tcpwrap { group = $default_login_user mode = 0600 user = $default_login_user } } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.txt0000644000175000017500000000405012244263643013577 00000000000000Dovecot Design ============== * [Design.Processes.txt] * [Design.Indexes.txt] * [Design.Indexes.MailIndexApi.txt] * [Design.AuthProcess.txt] * [Design.AuthProtocol.txt] * [Design.MailProcess.txt] * [Design.DoveadmProtocol.txt] Code APIs --------- * [Design.Code.txt] - explanations how and why the coding style is the way it is Look at the *.h files for the actual API documentation. The documentation below doesn't attempt to list full API documentation. liblib: * [Design.Memory.txt] * [Design.Buffers.txt] * [Design.Arrays.txt] * [Design.Strings.txt] * [Design.InputStreams.txt] * [Design.OutputStreams.txt] * [Design.Plugins.txt] lib-storage: * [Design.Storage.MailUser.txt] contains everything related to a single user. * [Design.Storage.MailNamespace.txt]: A single user can contain multiple [Namespaces.txt]. * [Design.Storage.MailboxList.txt] is used to list/manage a list of mailboxes for a single namespace (1:1 relationship). * [Design.Storage.MailStorage.txt] is used to access mails in a specific location with a specific mailbox format. Multiple namespaces can point to the same storage. A single namespace may in future (but not currently) point to multiple storages (e.g. a mixed mbox and Maildir directory). * [Design.Storage.Mailbox.txt] is used to access a specific mailbox in a storage. * [Design.Storage.Mail.txt] is used to access a specific mail in a mailbox. * [Design.Storage.ErrorHandling.txt]. * [Design.Storage.Plugins.txt] - how to hook into lib-storage functions. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Pigeonhole.Sieve.Examples.txt0000644000175000017500000003510212244263656017314 00000000000000Pigeonhole Sieve examples ========================= Contents 1. Pigeonhole Sieve examples 1. Mail filtering by various headers 2. Flagging or Highlighting your mail 3. Spam/Virus rules 1. Direct filtering using message header 2. Filtering using the spamtest and virustest extensions 4. Plus Addressed mail filtering 5. Vacation auto-reply 6. 
Include scripts 7. Archiving a Mailinglist by Date 8. Translation from Procmail Below are some simple Sieve code examples, more can be found from http://libsieve.sourceforge.net/script1.php and http://fastmail.wikia.com/wiki/SieveExamples. Mail filtering by various headers --------------------------------- Use if/elsif/else to store messages into various folders/subfolders: * ---%<---------------------------------------------------------------------- require ["fileinto", "envelope"]; if address :is "to" "dovecot@dovecot.org" { fileinto "Dovecot-list"; } elsif envelope :is "from" "owner-cipe-l@inka.de" { fileinto "lists.cipe"; } elsif anyof (header :contains "X-listname" "lugog@cip.rz.fh-offenburg.de", header :contains "List-Id" "Linux User Group Offenburg") { fileinto "ml.lugog"; } else { # The rest goes into INBOX # default is "implicit keep", we do it explicitly here keep; } ---%<---------------------------------------------------------------------- "anyof" means logical OR, "allof" is AND. Forward mails with "order" or "buy" in their subject to another address: * ---%<---------------------------------------------------------------------- if header :contains "subject" ["order", "buy"] { redirect "orders@company.dom"; } ---%<---------------------------------------------------------------------- Message-ID and recipient of forwarded message are stored in a '.dovecot.lda-dupes' at users home directory to prevent mail loops. Flagging or Highlighting your mail ---------------------------------- Some mail readers use these flags: ---%<------------------------------------------------------------------------- require "imap4flags"; require "regex"; if anyof (exists "X-Cron-Env", header :regex ["subject"] [".* security run output", ".* monthly run output", ".* daily run output", ".* weekly run output"]) { addflag "$label1"; # ie 'Important'/red label within Thunderbird # Other flags: # addflag "$label1"; # Important: #ff0000 => red # addflag "$label2"; # Work: #ff9900 => orange # addflag "$label3"; # personal: #009900 => green # addflag "$label4"; # todo: #3333ff => blue # addflag "$label5"; # later: #993399 => violet # } ---%<------------------------------------------------------------------------- Local copy of your emails: ---%<------------------------------------------------------------------------- require ["envelope", "imap4flags"]; if envelope "from" "my_address@my_domain.com" { setflag "\\seen"; } ---%<------------------------------------------------------------------------- /Useful, when you want sieve to manage your incoming *and* outgoing email (you must ask your mail reader to Bcc your mail to your dovecot in this case)./ Spam/Virus rules ---------------- Most spam and virus scanners add a special header to mail messages, so that users can apply filtering accordingly. Depending on how the Sieve interpreter is configured, filtering can either be performed by evaluating these headers directly, or using the spamtest and virustest extensions. Direct filtering using message header ------------------------------------- Evaluating the headers directly is always possible as long as the headers are actually added to the messages by the scanner software. 
For example, to fileSpamAssassin-tagged mails into a folder called "Spam": ---%<------------------------------------------------------------------------- require "fileinto"; if header :contains "X-Spam-Flag" "YES" { fileinto "Spam"; } ---%<------------------------------------------------------------------------- The following example discards SpamAssassin-tagged mails with level higher than or equal to 10: ---%<------------------------------------------------------------------------- if header :contains "X-Spam-Level" "**********" { discard; stop; } ---%<------------------------------------------------------------------------- Some spam scanners only produce a numeric score in a header. Then, the test becomes more involved: ---%<------------------------------------------------------------------------- require ["comparator-i;ascii-numeric","relational"]; if allof ( not header :matches "x-spam-score" "-*", header :value "ge" :comparator "i;ascii-numeric" "x-spam-score" "10" ) { discard; stop; } ---%<------------------------------------------------------------------------- *NOTE:* Be very careful when matching against spam score headers using the relational extension and the i;ascii-numeric comparator. This comparator can only be used to match unsigned integers. Strings that do not begin with a digit character represent positive infinity and will therefore always be larger than any score mentioned in your rule! That is why the above example first checks the minus sign explicitly. Filtering using the spamtest and virustest extensions ----------------------------------------------------- When the spamtest [http://tools.ietf.org/html/rfc5235#section-3.2] and virustest [http://tools.ietf.org/html/rfc5235#section-3.3] extensions are configured on the server ( [Pigeonhole.Sieve.Extensions.SpamtestVirustest.txt] is explained how), users (and GUIs) can have a much easier way to filter spam and virus messages respectively. To filter spam, the spamtest extension can for example be used as follows: ---%<------------------------------------------------------------------------- require "spamtestplus"; require "fileinto"; require "relational"; require "comparator-i;ascii-numeric"; /* If the spamtest fails for some reason, e.g. spam header is missing, file * file it in a special folder. */ if spamtest :value "eq" :comparator "i;ascii-numeric" "0" { fileinto "Unclassified"; /* If the spamtest score (in the range 1-10) is larger than or equal to 3, * file it into the spam folder: */ } elsif spamtest :value "ge" :comparator "i;ascii-numeric" "3" { fileinto "Spam"; /* For more fine-grained score evaluation, the :percent tag can be used. The * following rule discards all messages with a percent score * (relative to maximum) of more than 85 %: */ } elsif spamtest :value "gt" :comparator "i;ascii-numeric" :percent "85" { discard; } /* Other messages get filed into INBOX */ ---%<------------------------------------------------------------------------- The virustest extension can be used in a similar manner: ---%<------------------------------------------------------------------------- require "virustest"; require "fileinto"; require "relational"; require "comparator-i;ascii-numeric"; /* Not scanned ? 
*/ if virustest :value "eq" :comparator "i;ascii-numeric" "0" { fileinto "Unscanned"; /* Infected with high probability (value range in 1-5) */ } if virustest :value "eq" :comparator "i;ascii-numeric" "4" { /* Quarantine it in special folder (still somewhat dangerous) */ fileinto "Quarantine"; /* Definitely infected */ } elsif virustest :value "eq" :comparator "i;ascii-numeric" "5" { /* Just get rid of it */ discard; } ---%<------------------------------------------------------------------------- Plus Addressed mail filtering ----------------------------- Using the subaddress [http://tools.ietf.org/html/rfc5233/] extension, it is possible to match against the 'detail' part of an e-mail address, e.g. a ''+tag'' suffix to the local part of the address. This is for example useful when you don't want just any +tag to create a directory, but you want to use tagged addresses such as with amavisd-new. This example would place email addressed to user+spam@example.com into user's Spam folder. ---%<------------------------------------------------------------------------- require ["fileinto", "envelope", "subaddress"]; if envelope :detail "to" "spam"{ fileinto "Spam"; } ---%<------------------------------------------------------------------------- The following more advanced example uses the subaddress [http://tools.ietf.org/html/rfc5233/] extension to handle recipient addresses structured as 'sales+@company.com' in a special way. The '' part is extracted from the address using variables [http://tools.ietf.org/html/rfc5229/] extension, transformed into a format with the first letter in upper case and subsequently used to create the folder name where the message is stored. The folder name is structured as 'users/'. If the '+' detail is omitted from the recipient address, the message is filed in the 'sales' folder. ---%<------------------------------------------------------------------------- require ["variables", "envelope", "fileinto", "subaddress"]; if envelope :is :user "to" "sales" { if envelope :matches :detail "to" "*" { /* Save name in ${name} in all lowercase except for the first letter. * Joe, joe, jOe thus all become 'Joe'. */ set :lower :upperfirst "name" "${1}"; } if string :is "${name}" "" { /* Default case if no detail is specified */ fileinto "sales"; } else { /* For sales+joe@ this will become users/Joe */ fileinto "users/${name}"; } } ---%<------------------------------------------------------------------------- To work with Postfix, this requires that the envelope "to" still contains the full address, so pass it with the -a flag. ---%<------------------------------------------------------------------------- dovecot unix - n n - - pipe flags=DRhu user=mail:mail argv=/usr/local/libexec/dovecot/dovecot-lda -f ${sender} -d ${user}@${nexthop} -a ${recipient} ---%<------------------------------------------------------------------------- or ---%<------------------------------------------------------------------------- mailbox_command = /usr/lib/dovecot/dovecot-lda -a "$RECIPIENT" ---%<------------------------------------------------------------------------- Vacation auto-reply ------------------- Auto-responder functionality is implemented using the vacation [http://tools.ietf.org/html/rfc5230/] extension. 
The following script sends out-of-office replies when the message is not spam: ---%<------------------------------------------------------------------------- require ["fileinto", "vacation"]; # Move spam to spam folder if header :contains "X-Spam-Flag" "YES" { fileinto "spam"; # Stop here so that we do not reply on spams stop; } vacation # Reply at most once a day to a same sender :days 1 :subject "Out of office reply" # List of additional recipient addresses which are included in the auto replying. # If a mail's recipient is not the envelope recipient and it's not on this list, # no vacation reply is sent for it. :addresses ["j.doe@company.dom", "john.doe@company.dom"] "I'm out of office, please contact Joan Doe instead. Best regards John Doe"; ---%<------------------------------------------------------------------------- It's also possible to include the original subject using the variables [http://tools.ietf.org/html/rfc5229/] extension: ---%<------------------------------------------------------------------------- require ["variables", "vacation"]; # Store old Subject line so it can be used in vacation message if header :matches "Subject" "*" { set "subjwas" ": ${1}"; } vacation :days 1 :subject "Out of office reply${subjwas}" :addresses ["j.doe@company.dom", "john.doe@company.dom"] "I'm out of office, please contact Joan Doe instead. Best regards John Doe"; ---%<------------------------------------------------------------------------- Include scripts --------------- It's possible to include other Sieve scripts in your script: ---%<------------------------------------------------------------------------- require ["include"]; include :global "global-spam"; include :personal "my-own-spam"; ---%<------------------------------------------------------------------------- The lookup directories can be specified with: ---%<------------------------------------------------------------------------- plugin { # Directory for :personal include scripts. The default is to use home directory. sieve_dir = %h/sieve # Directory for :global include scripts (not to be confused with sieve_global_path). # If unset, the include fails. sieve_global_dir = /etc/dovecot/sieve/ } ---%<------------------------------------------------------------------------- Both 'sieve_dir' and 'sieve_global_dir' may also be overridden by [UserDatabase.ExtraFields.txt]. It's not currently possible to use subdirectories for the scripts. Having a '/' character in the script name always fails the include. This is just an extra check to avoid potential problems with including scripts within mail directories. Archiving a Mailinglist by Date ------------------------------- You can archive messages from mailing lists in a date-structured folder tree as follows: ---%<------------------------------------------------------------------------- require ["variables","date","fileinto","mailbox"]; # Extract date info if currentdate :matches "year" "*" { set "year" "${1}"; } if currentdate :matches "month" "*" { set "month" "${1}"; } # Archive Dovecot mailing list items by year and month. # Create folder when it does not exist. if header :is "list-id" "dovecot.dovecot.org" { fileinto :create "INBOX.Lists.${year}.${month}.dovecot"; } ---%<------------------------------------------------------------------------- For example, in March 2013 this puts messages from the Dovecot mailing list in a folder called 'INBOX.Lists.2013.03.dovecot'. 
It combines the date [http://tools.ietf.org/html/rfc5260#section-4] and variables [http://tools.ietf.org/html/rfc5229/] extensions to extract the required date strings. Using the ':create' argument for the 'fileinto' command, the indicated folder is created automatically if it doesn't exist. The ':create' argument is provided by the mailbox [http://tools.ietf.org/html/rfc5490#section-3] extension. Translation from Procmail ------------------------- There exists a script which attempts to translate simple Procmail rules into Sieve rules:http://www.earth.ox.ac.uk/~steve/sieve/procmail2sieve.pl (dovecot.org mirror [http://dovecot.org/tools/procmail2sieve.pl]) Here's the original post announcing it: http://dovecot.org/list/dovecot/2007-March/020895.html (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Plugins.Trash.txt0000644000175000017500000000300012244263657015066 00000000000000Trash Plugin ============ This requires [Quota.txt] to be loaded and configured to use non- [Quota.FS.txt]. Normally if a message can't be saved/copied because it would bring user over quota, the save/copy fails with "Quota exceeded" error. The trash plugin can be used to avoid such situations by making Dovecot automatically expunge oldest messages from configured mailboxes until the message can be saved. If the new message is large enough that it wouldn't fit even if all messages from configured mailboxes were expunged, then none are and user gets the "Quota exceeded" error. The configuration file is a text file where each line is in format: ' '. Mails are deleted in lowest -> highest priority number order. dovecot.conf: ---%<------------------------------------------------------------------------- mail_plugins = $mail_plugins quota trash plugin { trash = /etc/dovecot/dovecot-trash.conf.ext } ---%<------------------------------------------------------------------------- dovecot-trash.conf.ext: ---%<------------------------------------------------------------------------- # Spam mailbox is emptied before Trash 1 Spam # Trash mailbox is emptied before Sent 2 Trash # If both Sent and "Sent Messages" mailboxes exist, the next oldest message # to be deleted is looked up from both of the mailboxes. 3 Sent 3 Sent Messages ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/LDA.Sendmail.txt0000644000175000017500000001036512244263647014533 00000000000000Dovecot LDA with Sendmail ========================= The following describes how to configure Sendmail to use 'dovecot-lda' where 'root' permission is not granted and Dovecot runs under a single user ID. It may need some adjustment for more typical setups. Other assumptions are that Sendmail is configured for virtual hosting and that local-system mail delivery is not handled by 'dovecot-lda'. 
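Before wiring Sendmail to 'dovecot-lda', it can be useful to confirm that the LDA delivers on its own. A minimal sketch, assuming the same install path as used below and an existing test user 'jane' (adjust both to your setup):

---%<-------------------------------------------------------------------------
# printf 'From: test@example.com\nSubject: LDA test\n\ntest body\n' | \
    /usr/local/libexec/dovecot/dovecot-lda -d jane
---%<-------------------------------------------------------------------------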
Allowing that 'sendmail.mc' has 'MAILER(procmail)dnl' included, edit 'sendmail.cf' adding these lines after the 'Mprocmail' definition: ---%<------------------------------------------------------------------------- ######################*****############## ### DOVECOT Mailer specification ### ##################*****################## Mdovecot, P=/usr/local/libexec/dovecot/dovecot-lda, F=DFMPhnu9, S=EnvFromSMTP/HdrFromSMTP, R=EnvToSMTP/HdrFromSMTP, T=DNS/RFC822/X-Unix, A=/usr/local/libexec/dovecot/dovecot-lda -d $u ---%<------------------------------------------------------------------------- If you're using 'sendmail.mc' then put the lines above into a new file '/usr/share/sendmail-cf/mailer/dovecot.m4' and put 'MAILER(dovecot)' into your 'sendmail.mc' =================================== Another method of doing the above is by editing your 'hostname.mc' with the following three lines: ---%<------------------------------------------------------------------------- FEATURE(`local_procmail', `/usr/local/libexec/dovecot/dovecot-lda',`/usr/local/libexec/dovecot/dovecot-lda -d $u') MODIFY_MAILER_FLAGS(`LOCAL', `-f') MAILER(procmail) ---%<------------------------------------------------------------------------- After editing 'hostname.mc' with the above, be sure to remake your 'hostname.cf' file. This is confirmed to work with: * dovecot-1.0.7 * FreeBSD 6.3-RELEASE-p3 i386 * sendmail Version 8.14.2 * Compiled with: DNSMAP LOG MAP_REGEX MATCHGECOS MILTER MIME7TO8 MIME8TO7 NAMED_BIND NETINET NETINET6 NETUNIX NEWDB NIS PIPELINING SASLv2 SCANF STARTTLS TCPWRAPPERS USERDB XDEBUG =================================== If 'sendmail' runs under a different non-'root' UID via * 'define(`confRUN_AS_USER', `sendmail')dnl' in 'sendmail.mc', then the /env_put(t_strconcat("RESTRICT_/ lines in 'deliver.c' must be commented-out. Now add a ---%<------------------------------------------------------------------------- virtualdomain.example.com vmail:vmail ---%<------------------------------------------------------------------------- line for each virtual domain to 'mailertable.cf' and run 'makemap hash mailertable.db < mailertable.cf'. The 'dovecot' (or some other random text) after the colon character is required, else 'sendmail' will fail to pass command arguments to 'dovecot-lda' correctly. Make sure all the virtual domains are in the 'virtuserdomains' file. =========================================== (Fedora 14: dovecot 2.0.8 & sendmail 8.14.4) Summing up all previous experience, one may keep all virtual user accounts under one system account. The sendmail's "U=" mailer option with changing the owner of lda (to "keeper" here for instance): ---%<------------------------------------------------------------------------- -rwxr-xr-x. 1 keeper mail 14536 Dec 7 16:43 /usr/libexec/dovecot/dovecot-lda ---%<------------------------------------------------------------------------- allows to run virtual users under one system account without applying SUID. Sendmail can pass a user account to LDA with or without the domain. Passing a user name without the domain can be achievedwith S=/R= rewriting rules of the local mailer. 
Finally, into '/usr/share/sendmail-cf/mailer/dovecot.m4' goes the block of lines: ---%<------------------------------------------------------------------------- Mdovecot, P=/usr/libexec/dovecot/dovecot-lda, F=l59DFMPhnu, S=EnvFromL/HdrFromL, R=EnvToL/HdrToL, M=51200000, U=keeper:mail, T=DNS/RFC822/X-Unix, A=/usr/libexec/dovecot/dovecot-lda -d $u ---%<------------------------------------------------------------------------- dovecot.m4 [http://sites.google.com/site/mclroy/dovecot/dovecot-m4] can be a bit more complex. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/SharedMailboxes.Shared.txt0000644000175000017500000002446612244263661016662 00000000000000Mailbox sharing between users ============================= To enable mailbox sharing, you'll need to create a shared namespace. See for more information about ACL-specific settings. ---%<------------------------------------------------------------------------- # User's private mail location. mail_location = maildir:~/Maildir # When creating any namespaces, you must also have a private namespace: namespace { type = private separator = / prefix = #location defaults to mail_location. inbox = yes } namespace { type = shared separator = / prefix = shared/%%u/ location = maildir:%%h/Maildir:INDEX=~/Maildir/shared/%%u subscriptions = no list = children } mail_plugins = acl protocol imap { mail_plugins = $mail_plugins imap_acl } plugin { acl = vfile } ---%<------------------------------------------------------------------------- This creates a shared/ namespace under which each user's mailboxes are. If you have multiple domains and allow sharing between them, you might want to set 'prefix=shared/%%d/%%n/' instead (although %%u works just fine too). If you don't, you might want to drop the domain part and instead use 'prefix=shared/%%n/'. 'list=children' specifies that if no one has shared mailboxes to the user, the "shared" directory isn't listed by the LIST command. If you wish it to be visible always, you can set 'list=yes'. The 'location' setting specifies how to access other users' mailboxes. If you use %%h, the user's home directory is asked from auth process via auth-userdb socket. See for how to configure the socket. If the users' mailboxes can be found using a template, it's faster not to use the %%h. For example: ---%<------------------------------------------------------------------------- location = maildir:/var/mail/%%d/%%n/Maildir:INDEX=~/Maildir/shared/%%u ---%<------------------------------------------------------------------------- % vs %% ------- %var expands to the logged in user's variable, while %%var expands to the other users' variables. For example if your name is "myself" and "someone1" and "someone2" have shared mailboxes to you, the variables could be expanded like: * %u expands to "myself" * %%u expands to "someone1" or "someone2" * %h might expand to "/home/myself" * %%h might expand to "/home/someone1" or "/home/someone2" * ~/ equals %h/ Note that in e.g. mail_location setting you might need both. For example in: ---%<------------------------------------------------------------------------- mail_location = maildir:%%h/Maildir:INDEX=%h/Maildir/shared/%%u ---%<------------------------------------------------------------------------- What it means is: * %%h/Maildir points to the other user's Maildir, e.g. "/home/someone1". * :INDEX=%h/Maildir/shared/%%u points to a per-user directory under your own Maildir, e.g. "/home/myself/Maildir/someone1" or "/home/myself/Maildir/someone2". 
This is necessary to keep a local copy of the other users' index files. dbox ---- With dbox the index files are a very important part of the mailboxes. You must not try to :INDEX= to have copies of index files. This will only result in mailbox corruption. This also means that with dbox there's currently no way to have private \Seen flags. v2.2+ has support for private index files that makes this possible. Filesystem permissions ---------------------- Dovecot assumes that it can access the other users' mailboxes. If you use multiple UNIX UIDs, you may have problems setting up the permissions so that the mailbox sharing works. Dovecot never modifies existing files' permissions. See for more information. Shared mailbox listing ---------------------- With the above configuration it's possible to open shared mailboxes if you know their name, but they won't be visible in the mailbox list. This is because Dovecot has no way of knowing what users have shared mailboxes to whom. Iterating through all users and looking inside their mail directories would be horribly inefficient for more than a couple users. To overcome this problem Dovecot needs a dictionary, which contains the list of users who have shared mailboxes and to whom they have shared. If the users aren't properly listed in this dictionary, their shared mailboxes won't be visible. Currently there's no way to automatically rebuild this dictionary, so make sure it doesn't get lost. If it does, each user having shared mailboxes must use the IMAP SETACL command (see below) to get the dictionary updated for themselves. You could use any dictionary backend, including SQL, but a simple flat file should work pretty well too: ---%<------------------------------------------------------------------------- plugin { acl_shared_dict = file:/var/lib/dovecot/db/shared-mailboxes.db } ---%<------------------------------------------------------------------------- The IMAP processes must be able to write to the 'db/' directory. If you're using system users, you probably want to make it mode 0770 and group 'sharedusers' and set 'mail_access_groups=sharedusers' (or something similar). If you use multiple domains and don't wish users to share their mailboxes to users in other domains, you can use separate dict files for each domain: ---%<------------------------------------------------------------------------- plugin { # assumes mailboxes are in /var/mail/%d/%n: acl_shared_dict = file:/var/mail/%d/shared-mailboxes.db } ---%<------------------------------------------------------------------------- Using SQL dictionary -------------------- 'dovecot.conf': ---%<------------------------------------------------------------------------- plugin { acl_shared_dict = proxy::acl } dict { acl = pgsql:/etc/dovecot/dovecot-dict-sql.conf.ext } ---%<------------------------------------------------------------------------- See for more information, especially about permission issues. 
Database tables: ---%<------------------------------------------------------------------------- CREATE TABLE user_shares ( from_user varchar(100) not null, to_user varchar(100) not null, dummy char(1) DEFAULT '1', -- always '1' currently primary key (from_user, to_user) ); COMMENT ON TABLE user_shares IS 'User from_user shares folders to user to_user.'; CREATE INDEX to_user ON user_shares (to_user); -- because we always search for to_user CREATE TABLE anyone_shares ( from_user varchar(100) not null, dummy char(1) DEFAULT '1', -- always '1' currently primary key (from_user) ); COMMENT ON TABLE anyone_shares IS 'User from_user shares folders to anyone.'; ---%<------------------------------------------------------------------------- '/etc/dovecot/dovecot-dict-sql.conf.ext': ---%<------------------------------------------------------------------------- connect = host=localhost dbname=mails user=sqluser password=sqlpass map { pattern = shared/shared-boxes/user/$to/$from table = user_shares value_field = dummy fields { from_user = $from to_user = $to } } map { pattern = shared/shared-boxes/anyone/$from table = anyone_shares value_field = dummy fields { from_user = $from } } ---%<------------------------------------------------------------------------- IMAP ACL commands ----------------- Mailbox sharing is expected to be done using IMAP SETACL command. It is the only way to update the shared mailbox list dictionary. Below is a quick introduction to IMAP ACL commands. See RFC 4314 [http://www.ietf.org/rfc/rfc4314.txt] for more details. * MYRIGHTS : Returns the user's current rights to the mailbox. * GETACL : Returns the mailbox's all ACLs. * SETACL [+|-]: Give the specified rights to the mailbox. * DELETEACL [-]: Delete 's ACL from the mailbox. is one of: * anyone: Matches all users, including anonymous users. * authenticated: Like "anyone", but doesn't match anonymous users. * $group: Matches all users belonging to the group ($ is not part of the group name). * $!group: See group-override in (Dovecot-specific feature). * user: Matches the given user. The $group syntax is not a standard, but it is mentioned in RFC 4314 examples and is also understood by at least Cyrus IMAP. Having '-' before the identifier specifies negative rights. See for list of . Sharing mailboxes to everyone ----------------------------- By default Dovecot doesn't allow using the IMAP "anyone" or "authenticated" identifier, because it would be an easy way to spam other users in the system. If you wish to allow it, set: ---%<------------------------------------------------------------------------- plugin { acl_anyone = allow } ---%<------------------------------------------------------------------------- Note that you can also do this only for some users by using the second table "anyone_shares". Every user listed in this table shares his folders with everyone. See also [UserDatabase.ExtraFields.txt]. IMAP ACL examples ----------------- Let's begin with some simple example that first gives "read" and "lookup" rights, and later adds "write-seen" right: ---%<------------------------------------------------------------------------- 1 SETACL Work user@domain rl 1 OK Setacl complete. 2 SETACL Work user@domain +s 2 OK Setacl complete. 3 GETACL Work * ACL "Work" "user@domain" lrs "myself" lrwstipekxacd 3 OK Getacl completed. ---%<------------------------------------------------------------------------- Let's see how negative rights work by testing it on ourself. 
See how we initially have "lookup" right, but later we don't: ---%<------------------------------------------------------------------------- 1 MYRIGHTS Work * MYRIGHTS "Work" lrwstipekxacd 1 OK Myrights completed. 2 SETACL Work -myself l 2 OK Setacl complete. 3 GETACL Work * ACL "Work" "-myself" l "user@domain" lr "myself" lrwstipekxacd 3 OK Getacl completed. 4 myrights Work * MYRIGHTS "Work" rwstipekxacd 4 OK Myrights completed. ---%<------------------------------------------------------------------------- Troubleshooting --------------- * Make sure the % and %% variables are specified correctly in the namespace location.'mail_debug=yes' will help you see if Dovecot is trying to access correct paths. * 'doveadm acl debug -u user@domain shared/user/box' can be helpful in figuring out why a mailbox can't be accessed. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Migration.Gmail.txt0000644000175000017500000000271312244263650015351 00000000000000Migration from Gmail to Dovecot =============================== You can use [Migration.Dsync.txt] protocol, but there are a few things different with Gmail compared to other IMAP servers: * With Gmail when you delete a mail from POP3, the mail is only hidden from future POP3 sessions, but it's still available via IMAP. If you wish to preserve this functionality, there's a 'pop3_deleted_flag' setting in Dovecot v2.2.2+. * Gmail has labels. If a message has multiple labels, it shows up in multiple IMAP folders, but it's still the same message and uses quota only once for that message. Dovecot currently doesn't have such support, so the migration will copy the message to multiple folders and each instance will use up quota. There's currently no easy fix for this, although there are some future plans to optionally not count message copies towards quota. * Gmail has virtual folders: "All Mail", "Starred" and "Important". From migration point of view this means that the migration should skip these folders, since their mails are in other folders anyway. With v2.2.3+ you can tell dsync to skip these folders:'doveadm sync -x '\All' -x '\Flagged' -x '\Important'' - by using the \flag parameters dsync finds the folders by their SPECIAL-USE flag rather than their name (which may be different for different user depending on their language). (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MailboxFormat.Cydir.txt0000644000175000017500000000154412244263647016214 00000000000000Cydir ===== This mailbox format is very similar to Cyrus IMAP's internal mail store: * Messages are stored in "." named files * Cyrus's 'cyrus.index' is equivalent to Dovecot's 'dovecot.index'. Dovecot however also requires 'dovecot.index.log' for its indexing to work. * Cyrus's 'cyrus.cache' is equivalent to Dovecot's 'dovecot.index.cache'. Cydir is a very simple format internally. It relies on Dovecot's [IndexFiles.txt] completely for its functionality. If the index files are lost, all the message flags are lost. Currently the code can't even rebuild index files if they're lost. Cydir is mostly meant to be used for benchmarking and stress testing index handling code. Its code is small and simple, so it can also act as an example for writing new mail storage backends. 
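If you want to experiment with it, pointing a test account's mail location at a cydir directory should be enough; a minimal sketch (the path is only an example):

---%<-------------------------------------------------------------------------
# For benchmarking/testing only, not recommended for real mail.
mail_location = cydir:~/cydir
---%<-------------------------------------------------------------------------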
(This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Dict.txt0000644000175000017500000000266112244263644013260 00000000000000Dict Proxy Process ================== Dict can be used by: * [Quota.Dict.txt] * [SharedMailboxes.Shared.txt] * [Plugins.Expire.txt] When using these, the mail processes need to have access the dict socket. By default only "dovecot" user has access to dict socket, which doesn't typically work in any installations. However, giving too wide permissions by default might allow untrusted users to access the dict and cause problems. If all users share a single UNIX UID (e.g. "vmail"), you could make the dict socket accessible only to it: ---%<------------------------------------------------------------------------- service dict { unix_listener dict { mode = 0600 user = vmail } } ---%<------------------------------------------------------------------------- If you use multiple UNIX UIDs, you can add an extra group for all Dovecot mail processes. This works even if you have untrusted system users who have shell access to the server: ---%<------------------------------------------------------------------------- mail_access_groups = dovecot service dict { unix_listener dict { mode = 0660 group = dovecot } } ---%<------------------------------------------------------------------------- However, it works with only if it's started as root. If this isn't possible, look into using instead. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Pigeonhole.Sieve.Extensions.Vacation.txt0000644000175000017500000000612012244263656021436 00000000000000Pigeonhole Sieve: Vacation Extension ==================================== The Sieve vacation extension (RFC5230 [http://tools.ietf.org/html/rfc5230/]) defines a mechanism to generate automatic replies to incoming email messages. It takes various precautions to make sure replies are only sent when appropriate. Script authors can specify how often replies can be sent to a particular contact. In the original vacation extension, this interval is specified in days with a minimum of one day. When more granularity is necessary and particularly when replies must be sent more frequently than one day, the vacation-seconds extension (RFC6131 [http://tools.ietf.org/html/rfc5230/]) can be used. This allows specifying the minimum reply interval in seconds with a minimum of zero (a reply is then always sent), depending on administrator configuration. Configuration ============= The *vacation* extension is available by default. In contrast, the *vacation-seconds* extension - which implies the vacation extension when used - is not available by default and needs to be enabled explicitly by adding it to the 'sieve_extensions' setting. The configuration also needs to be adjusted accordingly to allow a non-reply period of less than a day. The *vacation* and *vacation-seconds* extensions have their own specific settings. The settings that specify a period (currently all of them) are specified in *s*(econds), unless followed by a *d*(ay), *h*(our) or *m*(inute) specifier character. The following settings can be configured for the vacation extension in the 'plugin' section (default values are indicated): sieve_vacation_min_period = 1d : This specifies the minimum period that can be specified for the :days and :seconds tags of the vacation command. 
A minimum of 0 indicates that users are allowed to make the Sieve interpreter send a vacation response message for every incoming message that meets the other reply criteria (refer to RFC5230). A value of zero is however not recommended. sieve_vacation_max_period = 0 : This specifies the maximum period that can be specified for the :days tag of the vacation command. The configured value must be larger than the sieve_vacation_min_period setting. A value of 0 has a special meaning: it indicates that there is no upper limit. sieve_vacation_default_period = 7d : This specifies the default period that is used when no :days or :seconds tag is specified. The configured value must lie between the sieve_vacation_min_period and sieve_vacation_max_period. Invalid values for the settings above will make the Sieve interpreter log a warning and revert to the default values. Example ------- ---%<------------------------------------------------------------------------- plugin { # Use vacation-seconds sieve_extensions = +vacation-seconds # One hour at minimum sieve_vacation_min_period = 1h # Ten days default sieve_vacation_default_period = 10d # Thirty days at maximum sieve_vacation_max_period = 30d } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Namespaces.txt0000644000175000017500000002527412244263650014456 00000000000000Namespaces ========== Dovecot supports fully configurable namespaces. Their original and primary purpose is to provide Namespace IMAP extension (RFC 2342 [http://www.faqs.org/rfcs/rfc2342.html]) support, which allows giving IMAP clients hints about where to locate mailboxes and whether they're private, shared or public. Unfortunately most IMAP clients don't support this extension. Dovecot namespaces can be used for several other purposes too: * Changing the hierarchy separator * Providing backwards compatibility when switching from another IMAP server * Provides support for [SharedMailboxes.Public.txt] and [SharedMailboxes.Shared.txt] mailboxes * Allows having mails in multiple different locations with possibly different formats Configuration ------------- In v2.1+ there's a default inbox namespace added in '10-mail.conf'. If the configuration doesn't explicitly specify a namespace (as was in v2.0 and older) a default namespace is created automatically. The section name in namespaces (e.g. 'namespace sectionname { .. } ' is used only internally within configuration. It's not required at all, but it allows you to update an existing namespace (like how '15-mailboxes.conf' does) or have userdb override namespace settings for specific users ('namespace/sectionname/prefix=foo/'). Namespace types --------------- There are 3 types of namespaces: * private: Typically contains only user's own private mailboxes. * shared: Contains other users' [SharedMailboxes.Shared.txt]. * public: Contains [SharedMailboxes.Public.txt]. Hierarchy separators -------------------- Hierarchy separator specifies the character that is used to separate a parent mailbox from its child mailbox. For example if you have a mailbox "foo" with a child mailbox "bar", the full path to the child mailbox would be "foo/bar" if the separator was '/'. With a separator '.' it would be "foo.bar". IMAP clients, Sieve scripts and many parts of Dovecot configuration use the configured separator when referring to mailboxes. This means that if you change the separator, you may break things. 
However, changing the separator doesn't change the on-disk "layout separator". For example: +-----------------------------+--------+-----+----------+---------------------+ | mail_location | Layout | NS | Mailbox | Directory | | | sep | sep | name | | +-----------------------------+--------+-----+----------+---------------------+ | maildir:~/Maildir | . | . | foo.bar | ~/Maildir/.foo.bar/ | +-----------------------------+--------+-----+----------+---------------------+ | maildir:~/Maildir | . | / | foo/bar | ~/Maildir/.foo.bar/ | +-----------------------------+--------+-----+----------+---------------------+ | maildir:~/Maildir:LAYOUT=fs | / | . | foo.bar | ~/Maildir/foo/bar/ | +-----------------------------+--------+-----+----------+---------------------+ | maildir:~/Maildir:LAYOUT=fs | / | / | foo/bar | ~/Maildir/foo/bar/ | +-----------------------------+--------+-----+----------+---------------------+ Note how the "namespace separator" changes only the "Mailbox name", but doesn't change the directory where the mails are stored. The "layout separator" can only be changed by changing the LAYOUT, which also affects the entire directory structure. The layout separator also restricts the mailbox names. For example if the layout separator is '.', you can't just set separator to '/' and create a mailbox named "foo.bar". If you need to do this, you can use [Plugins.Listescape.txt] plugin to add escape the mailbox names as necessary. A commonly used separator is '/'. It probably causes the least amount of trouble with different IMAP clients.'^' separator is troublesome with Thunderbird.When '\' should be used it must be quoted, so one sets separator = "\\" You should use the same hierarchy separator for all namespaces. All list=yes namespaces must use the same separator, but if you find it necessary (e.g. for backwards compatibility namespaces) you may use different separators for list=no namespaces. Namespace settings ------------------ * type: See the "Namespace types" section above * separator: See the "Hierarchy separators" section above * prefix: The namespace prefix how it's visible in the NAMESPACE reply (if hidden=no) and mailbox list (if list=yes). * location: [MailLocation.txt]. The default is to use 'mail_location' setting. * inbox: "yes", if this namespace contains the user's INBOX. There is only one INBOX, so only one namespace can have inbox=yes. * hidden: "yes", if this namespace shouldn't be listed in NAMESPACE reply. * list: "yes" (default), if this namespace and its mailboxes should be listed by LIST command when the namespace prefix isn't explicitly specified as a parameter. "children" means the namespace prefix list listed only if it has child mailboxes. * subscriptions: "yes" (default) if this namespace should handle its own subscriptions. If "no", then the first parent namespace with subscriptions=yes will handle it. For example if it's "no" for a namespace with prefix=foo/bar/, Dovecot first sees if there's a prefix=foo/ namespace with subscriptions=yes and then a namespace with an empty prefix. If neither is found, an error is given. * ignore_on_failure: Normally Dovecot fails if it can't successfully create a namespace. Set this to "yes" to continue even if the namespace creation fails (e.g. public namespace points to inaccessible location). * disabled: Set to "yes" to quickly disable this namespace. Especially useful when returned by a userdb lookup to give per-user namespaces. 
* alias_for: If multiple namespaces point to the same location, they should be marked as aliases against one primary namespace. This avoids duplicating work for some commands (listing the same mailbox multiple times). The value for alias_for is the primary namespace's prefix. For example if the primary namespace has empty prefix, set 'alias_for=' for the alias namespace. Or if primary has 'prefix=INBOX/', use 'alias_for=INBOX/'. * mailbox { .. } settings can be used to autocreate/autosubscribe mailboxes and set their SPECIAL-USE flags. Shared Mailboxes ---------------- See . Examples -------- Mixed mbox and Maildir ---------------------- If you have your INBOX as mbox in '/var/mail/username' and the rest of the mailboxes in Maildir format under '~/Maildir', you can do this by creating two namespaces: ---%<------------------------------------------------------------------------- namespace { separator = / prefix = "#mbox/" location = mbox:~/mail:INBOX=/var/mail/%u inbox = yes hidden = yes list = no } namespace { separator = / prefix = location = maildir:~/Maildir } ---%<------------------------------------------------------------------------- Without the 'list = no' setting in the first namespace, clients would see the "#mbox" namespace as a non-selectable mailbox named "#mbox" but with child mailboxes (the mbox files in the '~/mail' directory), ie. like a directory. So specifically with 'inbox = yes', having 'list = no' is often desirable. Backwards Compatibility: UW-IMAP -------------------------------- When switching from UW-IMAP and you don't want to give users full access to filesystem, you can create hidden namespaces which allow users to access their mails using their existing namespace settings in clients. ---%<------------------------------------------------------------------------- # default namespace namespace inbox { separator = / prefix = inbox = yes } # for backwards compatibility: namespace compat1 { separator = / prefix = mail/ hidden = yes list = no alias_for = } namespace compat2 { separator = / prefix = ~/mail/ hidden = yes list = no alias_for = } namespace compat3 { separator = / prefix = ~%u/mail/ hidden = yes list = no alias_for = } ---%<------------------------------------------------------------------------- Backwards Compatibility: Courier IMAP ------------------------------------- *Recommended:* You can continue using the same INBOX. namespace as Courier: ---%<------------------------------------------------------------------------- namespace inbox { separator = . prefix = INBOX. inbox = yes } ---%<------------------------------------------------------------------------- *Alternatively:* Create the INBOX. as a compatibility name, so old clients can continue using it while new clients will use the empty prefix namespace: ---%<------------------------------------------------------------------------- namespace inbox { separator = / prefix = inbox = yes } namespace compat { separator = . prefix = INBOX. inbox = no hidden = yes list = no alias_for = } ---%<------------------------------------------------------------------------- The "separator=/" allows the INBOX to have child mailboxes. Otherwise with "separator=." it wouldn't be possible to know if "INBOX.foo" means INBOX's "foo" child or the root "foo" mailbox in "INBOX." compatibility namespace. With "separator=/" the difference is clear with "INBOX/foo" vs. "INBOX.foo". 
The alternative configuration is not recommended, as it may introduce there problems: * Although clients may do LIST INBOX.*, they may still do LSUB *, resulting in mixed results. * If clients used empty namespace with Courier, they now see the mailboxes with different names, resulting in redownloading of all mails (except INBOX). * Some clients may have random errors auto-detecting the proper default folders (Sent, Drafts etc) if the client settings refer to old paths while the server lists new paths. Per-user Namespace Location From SQL ------------------------------------ You need to give the namespace a name, for example "docs" below: ---%<------------------------------------------------------------------------- namespace docs { type = public separator = / prefix = Public/ } ---%<------------------------------------------------------------------------- Then you have an SQL table like: ---%<------------------------------------------------------------------------- CREATE TABLE Namespaces ( .. Location varchar(255) NOT NULL, .. ) ---%<------------------------------------------------------------------------- Now if you want to set the namespace location from the Namespaces table, use something like: ---%<------------------------------------------------------------------------- user_query = SELECT Location as 'namespace/docs/location' FROM Namespaces WHERE .. ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MailLocation.txt0000644000175000017500000002554712244263647014763 00000000000000Mail location ============= * For mbox-specific settings, see * For Maildir-specific settings, see * For dbox-specific settings, see There are three different places where the mail location is looked up from: 1. 'mail_location' setting in 'dovecot.conf' is used if nothing else overrides it. 2. 'mail' [UserDatabase.txt] overrides 'mail_location' setting. 3. 'location' setting inside namespaces overrides everything. Usually this should be used only for public and shared namespaces. By default the 'mail_location' setting is empty, which means that Dovecot attempts to locate automatically where your mails are. This is done by looking at '~/Maildir', '/var/mail/username', '~/mail' and '~/Mail' in that order. It's usually a good idea to explicitly specify where the mails are, even if the autodetection happens to work. Autodetection commonly fails for new users who don't have the mail directory created yet. Format ------ The format of the mailbox location specification is as follows: * / [MailboxFormat.txt]/ : /path/ [ : /key/ = /value/ … ] where: * /mailbox-format/ is a tag identifying one of the formats described at [MailboxFormat.txt]. * /path/ is the path to a directory where the mail is stored. This must be an absolute path, not a relative path. Even if relative paths appear to work, this usage is deprecated and will likely stop working at some point. * /key/ = /value/ can appear zero or more times to set various optional parameters. Possible values for /key/ are: * 'INDEX' : specifies the location of [MailLocation.txt]. * 'INBOX' : specifies the location of the [MailLocation.txt]. * 'LAYOUT' : specifies the directory layout under the [MailLocation.mbox.txt] or [MailLocation.Maildir.txt] formats. * 'CONTROL' : specifies the location of control files under the [MailLocation.mbox.txt] or [MailLocation.Maildir.txt] formats. * 'SUBSCRIPTIONS' : specifies the file used for storing subscriptions. 
The default is "subscriptions". If you're trying to avoid name collisions with a mailbox named "subscriptions", then also consider setting 'MAILBOXDIR'. * 'MAILBOXDIR' : specifies directory name under which all mailbox directories are stored. With [MailboxFormat.dbox.txt] the default is "mailboxes/" while with other mailbox formats the default is empty. Typically this should be changed only for [Plugins.Lazyexpunge.txt] with mdbox. * 'DIRNAME' : specifies the directory name used for mailbox directories, or in the case of mbox specifies the mailbox message file name. With [MailboxFormat.dbox.txt] the default is "dbox-Mails/" while with other mailbox formats the default is empty. Can be used under either [MailLocation.mbox.txt], [MailLocation.Maildir.txt] or [MailLocation.dbox.txt] formats. Note that this directory is used only for the mail directory and the alt directory, not for index/control directories (but see below). * 'FULLDIRNAME' : Same as 'DIRNAME', but use the directory name also for index and control directory paths. This should be used instead of 'DIRNAME' for new installations. (v2.2.8+) * 'ALT' : specifies the [MailLocation.dbox.txt] path for dbox formats. * The colons and equals signs are literal and there are no spaces in an actual mailbox location specification. Variables --------- You can use several variables in the 'mail_location' setting. See for a full list, but the most commonly used ones are: * '%u': Full username. * '%n': User part in user@domain, same as %u if there's no domain. * '%d': Domain part in user@domain, empty if there's no domain. Typical settings ---------------- Typically with Maildir it would be set to: ---%<------------------------------------------------------------------------- mail_location = maildir:~/Maildir ---%<------------------------------------------------------------------------- with mbox: ---%<------------------------------------------------------------------------- mail_location = mbox:~/mail:INBOX=/var/mail/%u ---%<------------------------------------------------------------------------- or if you'd like to use the [MailboxFormat.dbox.txt] format: ---%<------------------------------------------------------------------------- # single-dbox mail_location = sdbox:~/dbox ---%<------------------------------------------------------------------------- or: ---%<------------------------------------------------------------------------- # multi-dbox mail_location = mdbox:~/mdbox ---%<------------------------------------------------------------------------- Use only absolute paths. Even if relative paths would appear to work, they might just as well break some day. Directory hashing ----------------- You can use two different kinds of hashes in [Variables.txt]: * %H modifiers returns a 32bit hash of the given string as hex. For example '%2.256H' would return max. 256 different hashes in range 00 .. ff. * %M returns a MD5 hash of the string as hex. This can be used for two level hashing by getting substrings of the MD5 hash. For example '%1Mu/%2.1Mu/%u' returns directories from '0/0/user' to 'f/f/user'. Index files ----------- Index files are by default stored under the same directory as mails. With maildir they are stored in the actual maildirs, with mbox they are stored under '.imap/' directory. You may want to change the index file location if you're using or if you're setting up [SharedMailboxes.txt]. You can change the index file location by adding ':INDEX=' to mail_location. 
For example: ---%<------------------------------------------------------------------------- mail_location = maildir:~/Maildir:INDEX=/var/indexes/%u ---%<------------------------------------------------------------------------- The index directories are created automatically, but note that it requires that Dovecot has actually access to create the directories. Either make sure that the index root directory ('/var/indexes' in the above example) is writable to the logged in user, or create the user's directory with proper permissions before the user logs in. If you really want to, you can also disable the index files completely by appending ':INDEX=MEMORY'. Private index files (v2.2+) --------------------------- Since v2.2 the recommended way to enable private flags for shared mailboxes is to create private indexes with :INDEXPVT=. See for more information. INBOX path ---------- INBOX path can be specified to exist elsewhere than the rest of the mailboxes, for example: ---%<------------------------------------------------------------------------- mail_location = mbox:~/mail:INBOX=/var/mail/%u mail_location = maildir:~/Maildir:INBOX=~/Maildir/.INBOX ---%<------------------------------------------------------------------------- Note that it's still not possible to mix maildir and mbox formats this way. You need to use [Namespaces.txt] for that. Homeless users -------------- Having a home directory for users is highly recommended. The [Pigeonhole.Sieve.txt] already requires a home directory to work, and it probably won't be the last feature to require a home. See [VirtualUsers.txt] for more reasons why it's a good idea, and how to give Dovecot a home directory even if you don't have a "real home directory". If you really don't want to set any home directory, you can use something like: ---%<------------------------------------------------------------------------- mail_location = maildir:/home/%u/Maildir ---%<------------------------------------------------------------------------- Per-user mail locations ----------------------- It's possible to override the default 'mail_location' for specific users by making the [UserDatabase.txt] return 'mail' [UserDatabase.ExtraFields.txt]. See the [UserDatabase.txt] page for the specific userdb you're using for more information how to do this. Below are however a couple of examples. Note that %h doesn't work in the userdb queries or templates. ~/ gets expanded later, so use it instead. SQL --- ---%<------------------------------------------------------------------------- user_query = SELECT home, uid, gid, mail FROM users WHERE user = '%u' ---%<------------------------------------------------------------------------- LDAP ---- ---%<------------------------------------------------------------------------- user_attrs = homeDirectory=home, uidNumber=uid, gidNumber=gid, mailLocation=mail ---%<------------------------------------------------------------------------- Passwd-file ----------- ---%<------------------------------------------------------------------------- user:{PLAIN}password:1000:1000::/home/user::userdb_mail=mbox:~/mail:INBOX=/var/mail/%u ---%<------------------------------------------------------------------------- Mixing mbox and maildir ----------------------- It's possible to use both mboxes and maildirs for the same user by configuring multiple namespaces. See . Having both mboxes and maildirs mixed within the same namespace isn't currently supported. 
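Whichever userdb you use for the per-user 'mail' field shown above, it can be handy to verify what Dovecot actually ends up with for a given user by doing a userdb lookup from the command line. A minimal sketch (the username is only an example and the returned fields depend entirely on your userdb):

---%<-------------------------------------------------------------------------
# Perform a userdb lookup for one user and print the resulting fields.
doveadm user bob@example.com
# The output lists the userdb fields, typically including uid, gid, home
# and, if the userdb returned it, the per-user mail location.
---%<-------------------------------------------------------------------------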
Custom mailbox location detection --------------------------------- Dovecot by default detects the mailboxes in this order: 1. maildir: ~/Maildir 2. mbox: ~/mail, and /var/mail/%u if it exists 3. mbox: ~/Mail, and /var/mail/%u if it exists If you need something else, you can override the 'mail_executable' setting to run a script, which sets the MAIL environment properly. For example: ---%<------------------------------------------------------------------------- #!/bin/sh if [ -d $HOME/.maildir ]; then export MAIL=maildir:$HOME/.maildir else export MAIL=mbox:$HOME/mail:INBOX=/var/mail/$USER fi export USERDB_KEYS="$USERDB_KEYS mail" exec "$@" ---%<------------------------------------------------------------------------- Custom namespace location ------------------------- If you need to override namespace's location, first give it a name ("user" below): ---%<------------------------------------------------------------------------- namespace user { .. } ---%<------------------------------------------------------------------------- Then in the script use: ---%<------------------------------------------------------------------------- #!/bin/sh # do the lookup here location=mbox:$HOME/mail export USERDB_KEYS="$USERDB_KEYS namespace/user/location" exec env "NAMESPACE/USER/LOCATION=$location" "$@" ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.Storage.Plugins.txt0000644000175000017500000001236212244263644016630 00000000000000Mail Plugins ============ Typically plugins add hooks in their init() function by calling 'mail_storage_hooks_add()', and remove the hooks at deinit() with 'mail_storage_hooks_remove()'. Hooks that are currently supported: * mail_user_created: A new mail user was created. It doesn't yet have any namespaces. * mail_storage_created: A new mail storage was created. It's not connected to any namespaces/mailbox lists yet. * mailbox_list_created: A new mailbox list was created. It's not connected to any storages yet. Because of this, some internal virtual methods haven't been overridden by the storage yet, so plugins rarely want to use this hook. Instead they should use: * mail_namespace_storage_added: Storage was connected to its first namespace/mailbox list. This hook should usually be used if plugin wants to override mailbox_list's methods. * mail_namespaces_created: User's all namespaces have been created. This hook is called only per user at startup. More internal namespaces may be created later when using shared mailboxes. * mailbox_allocated: 'mailbox_alloc()' was called. * mailbox_opened: Mailbox (and its index) was actually opened, either explicitly with 'mailbox_open()' or implicitly by some other function. Overriding methods ------------------ When the hook gets called, you usually want to override some method of the created object. This is the easy part, for example: ---%<------------------------------------------------------------------------- static void plugin_mailbox_allocated(struct mailbox *box) .. box->v.transaction_begin = plugin_transaction_begin; ---%<------------------------------------------------------------------------- The problem is that once 'plugin_transaction_begin()' is called, it should call the original 'transaction_begin()'. There may also be multiple plugins that want to override the same method, so the idea is to just have each plugin call the previous 'transaction_begin()'. 
The next problem is where do you save the previous value? Most objects have a 'module_contexts' array for storing per-plugin pointers for this purpose. There are several helper functions to make setting and accessing them quite safe. The easiest way to set up the module context is to just copy&paste code from an existing plugin that sets the same context. Here's some documentation about it anyway:

First you start by creating a register for the plugin. There are different registers for different types of objects:

 * mail_user_module_register: For mail_user.
 * mailbox_list_module_register: For mailbox_list.
 * mail_storage_module_register: For mail_storage, mailbox, mailbox_transaction and mail_search.
 * mail_module_register: For mail.

We'll assume you want to use mail_storage_module_register:

---%<-------------------------------------------------------------------------
static MODULE_CONTEXT_DEFINE_INIT(plugin_storage_module, &mail_storage_module_register);
---%<-------------------------------------------------------------------------

If you need to make it external, use:

---%<-------------------------------------------------------------------------
extern MODULE_CONTEXT_DEFINE(plugin_storage_module, &mail_storage_module_register);

struct plugin_storage_module plugin_storage_module =
        MODULE_CONTEXT_INIT(&mail_storage_module_register);
---%<-------------------------------------------------------------------------

Next you'll need to allocate memory for the structure you want to place in the context. If you only want to override some methods, you can use:

---%<-------------------------------------------------------------------------
union mailbox_module_context *mbox;
struct mailbox_vfuncs *v = box->vlast;

mbox = p_new(box->pool, union mailbox_module_context, 1);
mbox->super = *v;
box->vlast = &mbox->super;

v->transaction_begin = plugin_transaction_begin;
MODULE_CONTEXT_SET_SELF(box, plugin_storage_module, mbox);
---%<-------------------------------------------------------------------------

If you want to store some more plugin-specific data in the object instead of just the super methods, you can do:

---%<-------------------------------------------------------------------------
struct plugin_mailbox {
        /* must be called module_ctx */
        union mailbox_module_context module_ctx;
};

/* .. */

struct plugin_mailbox *mbox;
struct mailbox_vfuncs *v = box->vlast;

mbox = p_new(box->pool, struct plugin_mailbox, 1);
mbox->module_ctx.super = *v;
box->vlast = &mbox->module_ctx.super;

v->transaction_begin = plugin_transaction_begin;
MODULE_CONTEXT_SET(box, plugin_storage_module, mbox);
---%<-------------------------------------------------------------------------

Note that when using the union directly you use 'MODULE_CONTEXT_SET_SELF()', while when it's inside a struct you use 'MODULE_CONTEXT_SET()'.

Once all this initialization is done, you can look up the module context with:

---%<-------------------------------------------------------------------------
#define PLUGIN_CONTEXT(obj) MODULE_CONTEXT(obj, plugin_storage_module)

/* .. */

struct plugin_mailbox *mbox = PLUGIN_CONTEXT(box);
---%<-------------------------------------------------------------------------

(Yes, this API seems a bit too difficult to use and could use a redesign.)
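To complete the picture, here is a sketch of what the wrapper method itself could look like once the module context is in place. This is not from the wiki; the vfunc signature is assumed from the v2.2 'mailbox_vfuncs' definition, so double-check it against 'src/lib-storage/mail-storage-private.h' in your source tree:

---%<-------------------------------------------------------------------------
/* Sketch only: signature assumed from the v2.2 mailbox_vfuncs definition. */
static struct mailbox_transaction_context *
plugin_transaction_begin(struct mailbox *box,
                         enum mailbox_transaction_flags flags)
{
        struct plugin_mailbox *mbox = PLUGIN_CONTEXT(box);
        struct mailbox_transaction_context *t;

        /* plugin-specific work before the transaction begins could go here */

        /* chain to the implementation saved in module_ctx.super, so other
           plugins and the storage backend itself still get called */
        t = mbox->module_ctx.super.transaction_begin(box, flags);

        /* plugin-specific work after the transaction was created could go here */
        return t;
}
---%<-------------------------------------------------------------------------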
(This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PasswordDatabase.ExtraFields.Host.txt0000644000175000017500000000400512244263655020745 00000000000000Login referrals
===============

Login referrals are an IMAP extension specified by RFC 2221 [http://www.apps.ietf.org/rfc/rfc2221.html]. They're not supported by many clients, so you probably don't want to use them normally.

Login referrals can be used in two ways:

 1. Tell the client to log into another server without allowing it to log in locally.
 2. Suggest that the client log into another server, but log it in anyway.

The following fields can be used to configure login referrals:

 * 'host=s': The destination server's hostname. This field is required for login referrals to be used.
 * 'port=s': The destination server's port. The default is 143.
 * 'destuser=s': Tell the client to use a different username when logging in.
 * 'reason=s': Optional reason to use as the reply to the login command. The default is "Logged in, but you should use this server instead."

Using the above settings you can suggest that the client log in elsewhere. To require it, you'll also have to return:

 * 'nologin': User is not allowed to log in.
 * 'reason=s': Optional reason. The default is "Try this server instead.".

Client support
--------------

The following clients are known to support login referrals:

 * Pine
 * Outlook (but not Outlook Express)

Examples
--------

Forward the user to another server after successful authentication:

---%<-------------------------------------------------------------------------
password_query = SELECT password, host, 'Y' as nologin FROM users WHERE userid = '%u'
---%<-------------------------------------------------------------------------

Forward all users to another server without authentication:

---%<-------------------------------------------------------------------------
password_query = \
  SELECT NULL AS password, 'Y' AS nopassword, \
  'imap2.example.com' AS host, \
  'This server is down, try another one.' AS reason, \
  'Y' AS nologin, \
  'Y' AS nodelay
---%<-------------------------------------------------------------------------

(This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Authentication.RestrictAccess.txt0000644000175000017500000000540712244263640020271 00000000000000Restricting Access
==================

Restricting IMAP/POP3 access
----------------------------

The examples below show how you can give POP3 access to everyone, but IMAP access only to some people. The exact solution you want depends on what passdb you use. The solutions can also be modified for other types of IMAP/POP3/SMTP/etc. access checks.

PAM
---

Set the PAM service name to '%s', ie.:

---%<-------------------------------------------------------------------------
passdb {
  driver = pam
  args = %s
}
---%<-------------------------------------------------------------------------

That way PAM uses '/etc/pam.d/imap' for IMAP, and '/etc/pam.d/pop3' for POP3. In '/etc/pam.d/imap' you could then use eg.
the pam_listfile.so module: ---%<------------------------------------------------------------------------- # allow IMAP access only for users in /etc/imapusers file auth required pam_listfile.so item=user sense=allow file=/etc/imapusers onerr=fail ---%<------------------------------------------------------------------------- SQL --- You can use the '%s' variable which expands to 'imap' or 'pop3' in 'password_query', eg: ---%<------------------------------------------------------------------------- password_query = SELECT password FROM users WHERE userid = '%u' and (imap_allowed = true or '%s' = 'pop3') ---%<------------------------------------------------------------------------- LDAP ---- Just like with SQL, you can use '%s' in pass_filter, eg.: ---%<------------------------------------------------------------------------- pass_filter = (&(objectClass=posixAccount)(uid=%u)(service=%s)) ---%<------------------------------------------------------------------------- That would require setting both service=pop3 and service=imap attributes to the user objects. passwd-file ----------- You can create a deny passwd-file based on the service: ---%<------------------------------------------------------------------------- passdb { driver = passwd-file args = /etc/dovecot/deny.%s deny = yes } ---%<------------------------------------------------------------------------- This makes Dovecot look for '/etc/dovecot/deny.imap' and '/etc/dovecot/deny.pop3' files. If the user exists in it, the access is denied. The files don't need to have anything else than one username per line. Note that this deny passdb must be before other passdbs. It also means that it can be used with any other passdb, not just with passwd-file passdbs. Restricting IP Access --------------------- It's possible to allow a user to authenticate only from a specific IP or network. This is especially useful for master users. This can be done by returning [PasswordDatabase.ExtraFields.AllowNets.txt] extra field in passdb. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MailboxFormat.MH.txt0000644000175000017500000000517012244263647015445 00000000000000MH Mailbox Format ================= The MH mailbox format originated with a system developed by the RAND corporation and the University of California. Each email message is stored in a single file, with directories indicating folders and subfolders. The index or order of the messages in the folder determine what each message is named (which may not correspond to the inode index). The "safe" way to guarantee a message gets written to a mail folder is to first write the message out to a randomly chosen temporary file name, then link or rename the file to the number LAST+1, where LAST is the last sequential message in the folder. If the rename fails, increment the counter and try again. MH folders also maintains a meta-file called '~/Mail/.mh_context' that contains information about the most current folder and message chosen. Each sub-folder also contains a meta-file called '.mh_sequences' or '.xmhcache', which maintains keyword association lists for stored queries. New messages are stored in the "unseen" sequence for a folder. Procmail itself does not bother making changes to this file, rather simply delivers the message to the folder and leaves determining new messages as an exercise for the MUA. 
For example: ---%<------------------------------------------------------------------------- unseen: 1-3 8 15 projectB: 2-8 10 ---%<------------------------------------------------------------------------- shows two stored sequences of messages. Command-line utilities can then use these sequences as shortcuts.'show unseen', for example, is short-hand for 'show 1-3 8 15'. Deleted emails are indicated by prepending a "," to the name. One of the largest problems that IMAP servers have with MH format is the volatility of the email message name itself. The command-line utility 'sortm' is used to sort mail folders by date or string matching. To do this, messages are actually renamed to reflect the new sort order. IMAP servers are required to maintain an index of the folder contents, so when the names of the file entries cannot be guaranteed to be stable, IMAP servers have to throw out previous index caches and re-index. When operating with a shell account on a machine that also provides IMAP access to folders, users are encouraged not to re-sort email locally while accessing the IMAP server remotely. Links * NMH [http://www.nongnu.org/nmh/]: New MH Client * Original RAND MH [http://rand-mh.sourceforge.net] Code * MH-Book [http://rand-mh.sourceforge.net/book/] * Mutt Manual [http://www.mutt.org/doc/manual/manual-4.html#ss4.6]: Describing how it handles MH folders (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Pigeonhole.Sieve.Extensions.txt0000644000175000017500000000061112244263656017672 00000000000000Pigeonhole Sieve Extensions =========================== The following Sieve language extensions have a dedicated wiki page with specific configuration and usage information: * [Pigeonhole.Sieve.Extensions.Vacation.txt] * [Pigeonhole.Sieve.Extensions.SpamtestVirustest.txt] (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/AuthDatabase.Dict.txt0000644000175000017500000001416012244263637015604 00000000000000Key-value authentication database (v2.1.9+) =========================================== Key-value databases can be used as auth backends. They probably should be used only for caching in front of e.g. SQL auth backends, since they don't currently support user iteration. Auth configuration ------------------ 'dovecot.conf': ---%<------------------------------------------------------------------------- passdb { driver = dict args = /etc/dovecot/dovecot-dict-auth.conf } userdb { driver = dict args = /etc/dovecot/dovecot-dict-auth.conf } ---%<------------------------------------------------------------------------- Dict configuration ------------------ See the 'dovecot-dict-auth.conf.ext' file from example-config for full list of configuration options. Basically you need these: '/etc/dovecot/dovecot-dict-auth.conf.ext': ---%<------------------------------------------------------------------------- uri = redis:host=127.0.0.1:port=6379 password_key = dovecot/passdb/%u user_key = dovecot/userdb/%u iterate_disable = yes default_pass_scheme = plain ---%<------------------------------------------------------------------------- Example values -------------- Currently only JSON object values are supported. 
For example userdb lookup should return something like: ---%<------------------------------------------------------------------------- { "uid": 123, "gid": 123, "home": "/home/username" } ---%<------------------------------------------------------------------------- Complete example for authenticating via a UNIX socket ----------------------------------------------------- The Dict auth backend can be used to query a local UNIX socket for users. This can be handy for accessing user databases which would otherwise only be accessible via the [AuthDatabase.CheckPassword.txt] backend and a scripting language. When given a <"proxy:"> [Quota.Dict.txt] URL the Dict backend speaks a simple protocol over a UNIX socket. The protocol is defined in 'src/lib-dict/dict-client.h' (Mercurial [http://hg.dovecot.org/dovecot-2.2/file/tip/src/lib-dict/dict-client.h]). Auth configuration ------------------ 'dovecot.conf': ---%<------------------------------------------------------------------------- passdb { driver = dict args = /etc/dovecot/dovecot-dict-auth.conf } userdb { # optional driver = prefetch } userdb { driver = dict args = /etc/dovecot/dovecot-dict-auth.conf } ---%<------------------------------------------------------------------------- Dict configuration ------------------ The last "dictionary name" ("somewhere") argument is redundant here. '/etc/dovecot/dovecot-dict-auth.conf.ext': ---%<------------------------------------------------------------------------- uri = proxy:/var/run/auth_proxy_dovecot/socket:somewhere password_key = passdb/%u user_key = userdb/%u iterate_disable = yes #default_pass_scheme = plain ---%<------------------------------------------------------------------------- Server process for answering Dict lookups ----------------------------------------- The server process listening on '/var/run/lookup_proxy_dovecot/socket' can be written in any language.Here's an example in Perl: ---%<------------------------------------------------------------------------- package AuthProxyDovecot; use base qw( Net::Server::PreFork ); use strict; use warnings; use JSON::XS; AuthProxyDovecot->run() or die "Could not initialize"; sub default_values { return { port => '/var/run/auth_proxy_dovecot/socket|unix', log_level => 2, log_file => 'Sys::Syslog', syslog_logsock => 'unix', syslog_ident => 'auth_proxy_dovecot', syslog_facility => 'daemon', background => 1, setsid => 1, pid_file => '/var/run/auth_proxy_dovecot.pid', user => 'root', group => 'root', no_client_stdout => 1, max_spare_servers => 2, min_spare_servers => 1, min_servers => 2, max_servers => 10, }; } ## end sub default_values ################################################## sub process_request { my $self = shift; my $socket = $self->{server}->{client}; my %L_handler = ( passdb => sub { my ($arg) = @_; my $ret = { password => '$1$JrTuEHAY$gZA1y4ElkLHtnsrWNHT/e.', userdb_home => "/home/username/", userdb_uid => 1000, userdb_gid => 1000, }; return $ret; }, userdb => sub { my ($arg) = @_; my $ret = { home => "/home/username/", uid => 1000, gid => 1000, }; return $ret; }, ); # protocol from src/lib-dict/dict-client.h my $json = JSON::XS->new; eval { while (<$socket>) { $self->log(2, "Got request: $_"); chomp; my $cmd = substr($_,0,1); next if $cmd eq 'H'; # "hello" my $ret; if ($cmd eq 'L') { my ($namespace,$type,$arg) = split ('/',substr($_,1),3); $self->log(4,"I:$namespace, $type, $arg"); if ($namespace eq 'shared') { my $f = $L_handler{$type}; if (defined $f && defined $arg) { $ret = $f->($self->{lookup}, $arg); } } } if ($ret) { my 
$json = JSON::XS->new->indent(0)->utf8->encode($ret); $self->log(4,"O:$json"); syswrite $socket, "O".$json."\n"; } else { syswrite $socket, "F\n" unless $ret; } } 1; }; if ($@) { $self->log(2, "Invalid request: $@"); } } sub pre_loop_hook { my $self = shift; $self->log(1, 'Starting server'); } sub pre_server_close_hook { my $self = shift; $self->log(1, 'Server is shut down'); } 1; __END__
---%<-------------------------------------------------------------------------

(This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.Processes.txt0000644000175000017500000001513612244263644015544 00000000000000Dovecot processes
=================

Dovecot is split into multiple processes where each process does only one thing. This is partially because it makes the code cleaner, but also because it allows setting up different privileges for each process. The most important processes are:

 * Master process (dovecot)
 * Login processes (imap-login, pop3-login)
 * Authentication process (dovecot-auth)
 * Mail processes (imap, pop3)

Master process
--------------

This process keeps all the other processes running. If a child process dies, another one is restarted automatically. It always runs as root, unless you're specifically running everything under a single normal UID. The master process reads the configuration file and exports the settings to other processes via environment variables.

All logging also goes through the master process. This avoids problems with rotating log files, as there's only a single process to send a signal to reopen the log file. Also writing to the same log file (if not using syslog) isn't necessarily safe to do in multiple processes concurrently. Making the logging go through the master process also gives a couple of advantages from a security and reliability point of view: All log lines can be prefixed with the process's name and the username of the user who was logged in, without the possibility for the process itself to forge them. Flooding of the logs can also be prevented. By default Dovecot allows non-privileged processes to write 10 lines per second before it begins to delay reading their input, which finally causes the badly behaving process to start blocking on writing to stderr instead of eating all the CPU and disk space.

In the Dovecot 2.0 design, the master process is split into three parts: the Master process which does nothing more than keep the processes running, the config process which handles reading the configuration file (also supporting eg. SQL storages!) and the log process which handles the logging.

Login processes
---------------

The login processes implement the required minimum of the IMAP and POP3 protocols before a user logs in successfully. There are separate processes (and binaries) to handle IMAP and POP3 protocols.

These processes are run with the least possible privileges. Unfortunately the default UNIX security model still allows them to do much more than they would have to: Accept new connections on a socket, connect to new UNIX sockets and read and write to existing file descriptors. Still, the login process is by default run under a user account that has no special access to anything, and inside a non-writable chroot where only a couple of files exist. Doing any damage inside there should be difficult.

When a new connection comes, one of the login processes accept()s it. After that the client typically does nothing more than ask the server's capability list and then log in. The client may also start a TLS session before logging in.
Authentication is done by talking to the authentication process. The login process is completely untrusted by the authentication process, so even if an attacker is able to execute arbitrary code inside a login process, they won't be able to log in without a valid username and password.

After receiving a successful authentication reply from the authentication process, the login process sends the file descriptor to the master process, which creates a new mail process and transfers the fd into it. Before doing that, the master process verifies from the authentication process that the authentication really was successful.

By default each login process will handle only a single connection and afterwards kill itself (but see SSL proxying below). This way an attacker can't see other people's connections. This can however be disabled ('login_process_per_connection=no'), in which case the security of the design suffers greatly.

The login processes handle SSL/TLS connections themselves completely. They keep proxying the connection to mail processes for the entire lifetime of the connection. This way if a security hole is found in the SSL library, an authenticated user still can't execute code outside the login process.

See for more information about different settings related to login processes.

Authentication process
----------------------

The authentication process handles everything related to the actual authentication: SASL authentication mechanisms, looking up and verifying the passwords and looking up user information.

It listens for two different kinds of connections: untrusted authentication client connections (from login processes) and master connections (from the master process, but also from Dovecot LDA). The client connections are only allowed to try to authenticate. The master connections are allowed to ask if an authentication request with a given ID was successful, and also to look up user information based on a username. This user lookup feature is used by Dovecot LDA.

Each client connection tells its process ID to the authentication process in a handshake. If a connection with the same PID already exists, an error is logged and the new connection is refused. Although this makes DoS attacks possible, it won't go unnoticed for long and I don't see this as a real issue for now.

Having the authentication process know the PID of the client connection allows all authentication requests to be mapped to one specific client connection. Since the master process knows the login process's real PID, it's used when asking the authentication process if the request was successful. This makes it impossible for a login process to try to fake another login process's login requests. Faking PIDs will also be quite pointless.

Once the master process has done the verification request for a successful authentication request, the request is freed from memory. The requests are also freed about 2 minutes after their creation, regardless of the state they currently are in.

For blocking password and user database backends (eg. MySQL) separate "worker processes" are used. Initially only one of them exists, but more are created as needed. [PasswordDatabase.PAM.txt] can be configured to use worker processes instead of doing the forking itself, but this isn't currently done by default and there may be problems related to it. Also [PasswordDatabase.CheckPassword.txt] currently does the forking itself.

Mail processes
--------------

These processes handle the actual post-login mail handling using the privileges of the logged in user.
It's possible to chroot these processes,but practically it's usually more trouble than worth. See [Design.MailProcess.txt] for their internal design documentation. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/QuickConfiguration.txt0000644000175000017500000001115512244263657016203 00000000000000Quick Configuration =================== If you just want to get Dovecot running with typical configuration in a typical environment, here's what you'll have to do: Contents 1. Quick Configuration 1. Configuration file 1. Installing from sources 2. Split configuration files 2. Authentication 3. Mail Location 4. Mbox 5. Maildir 6. Client Workarounds 7. SSL and Plaintext Authentication 8. NFS 9. Running Configuration file ------------------ Prebuilt packages usually install the configuration files into '/etc/dovecot/'. You'll find the correct path by running: ---%<------------------------------------------------------------------------- doveconf -n | head -n 1 ---%<------------------------------------------------------------------------- It's a good idea to read through all the config files and see what settings you might want to change. Installing from sources ----------------------- If you compiled and installed Dovecot from sources, Dovecot has installed only a '/usr/local/etc/dovecot/README' file, which contains the path to the installed example configuration files, usually '/usr/local/share/doc/dovecot/example-config'. Copy them to etc/: ---%<------------------------------------------------------------------------- cp -r /usr/local/share/doc/dovecot/example-config/* /usr/local/etc/dovecot/ ---%<------------------------------------------------------------------------- Split configuration files ------------------------- The default configuration starts from 'dovecot.conf', which contains an '!include conf.d/*.conf' statement to read the rest of the configuration. The idea is that the settings are nicely grouped into different files to make it easier for new admins to scan through related settings. It doesn't matter which config file you add which setting. In the production system it's often easier to just have a single 'dovecot.conf' file, which you can create easily using ---%<------------------------------------------------------------------------- doveconf -n > dovecot.conf ---%<------------------------------------------------------------------------- Authentication -------------- You'll probably be using [PasswordDatabase.PAM.txt] authentication. See the [PasswordDatabase.PAM.txt] page for how to configure it. A typical configuration with Linux would be to create '/etc/pam.d/dovecot' which contains: ---%<------------------------------------------------------------------------- auth required pam_unix.so account required pam_unix.so ---%<------------------------------------------------------------------------- If you're using something else, see [PasswordDatabase.txt] and [UserDatabase.txt]. Mail Location ------------- You can let Dovecot do its automatic mail location detection, but if that doesn't work, you can set the location manually in 'mail_location' setting. See for more information. Mbox ---- Make sure that all software accessing the mboxes are using the same locking methods in the same order. The order is important to prevent deadlocking. From Dovecot's side you can change these from 'mbox_read_locks' and 'mbox_write_locks' settings. See for more information. 
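For example, if the other software on the system locks mboxes with a dotlock followed by fcntl, the Dovecot side would be configured to match. The values below are only a sketch and happen to be the usual defaults; use whatever locking methods your MTA/MDA actually uses, in the same order:

---%<-------------------------------------------------------------------------
# Must match the locking used by everything else that touches the mboxes,
# in the same order, to avoid deadlocks.
mbox_read_locks = fcntl
mbox_write_locks = dotlock fcntl
---%<-------------------------------------------------------------------------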
If you're using '/var/mail/' directory for INBOXes, you may need to set 'mail_privileged_group = mail' so Dovecot can create dotlocks there. For better performance you may want to set 'mbox_very_dirty_syncs = yes' option. Maildir ------- For better performance you may want to set 'maildir_very_dirty_syncs = yes' option. Client Workarounds ------------------ Check 'imap_client_workarounds' and 'pop3_client_workarounds' and see if you want to enable more of them than the defaults. SSL and Plaintext Authentication -------------------------------- If you intend to use SSL, set 'ssl_cert' and 'ssl_key' settings. Otherwise set 'ssl = no'. Easiest way to get SSL certificates built is to use Dovecot's 'doc/mkcert.sh' script. See . By default 'disable_plaintext_auth = yes', which means that Dovecot will fail the authentication if the client doesn't use SSL (or use [Authentication.Mechanisms.txt]). This is recommended in most situations, since it prevents leaking passwords. However, if you don't offer SSL for some reason, you'll probably want to set 'disable_plaintext_auth = no'. NFS --- If you're using NFS or some other remote filesystem that's shared between multiple computers, you should read . Running ------- See and . (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/HowTo.ImapcProxy.txt0000644000175000017500000001406312244263646015530 00000000000000Dovecot imapc proxy =================== Using Dovecot as a secure IMAP Proxy in front of Exchange, using Exchange Authentication and IMAPC. This requires Dovecot 2.1.rc1 or newer. Many thanks to Timo on the Dovecot mailing list for all his help! This " " is based on already having Dovecot already compiled and installed. 1. Create an unprivileged, non-system account user and group for the proxy, with a home directory. This needs to have a writable home directory, but no other privileges. ---%<--------------------------------------------------------------------- [root@localhost]# useradd imapproxy ---%<--------------------------------------------------------------------- 2. Verify that the user can not login: ---%<--------------------------------------------------------------------- [root@localhost]# grep imapproxy /etc/shadow ---%<--------------------------------------------------------------------- You should see something like: ---%<--------------------------------------------------------------------- imapproxy:!!:nnnn:0:nn:n::: ---%<--------------------------------------------------------------------- The important part is the "!!". This indicates that the account is locked. If you don't see this, lockout the account (check man passwd) 3. Create '/etc/dovecot/dovecot.conf' or ('/usr/local/etc/dovecot/dovecot.conf') as appropriate: ---%<--------------------------------------------------------------------- ## Dovecot configuration file mail_uid = imapproxy mail_gid = imapproxy protocols = imap listen = *, :: mail_location = imapc:~/imapc # Change the line below to reflect the IP address of your Exchange Server. imapc_host = 10.1.2.3 imapc_port = 143 passdb { driver = imap # Change the line below to reflect the IP address of your Exchange Server. 
args = host=10.1.2.3 default_fields = userdb_imapc_user=%u userdb_imapc_password=%w } userdb { driver = prefetch } # /home/imapproxy is the home directory for the imapproxy user, and # %u is a subdir that will be automatically created for each IMAP user when they connect mail_home = /home/imapproxy/%u auth_mechanisms = plain login # This is the auth service used by Postfix to do dovecot auth. service auth { unix_listener auth-userdb { } inet_listener { port = 12345 } } ## ## SSL settings ## # These will need to ba adjusted to point to *your* certificates, not mine 8-) # The ssl_ca line refers to the intermediate certificate bundle which may or may not be required by your SSL provider ssl_cert = [Logging.txt] as well. * A sample universal [DovecotInit.txt]. * A sample Mac OS X 10.4 [LaunchdInstall.txt] Stopping -------- Killing the Dovecot master process with a normal TERM signal does a clean shutdown. This can be done easily with: ---%<------------------------------------------------------------------------- doveadm stop ---%<------------------------------------------------------------------------- 'shutdown_clients' setting controls whether existing IMAP and POP3 sessions are killed. Processes --------- When Dovecot is running, it uses several processes: ---%<------------------------------------------------------------------------- # ps auxw|grep "dovecot" root 7245 0.1 0.1 2308 1096 pts/0 S+ 19:53 0:00 dovecot dovecot 7246 0.0 0.0 2084 824 pts/0 S+ 19:53 0:00 dovecot/anvil root 7247 0.0 0.0 2044 908 pts/0 S+ 19:53 0:00 dovecot/log root 7250 0.0 0.3 4988 3740 pts/0 S+ 19:53 0:00 dovecot/config root 7251 0.0 0.2 10024 2672 pts/0 S+ 19:53 0:00 dovecot/auth root 7303 0.6 0.3 10180 3116 pts/0 S+ 19:57 0:00 dovecot/auth -w vmail 7252 0.0 0.1 3180 1264 pts/0 S+ 19:53 0:00 dovecot/imap vmail 7255 0.0 0.1 3228 1596 pts/0 S+ 19:54 0:00 dovecot/pop3 dovenull 7260 0.0 0.1 4028 1940 pts/0 S+ 19:54 0:00 dovecot/imap-login dovenull 7262 0.0 0.1 4016 1916 pts/0 S+ 19:54 0:00 dovecot/pop3-login ---%<------------------------------------------------------------------------- * 'dovecot' process is the Dovecot master process which keeps everything running. * 'anvil' keeps track of user connections * 'log' writes to log files. All logging, except from master process, goes through it. * 'config' parses the configuration file and sends the configuration to other processes. * 'auth' handles all authentication. * 'auth -w' process is an "authentication worker" process. It's used only with some "blocking" authentication databases, such as MySQL. * 'imap-login' and 'pop3-login' processes handle new IMAP and POP3 connections until user has logged in. They also handle proxying SSL connections even after login. * 'imap' and 'pop3' processes handle the IMAP and POP3 connections after user has logged in. Reloading Configuration ----------------------- Sending HUP signal to Dovecot reloads configuration. 
This can be done easily with: ---%<------------------------------------------------------------------------- doveadm reload ---%<------------------------------------------------------------------------- An acknowledgement is written to log file: ---%<------------------------------------------------------------------------- Jun 14 19:59:59 master: Warning: SIGHUP received - reloading configuration ---%<------------------------------------------------------------------------- Running Multiple Invocations of Dovecot --------------------------------------- You may wish to invoke a second session (or even multiple sessions) of Dovecot for testing different functionality, configurations, etc. In order to run multiple instances of Dovecot, you must: 1. Create a differently named copy of the dovecot.conf configuration file with these changes: 1. Change 'base_dir' to the new run directory 2. Change services' inet_listener port numbers to new, unused values (in '10-master.conf'). 3. Optionally change 'instance_name' to show a different "dovecot/" prefix in ps output. (v2.0.18+) 4. If you're using authentication sockets (for SMTP AUTH or deliver), you'll need to change them as well.'auth_socket_path' specifies the socket path for deliver. * Alternatively if all the instances have identical authentication configuration, you can have only a single Dovecot instance serve the auth sockets and have the other instances use them. 2. Invoke 'dovecot' (and 'dovecot-lda') with the '-c' parameter and the modified configuration file, e.g.:'dovecot -c /usr/local/etc/dovecot2.conf' 3. In order to tell the logs apart, you can set different log facilities for the instances, e.g.'syslog_facility=local6', then configure syslogd to write local6 into "dovecot-otherinstance.log". Alternatively specify the log paths directly in 'log_path' and related settings. Rotating Log Files ------------------ If you specified log file paths manually in 'dovecot.conf' instead of using syslog, you can send USR1 signal to Dovecot to make it close and reopen the log files. This can be done easily with: ---%<------------------------------------------------------------------------- doveadm log reopen ---%<------------------------------------------------------------------------- Troubleshooting --------------- If you can't see the Dovecot processes running after starting 'dovecot', something is most likely wrong in your 'dovecot.conf'. Look at the error from Dovecot's log file. See for how to find the log. If you really can't find any error messages from any logs, try starting Dovecot with 'dovecot -F'. If you see it crash like: ---%<------------------------------------------------------------------------- sh: segmentation fault (core dumped) dovecot -F ---%<------------------------------------------------------------------------- Then it's a bug in Dovecot. Please report it with your configuration file. If it simply quits without giving any error, then it wrote the error to a log file and you just didn't find it. Try specifying the log file manually and make sure you're really looking at the correct file. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Plugins.Stats.txt0000644000175000017500000000340612244263657015115 00000000000000Stats plugin ============ Stats plugin can be used to send statistics (CPU, disk usage, etc.) from mail processes to the stats process. The stats process can later be queried what's going on in the system. 
With imap_stats plugin you can get per-command level statistics for IMAP commands. Configuration ------------- ---%<------------------------------------------------------------------------- mail_plugins = $mail_plugins stats protocol imap { mail_plugins = $mail_plugins imap_stats } plugin { # How often to send statistics to the stats process. (Must be specified or stats are disabled.) stats_refresh = 5s # Track per-command statistics. stats_track_cmds = yes } ---%<------------------------------------------------------------------------- Since the stats process tracks quite a lot of statistics, its memory usage has to be limited by forgetting the more detailed stats first. These settings configure how the memory usage it handled: ---%<------------------------------------------------------------------------- stats_memory_limit = 16 M stats_command_min_time = 1 mins stats_domain_min_time = 12 hours stats_ip_min_time = 12 hours stats_session_min_time = 15 mins stats_user_min_time = 1 hours ---%<------------------------------------------------------------------------- Looking ======= The 'doveadm stats dump' can be used to output raw statistics data. Ideally there would be a nice GUI that could be used to zoom around in the data, but that's not implemented yet. For now there is an internal 'doveadm stats top' command, but a much better one can be found as a perl script:stats-top.pl [http://www.dovecot.org/tools/stats-top.pl], which also requires stats.pl [http://www.dovecot.org/tools/stats.pl]. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PasswordDatabase.ExtraFields.NoDelay.txt0000644000175000017500000000075712244263655021375 00000000000000Nodelay extra field =================== If the authentication fails, Dovecot typically waits 0-2 seconds before sending back the "authentication failed" reply. If this field is set, no such delay is done. This is commonly used with [PasswordDatabase.ExtraFields.Proxy.txt] and [PasswordDatabase.ExtraFields.Host.txt]. Note that if PAM is used as the passdb, it adds an extra delay which can't be removed. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MissingMailboxes.txt0000644000175000017500000000371512244263650015650 00000000000000Missing mailboxes ================= Namespaces ---------- Dovecot by default doesn't use any "personal IMAP namespace prefix", which clients often call either "IMAP namespace" or "IMAP prefix". With Courier you probably had this set to "INBOX.", with UW-IMAP you might have set it to "mail/". So, the solution is simply to set this field empty and restart your IMAP client. If this helps, but you don't want to modify the clients' configuration, see . Mail location ------------- If it didn't help, you might have [MailLocation.txt] setting wrong. If it's unset, Dovecot tries to detect where your mail is stored by looking at '~/Maildir', '~/mail', '/var/spool/mail/' and '/var/mail/' directories. Depending on what you want, Dovecot might have guessed wrong. See for how to figure out what exactly is the problem. Missing INBOX (mbox) -------------------- See if the mails are stored in '~/mbox' file. If '~/mbox' file exists, UW-IMAP moves mails there from '/var/mail/user'. Dovecot supports this with [Plugins.Snarf.txt]. Subscriptions ------------- Dovecot uses different filenames for list of mailbox subscriptions. You'll need to rename these to ones that Dovecot wants (currently '.subscriptions' for mbox and 'subscriptions' for Maildir). See for more information. 
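For example, when migrating from Courier IMAP, whose Maildir++ subscription file is normally named 'courierimapsubscribed' and stores names with an "INBOX." prefix, a rough per-user conversion could look like the sketch below. This is only an illustration; the official courier-dovecot-migrate.pl script handles this and other corner cases more carefully:

---%<-------------------------------------------------------------------------
# Run inside the user's Maildir: strip Courier's "INBOX." prefix and write
# the result into the file name Dovecot expects for Maildir.
sed 's/^INBOX\.//' courierimapsubscribed > subscriptions
---%<-------------------------------------------------------------------------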
Troubleshooting --------------- If it's still not working, check first if the problem is with IMAP client or server configuration. Easiest way to do this is to talk IMAP directly. (include the A, B, C): ---%<------------------------------------------------------------------------- telnet imap.example.org 143 A login username password B list "" * C logout ---%<------------------------------------------------------------------------- If you see a list of expected mailboxes, the problem is with your IMAP client. If not, set 'mail_debug=yes' and look at the logs. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Design.Storage.Mailbox.Sync.txt0000644000175000017500000001415612244263644017520 00000000000000Mailbox Synchronization ======================= The idea behind synchronization is to find out what changes other sessions have done to the mailbox and to finalize our own changes to the mailbox. For example if you expunge a message in a transaction and commit it, the commit will only write a "please expunge UID n" record to Dovecot's transaction log file. The message still exists on the disk. The next time Dovecot syncs the mailbox (either the session that wrote the record or another one), it goes through all the non-synchronized records in transaction log and applies the requested changes to the backend mailbox. Syncing can be a bit heavyweight operation, so it's possible to commit multiple transactions and perform a single sync for all of them. Dovecot attempts to do this with IMAP protocol when pipelining commands. The other important job of syncing is to refresh mailbox's state: * Finding out about external modifications to mailbox (e.g. a new mail delivered to Maildir/new/). * Updating in-memory view of what messages exist, what their flags are, etc. When a mailbox is opened, its state starts with what index files contain at the time. Since the backend mailbox may have already changed, and syncing an up-to-date mailbox is usually really cheap, there isn't much point in not syncing mailbox immediately after opening. The mailbox state stays the same until you synchronize the mailbox again, before that no new messages show up and no messages get expunged. Typically you would sync the mailbox * after committing a transaction that modifies backend mailbox in any way (instead of just internal index data), such as after changing message flags or expunging a message. * whenever you want to find out if there are any changes. With IMAP protocol this is done every time after running a command. Initializing ------------ 'mailbox_sync_init()' initializes syncing. There are some flags that control how much effort is spent on syncing: * 'MAILBOX_SYNC_FLAG_FAST' can be given when you're ready for mailbox to be refreshed, but don't care much if it actually is or not. When this flag is set, Dovecot still notices all internal changes, but external changes are checked only once every few seconds or so. * 'MAILBOX_SYNC_FLAG_FULL_READ' is mainly useful with mboxes. If 'mbox_dirty_syncs=yes' and a new mail gets appended to mbox by an external program, Dovecot assumes that the only change was the added mail, even though the program may have also modified existing messages' flags by rewriting Status: headers. If 'mbox_very_dirty_syncs=no', these changes are noticed after the next time mailbox is opened. So when this flag is enabled, it means Dovecot should try harder to find out if there were any external unexpected changes. 
It's currently used only with IMAP SELECT and CHECK commands and POP3 startup. Probably unnecessary elsewhere. * 'MAILBOX_SYNC_FLAG_FULL_WRITE' is again mainly useful with mboxes. If 'mbox_lazy_writes=no', Dovecot delays writing flag changes to mbox file until mailbox is closed or IMAP CHECK command is issued. Using this elsewhere is probably unnecessary, except as an optimization if mailbox is in any case synced just before closing it, you might as well give this flag to it to avoid double-syncing with mbox. * 'MAILBOX_SYNC_FLAG_FORCE_RESYNC' is used to force resyncing indexes. The only time this should be done is when manually triggered by administrator. Then there are also other syncing flags: * 'MAILBOX_SYNC_FLAG_NO_EXPUNGES': No expunged messages are removed from the in-memory mailbox view. Their removal is delayed until syncing is done without this flag. Attempting to access the expunged messages may or may not work, depending on what information is accessed and what storage backend is used. * 'MAILBOX_SYNC_FLAG_FIX_INCONSISTENT': Normally when the internal mailbox state can't be consistently updated (typically due to index file corruption), the syncing fails. When this flag is set, it means that the caller doesn't care about mailbox's previous state and just wants to get it accessible again. Typically this is used when the mailbox is being opened, but not afterwards. * 'MAILBOX_SYNC_FLAG_EXPUNGE' is mainly intended for virtual plugin with IMAP protocol. You probably shouldn't use it. Reading changes --------------- While 'mailbox_sync_next()' returns TRUE, it fills out sync record: * seq1, seq2: Message sequence numbers that were affected * type: expunge, flag change or modseq change. Expunge records don't immediately change the view's sequence numbers. After seeing an expunge record you can still fetch the expunged messages' flags and possibly other information. Only after syncing is deinitialized, the sequences change. Message flag change records don't actually show what the changes were. You can find the new flags just by fetching them ('mail_get_flags()', etc.), they're available immediately. You'll need to create a [Design.Storage.Mailbox.Transaction.txt] and a [Design.Storage.Mail.txt] for that. For example: ---%<------------------------------------------------------------------------- sync_ctx = mailbox_sync_init(box, flags); trans = mailbox_transaction_begin(box, 0); mail = mail_alloc(trans, MAIL_FETCH_FLAGS, 0); ---%<------------------------------------------------------------------------- If you don't actually care about sync records, you don't necessarily have to even call 'mailbox_sync_next()'. In that case it's actually easiest to perform the whole sync using a one-step 'mailbox_sync()' function. This function also sets 'MAILBOX_SYNC_FLAG_FIX_INCONSISTENT' flag automatically. Deinitializing -------------- 'mailbox_sync_deinit()' finalizes the syncing. If any errors occurred during sync, it'll return -1. If 'MAILBOX_SYNC_FLAG_NO_EXPUNGES' was used and some expunges were actually delayed,'status_r->sync_delayed_expunges' is set to TRUE. Implementing sync for a storage backend --------------------------------------- FIXME: talk about mail_index_sync_*() and how to change stuff and how to update internal state. 
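To recap the caller-side API described in the sections above (not the backend
implementation), a typical sync could be structured roughly like the sketch
below. This is only an illustration: it uses the functions discussed on this
page, assumes the usual lib-storage headers and an already opened 'box', and
exact signatures may vary between versions.

---%<-------------------------------------------------------------------------
/* Sketch: refresh the mailbox and walk through the change records. */
struct mailbox_sync_context *sync_ctx;
struct mailbox_sync_rec sync_rec;
struct mailbox_sync_status status;

sync_ctx = mailbox_sync_init(box, MAILBOX_SYNC_FLAG_FULL_READ);
while (mailbox_sync_next(sync_ctx, &sync_rec)) {
	/* sync_rec.seq1..seq2 were expunged or had flag/modseq changes,
	   depending on sync_rec.type */
}
if (mailbox_sync_deinit(&sync_ctx, &status) < 0) {
	/* syncing failed; handle the error */
}
---%<-------------------------------------------------------------------------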
(This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Pigeonhole.ManageSieve.Configuration.txt0000644000175000017500000002526012244263656021462 00000000000000ManageSieve Configuration ========================= *NOTE*: If you have used the Sieve plugin before and you have '.dovecot.sieve' files in user directories, you are advised to *make a backup first*. Although theManageSieve daemon takes care to move these files to the Sieve storage before it is substituted with a symbolic link, this is not a very well tested operation, meaning that there is a possibility that existing Sieve scripts get lost. The ManageSieve configuration consists of ManageSieve protocol settings and [Pigeonhole.Sieve.txt]-related settings. The Sieve interpreter settings are shared with settings of the [Pigeonhole.Sieve.txt] for Dovecot's [LDA.txt] and . First, the ManageSieve protocol settings are outlined and then the relevant Sieve settings are described. Protocol Configuration ---------------------- Along with all other binaries that Dovecot uses, the managesieve and managesieve-login binaries are installed during 'make install' of the package. The only thing you need to do to activate the ManageSieve protocol support in Dovecot is to add 'sieve' to the 'protocols=' configuration line in your dovecot.conf. The managesieve daemon will listen on port 4190 by default. As the implementation of the managesieve daemon is largely based on the original IMAP implementation, it is very similar in terms of configuration. In addition to most mail daemon config settings, the managesieve daemon accepts a few more. The following settings can be configured in the 'protocol sieve' section: managesieve_max_line_length = 65536: The maximum ManageSieve command line length in bytes. This setting is directly borrowed from IMAP. But, since long command lines are very unlikely withManageSieve, changing this will not be very useful. managesieve_logout_format = bytes=%i/%o: Specifies the string pattern used to compose the logout message of an authenticated session. The following substitutions are available: : ---%<----------------------------------------------------------------------- %i - total number of bytes read from client %o - total number of bytes sent to client ---%<----------------------------------------------------------------------- managesieve_implementation_string = Dovecot Pigeonhole: To fool ManageSieve clients that are focused on CMU's timesieved you can specify the IMPLEMENTATION capability that the Dovecot reports to clients (e.g. 'Cyrus timsieved v2.2.13'). managesieve_max_compile_errors = 5: The maximum number of compile errors that are returned to the client upon script upload or script verification. managesieve_sieve_capability =, managesieve_notify_capability = : Respectively the SIEVE and NOTIFY capabilities reported by the ManageSieve service before authentication. If left unassigned, these will be assigned dynamically according to what the Sieve interpreter supports by default (after login this may differ depending on the authenticated user). Sieve Interpreter Configuration ------------------------------- The part of the [Pigeonhole.Sieve.txt] configuration that is relevant forManageSieve mainly consists of the settings that specify where the user's scripts are stored and where the active script is located. 
TheManageSieve service primarily uses the following Sieve interpreter settings in the 'plugin' section of the Dovecot configuration: sieve_dir = ~/sieve: This specifies the path to the directory where the uploaded scripts are stored. Scripts are stored as separate files with extension '.sieve'. All other files are ignored when scripts are listed by aManageSieve client. The Sieve interpreter also uses this setting to locate the user's personal scripts for use with the Sieve include extension [http://tools.ietf.org/html/draft-ietf-sieve-include]. A storage location specified by 'sieve_dir' is always generated automatically if it does not exist (as far as the system permits the user to do so; no root privileges are used). This is similar to the behavior of the mail daemons regarding the 'mail_location' configuration. sieve = ~/.dovecot.sieve: This specifies the location of the symbolic link pointing to the active script in the Sieve storage directory. The Sieve interpreter uses this setting to locate the main script file that needs to be executed upon delivery. When usingManageSieve, this is a symbolic link managed by the ManageSieve service. ManageSieve thereby determines which script (if any) in the 'sieve_dir' directory is executed for incoming messages. If a regular file already exists at the location specified by the 'sieve' setting, it is moved to the 'sieve_dir' location before the symbolic link is installed. It is renamed to 'dovecot.orig.sieve' and therefore listed as `dovecot.orig' by aManageSieve client. *Note:* It is not wise to place this link inside your mail store, as it may be mistaken for a mail folder. Inside a maildir for instance, the default '.dovecot.sieve' would show up as phantom folder //dovecot/sieve/ in your IMAP tree. Quota Support ------------- By default, users can manage an unlimited number of Sieve scripts on the server throughManageSieve. However, ManageSieve can be configured to enforce limits on the number of personal Sieve scripts per user and/or the amount of disk storage used by these scripts. The maximum size of individual uploaded scripts is dictated by the configuration of the [Pigeonhole.Sieve.txt]. The limits are configured in the plugin section of the Dovecot configuration as follows: sieve_max_script_size = 1M: The maximum size of a Sieve script. sieve_quota_max_scripts = 0: The maximum number of personal Sieve scripts a single user can have. sieve_quota_max_storage = 0: The maximum amount of disk storage a single user's scripts may occupy. A value of 0 for these settings means that no limit is enforced. Examples -------- The following provides example configurations for ManageSieve in dovecot.conf for the various versions. Only sections relevant toManageSieve and the Sieve plugin are shown. Refer to 20-managesieve.conf in doc/dovecot/example-config/conf.d, but don't forget to add 'sieve' to the 'protocols' setting if you use it. ---%<------------------------------------------------------------------------- ... service managesieve-login { #inet_listener sieve { # port = 4190 #} #inet_listener sieve_deprecated { # port = 2000 #} # Number of connections to handle before starting a new process. Typically # the only useful values are 0 (unlimited) or 1. 1 is more secure, but 0 # is faster. #service_count = 1 # Number of processes to always keep waiting for more connections. #process_min_avail = 0 # If you set service_count=0, you probably need to grow this. #vsz_limit = 64M } service managesieve { # Max. 
number of ManageSieve processes (connections) #process_limit = 1024 } # Service configuration protocol sieve { # Maximum ManageSieve command line length in bytes. ManageSieve usually does # not involve overly long command lines, so this setting will not normally need # adjustment #managesieve_max_line_length = 65536 # Maximum number of ManageSieve connections allowed for a user from each IP address. # NOTE: The username is compared case-sensitively. #mail_max_userip_connections = 10 # Space separated list of plugins to load (none known to be useful so far). Do NOT # try to load IMAP plugins here. #mail_plugins = # MANAGESIEVE logout format string: # %i - total number of bytes read from client # %o - total number of bytes sent to client #managesieve_logout_format = bytes=%i/%o # To fool ManageSieve clients that are focused on CMU's timesieved you can specify # the IMPLEMENTATION capability that the dovecot reports to clients. # For example: 'Cyrus timsieved v2.2.13' #managesieve_implementation_string = Dovecot Pigeonhole # Explicitly specify the SIEVE and NOTIFY capability reported by the server before # login. If left unassigned these will be reported dynamically according to what # the Sieve interpreter supports by default (after login this may differ depending # on the user). #managesieve_sieve_capability = #managesieve_notify_capability = # The maximum number of compile errors that are returned to the client upon script # upload or script verification. #managesieve_max_compile_errors = 5 # Refer to 90-sieve.conf for script quota configuration and configuration of # Sieve execution limits. } plugin { # Used by both the Sieve plugin and the ManageSieve protocol sieve = ~/.dovecot.sieve sieve_dir = ~/sieve } ---%<------------------------------------------------------------------------- Proxy ----- Like Dovecot's imapd, the ManageSieve login daemon supports proxying to multiple backend servers. Although the underlying code is copied from the imapd sources for the most part, it has someManageSieve-specifics that have not seen much testing. The [PasswordDatabase.ExtraFields.Proxy.txt] for POP3 and IMAP should apply to ManageSieve as well. Migration from Dovecot v1.x ManageSieve --------------------------------------- The following has changed since the ManageSieve releases for Dovecot v1.x: * For Dovecot v1.0 and v1.1, the 'sieve_dir' setting used by ManageSieve was called 'sieve_storage'. Also, the 'sieve' and 'sieve_storage' settings were located inside the 'protocol managesieve' section of the configuration. As per Dovecot v1.2 these settings are shared with the Sieve plugin and located in the 'plugin' section of the configuration. Make sure you have updated the name of the 'sieve_dir' setting and the location of both these settings if you are upgrading fromManageSieve for Dovecot v1.0/v1.1. * Pigeonhole ManageSieve does not use the 'mail_location' configuration as a fall-back anymore to determine a default location for storing Sieve scripts. It always uses the 'sieve_dir' setting, with default value '~/sieve'. * The Pigeonhole ManageSieve service now binds to TCP port 4190 by default due to the IANA port assignment for theManageSieve service. When upgrading from v1.x, this should be taken into account. For a smooth transition, the service can be configured manually to listen on both port 2000 and port 4190, as demonstrated in the example section. * The Dovecot configuration now calls the ManageSieve protocol 'sieve' instead of 'managesieve' because it is registered as such with IANA. 
The binaries and the services are still called 'managesieve' and 'managesieve-login'. The example section demonstrates how this affects the configuration. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Upgrading.2.0.txt0000644000175000017500000001127712244263664014620 00000000000000Upgrading Dovecot v1.2 to v2.0 ============================== A lot of settings have changed. Dovecot v2.0 can still use most of the v1.x configuration files, but it logs a lot of warnings at startup. A quick and easy way to convert your old config file to v2.0 format is: ---%<------------------------------------------------------------------------- # convert old config to new temp config file doveconf -n -c /etc/dovecot/dovecot.conf > dovecot-2.conf # replace the old config file with the new generated file mv dovecot-2.conf /etc/dovecot/dovecot.conf ---%<------------------------------------------------------------------------- This command logs a warning about each obsolete setting it converts to the new format.*You can simply ignore all the warnings* in most cases. If you really want to, you can modify your old config file using the instructions from the warnings, but even that can be done more easily by looking at the generated config file. Some of the warning messages aren't obvious. Once running v2.0, it's safe to downgrade to v1.2.5 or newer. Older versions don't understand some of the changes to index files and will log errors. Permission related changes -------------------------- * Dovecot uses two system users for internal purposes now by default: "dovenull" and "dovecot". You need to create the "dovenull" user or change 'default_login_user' setting. "dovenull" user is used by completely untrustworthy processes, while "dovecot" user is used for slightly more trusted processes. * If you want to be using something else than "dovecot" as the other user, you need to change 'default_internal_user' setting. * Just like with "dovecot" user, "dovenull" doesn't need a password, home directory or anything else (but it's good to give it its own private "dovenull" group). * "auth-master" socket related configuration should be replaced with "auth-userdb" socket everywhere (auth-master should still work, but it gives more permissions than necessary) * If you get any kind of "permission denied" errors related to UNIX sockets, you can change their permissions from 'service { unix_listener { ... } } ' blocks. See 'example-config/conf.d/10-master.conf' for examples or 'doveconf -a' output for their current values. Other major changes ------------------- * No more convert plugin, use [Tools.Dsync.txt] instead * No more expire-tool, use [Plugins.Expire.txt] instead. Also expire configuration is different. * [PostLoginScripting.txt] and need to be modified * [Quota.Configuration.txt] and the script may need to be modified (most environment settings like $USER are gone) * Global ACL filenames now require namespace prefix (e.g. if you use "INBOX." prefix,'/etc/acls/foo' needs to be renamed to '/etc/acls/INBOX.foo' * Maildir: Permissions for newly created mail files are no longer copied from dovecot-shared file, but instead from the mail directory (e.g. for "foo" mailbox, they're taken from '~/Maildir/.foo' directory) * dbox: v2.0 format is slightly different, but backwards compatible. The main problem is that v2.0 no longer supports maildir-dbox hybrid resulting from "fast Maildir migration". 
If you have any Maildir files in your dbox, you need to convert them somehow (some examples [http://dovecot.org/list/dovecot/2010-September/053012.html]). You might also consider using [Tools.Dsync.txt] to get rid of the old unused metadata in your dbox files. * Pre-login and post-login CAPABILITY reply is now different. Dovecot expects clients to recognize new automatically sent capabilities. [http://dovecot.org/list/dovecot/2010-April/048147.html] This should work with all commonly used clients, but some rarely used clients might have problems. Either get the client fixed, or set 'imap_capability' manually. * ManageSieve protocol [http://tools.ietf.org/html/rfc5804] was assigned an official port by IANA: 4190. This is used by by default now. If you want to listen also on the old 2000 port, see the example. * 'dovecot --exec-mail imap' has been replaced by simply running "imap" binary. You can also use "imap -u" to access other users' mails more easily. LDA --- * deliver binary was renamed to dovecot-lda (but a symlink still exists for now) * -n parameter was replaced by lda_mailbox_autocreate setting. The default also changed to "no". * -s parameter was replaced by lda_mailbox_autosubscribe setting. The default is "no", as before. (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/UserDatabase.Static.txt0000644000175000017500000000312312244263664016162 00000000000000Static User Database ==================== Static user database can be used when you want to use only single UID and GID values for all users, and their home directories can be specified with a simple template. The syntax is: ---%<------------------------------------------------------------------------- userdb { driver = static args = uid= gid= home= } ---%<------------------------------------------------------------------------- The home is optional. You can also return other [UserDatabase.ExtraFields.txt]. You can use the standard [Variables.txt] everywhere. LDA and passdb lookup for user verification ------------------------------------------- Unless your MTA already verifies that the user exists before calling dovecot-lda, you'll most likely want dovecot-lda itself to verify the user's existence. Since dovecot-lda looks up the user only from the userdb, it of course doesn't work with static userdb because there is no list of users. Normally static userdb handles this by doing a passdb lookup instead. This works with most passdbs, with [PasswordDatabase.PAM.txt] being the most notable exception. If you want to avoid this user verification, you can add 'allow_all_users=yes' to the args in which case the passdb lookup is skipped. Example ------- ---%<------------------------------------------------------------------------- userdb { driver = static args = uid=500 gid=500 home=/home/%u } ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/Design.Dsync.txt0000644000175000017500000001155012244263644014662 00000000000000Dsync Design ============ Two-way synchronization ----------------------- dsync attempts to preserve all changes done by both sides of the synced mailboxes. Mailbox list ------------ Mailboxes have 128 bit globally unique IDs, which are used for figuring out when two mailboxes should actually be synchronized. This solves two major problems: * If mailbox has been renamed in one side, dsync finds it because its GUID hasn't changed. 
* If mailbox has been deleted and recreated, dsync doesn't attempt to sync it because it's a different mailbox. Then there's the problem of how to correctly sync mailbox renames and deletions. How do you know which side of the sync has the most recent name for the mailbox? How do you know if one side had deleted mailbox, or if the other side had created it? To solve these problems, Dovecot v2.0 created a "mailbox log", which adds a record with mailbox GUID and timestamp whenever mailbox is renamed or deleted. So: * If mailbox has different names on two sides, its "last renamed" timestamp is looked up from the mailbox list index. The side with the most recent timestamp is assumed to contain the newer name and the other side's mailbox is renamed to it. * If neither side has a "last renamed" timestamp, one side is picked. This shouldn't happen, except when mailbox log is deleted for some reason or if the renaming is done outside Dovecot. * If mailbox exists only on one side, the other side checks if mailbox log contains a delete record for its GUID. If there is one, the mailbox is deleted from the other side. If there's not, the mailbox is created and synced. * Subscriptions and unsubscriptions are synced in a similar way. But because it's possible to be subscribed to nonexistent mailboxes, mailbox log can't contain mailbox GUIDs for them. Instead the first 128 bits of SHA1 of mailbox name are used. Collisions for mailbox names are highly unlikely, but even if one happens, the worst that can happen is that user gets unsubscribed from wrong mailbox. dsync writes timestamps to changelog using the original timestamps, so that dsync's changes won't override changes done by user during sync. Mailbox ------- When saving new mails, dsync preserves all of their immutable state: * GUID * Received date * Save date * Message contents It also attempts preserve IMAP UID. This works as long as the other side hasn't already used the UID for another mail. If it has, dsync doesn't attempt to preserve the UID, because an IMAP client might have already seen the UID and cached another mail's contents for it. IMAP requires that message's contents must never change, so UIDs can't be reused. So whenever an UID conflict happens, dsync gives messages in both sides a new UID, because it can't know which message the client had seen, or perhaps user used two clients and both saw a different message. (This assumes a master/slave replication use case for dsync.) The mutable metadata that dsync preserves is: * Message flags and keywords * Modification sequences (modseqs) Flags and keywords are synced based on modseqs. Whichever side has a higher modseq for the message, its flags and keywords are synced to the other side. Currently there's no per-flag or per-keyword synchronization, so that if one side had added \Seen flag and other side had added \Answered flag, one of them would be dropped. Finding what to sync -------------------- dsync can run in full mode or fast mode. Full mode means it goes through all messages in all mailboxes, making sure everything is fully synchronized. In fast mode it relies on uidvalidity, uid-next and highest-modseq values to find out changes. If any of the values changed, the mailbox is included in sync. FIXME: A superfast mode should still be implemented, where once a mailbox is selected for syncing, it should sync only mails whose modseq is higher than a given one. This would improve performance and network traffic with large mailboxes. 
Copy optimizations ------------------ Before dsync actually starts syncing anything, it first fetched a list of all to-be-synced messages and adds them to a GUID -> message hash table. Whenever dsync needs to sync a new message to the other side, it first checks if the message's GUID already exists on the other side. If it does, it starts a message copy operation instead of a full save. It's possible that this copy operation fails if the message just gets expunged from the other side, so there needs to be fallback handling for this. If the message exists in multiple mailboxes, a copy from the next mailbox is attempted. If all of them fail, dsync fallbacks to saving the message. FIXME: This optimization currently works only in full sync mode. If this were to work in fast sync mode, the full mailbox list would have to be looked up from local side. And this would slow it down.. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MailLocation.mbox.txt0000644000175000017500000002100112244263647015704 00000000000000mbox configuration ================== See for a complete description of how Dovecot has implemented mbox support. Mail location ------------- In many systems the user's mails are by default stored in '/var/mail/username' file. This file is called INBOX in IMAP world. Since IMAP supports multiple mailboxes, you'll need to have a directory for them as well. Usually '~/mail' is a good choice for this. For installation such as this, the mail location is specified with: ---%<------------------------------------------------------------------------- # %u is replaced with the username that logs in mail_location = mbox:~/mail:INBOX=/var/mail/%u ---%<------------------------------------------------------------------------- It's in no way a requirement to have the INBOX in '/var/mail/' directory. In fact this often just brings problems because Dovecot might not be able to write dotlock files to the directory (see below). You can avoid this completely by just keeping everything in '~/mail/': ---%<------------------------------------------------------------------------- # INBOX exists in ~/mail/inbox mail_location = mbox:~/mail ---%<------------------------------------------------------------------------- Index files ----------- See [MailLocation.txt] for full explanation of how to change the index path. For example: ---%<------------------------------------------------------------------------- mail_location = mbox:~/mail:INBOX=/var/mail/%u:INDEX=/var/indexes/%u ---%<------------------------------------------------------------------------- Locking ------- Make sure that all software accessing the mboxes are using the same locking methods in the same order. The order is important to prevent deadlocking. From Dovecot's side you can change these from 'mbox_read_locks' and 'mbox_write_locks' settings. See for more information. /var/mail/ dotlocks ------------------- Often mbox write locks include dotlock, which means that Dovecot needs to create a new ".lock" file to the directory where the mbox file exists. If your INBOXes are in '/var/mail/' directory you may have to give Dovecot write access to the directory. There are two ways the '/var/mail/' directory's permissions have traditionally been set up: * World-writable with sticky bit set, allowing anyone to create new files but not overwrite or delete existing files owned by someone else (ie. same as /tmp). 
You can do this with 'chmod a+rwxt /var/mail' * Directory owned by a mail group and the directory set to group-writable (mode=0770, group=mail) You can give Dovecot access to mail group by setting: ---%<------------------------------------------------------------------------- mail_privileged_group = mail ---%<------------------------------------------------------------------------- NOTE: With the 'mail_privileged_group' setting unfortunately doesn't work, so you'll have to use the sticky bit, disable dotlocking completely or use LMTP server instead. /var/mail/* permissions ----------------------- In some systems the '/var/mail/$USER' files have 0660 mode permissions. This causes Dovecot to try to preserve the file's group, and if it doesn't have permissions to do so, it'll fail with an error: ---%<------------------------------------------------------------------------- imap(user): Error: chown(/home/user/mail/.imap/INBOX, -1, 12(mail)) failed: Operation not permitted (egid=1000(user), group based on /var/mail/user) ---%<------------------------------------------------------------------------- There is rarely any real need for the files to have 0660 mode, so the best solution for this problem is to just change the mode to 0600: ---%<------------------------------------------------------------------------- chmod 0600 /var/mail/* ---%<------------------------------------------------------------------------- Optimizations ------------- The settings below are related to mbox performance. See for more complete description of what they do. * 'mbox_lazy_writes=yes' (default): Metadata updates, such as writing X-UID headers or flag changes, aren't written to mbox file until the mailbox is closed or CHECK or EXPUNGE IMAP commands are sent by the client. The mbox rewrites can be costly, so this may avoid a lot of disk writes. * 'mbox_dirty_syncs=yes' (default): Dovecot assumes that external mbox file changes only mean that new messages were appended to it. Without this setting Dovecot re-reads the whole mbox file whenever it changes. There are various safeguards in place to make this setting safe even when other changes than appends were done to the mbox. The only downside to this setting is that external message flag modifications may not be visible immediately. * 'mbox_very_dirty_syncs=yes' (not default): When opening mbox file that has been changed externally, don't re-read it. Otherwise similar to 'mbox_dirty_syncs=yes'. * 'mbox_min_index_size=n': If mbox file is smaller than n kilobytes, don't update its index files. If an index file exists for it, it's still read however. Only /var/mail/ mboxes ---------------------- With POP3 it's been traditional that users have their mails only in the '/var/mail/' directory. IMAP however supports having multiple mailboxes, so each user has to have a private directory where the mailboxes are stored. Dovecot also needs a directory for its index files unless you disable them completely. If you *really* want to use Dovecot as a plain POP3 server without index files, you can work around the problem of not having the per-user directory: * Set users' home directory in userdb to some empty non-writable directory, for example '/var/empty' * Modify 'mail_location' setting so that the mail root directory is also the empty directory and append ':INDEX=MEMORY' to it. For example: 'mail_location = mbox:/var/empty:INBOX=/var/mail/%u:INDEX=MEMORY' * Note that if you have IMAP users, they'll see the '/var/empty' as the directory containing other mailboxes than INBOX. 
If the directory is writable, all the users will have their mailboxes shared. Directory layout ---------------- By default Dovecot uses filesystem layout under mbox. This means that mail is stored in mbox files under hierarchical directories, for example: * '~/mail/inbox' - mbox file containing mail for INBOX * '~/mail/foo' - mbox file containing mail for mailbox "foo" * '~/mail/bar/baz' - mbox file containing mail for mailbox "bar/baz" One upshot of this is that it is not normally possible to have mailboxes which are subfolders of mailboxes containing messages. As an alternative, it is possible to configure Dovecot to store all mailboxes in a single directory with hierarchical levels separated by a dot. This can be configured by adding ':LAYOUT=maildir++' to the mail location. There are, however, some further considerations when doing this; see for some examples. Control files ------------- Under mbox format, Dovecot maintains the subscribed mailboxes list in a file '.subscriptions' which by default is stored in the mail location root. So in the example configuration this would be at '~/mail/.subscriptions'. If you want to put this somewhere else, you can change the directory in which the '.subscriptions' file is kept by using the 'CONTROL' parameter. So for example, if we configured the mail location using: ---%<------------------------------------------------------------------------- mail_location = mbox:~/mail:CONTROL=~/mail-control ---%<------------------------------------------------------------------------- then the subscribed mailboxes list would be maintained at '~/mail-control/.subscriptions'. One practical application of the 'CONTROL' parameter is described at . Message file name ----------------- By default, Dovecot stores messages for INBOX in an mbox file called "inbox", and messages for all other mailboxes in an mbox file whose relative path is equivalent to the name of the mailbox. Under this scheme, it is not possible to have mailboxes which contain both messages and child mailboxes. However, the behaviour (for mailboxes other than INBOX) can be changed using the 'DIRNAME' parameter. If the 'DIRNAME' parameter is specified with a particular value, then Dovecot will store messages in a file with a name of that value, in a directory with a name equivalent to the mailbox name. There are, however, some further considerations when doing this; see for an example. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PreAuth.txt0000644000175000017500000000164412244263657013751 00000000000000Pre-Authenticated Sessions ========================== For debugging purpose or to perform IMAP actions on-behalf of an user, you can use pre-authenticated sessions. You can do this by simply running: ---%<------------------------------------------------------------------------- /usr/local/libexec/dovecot/imap ---%<------------------------------------------------------------------------- And you can start talking IMAP via stdin/stdout. This doesn't change process's UID, GID, or get any userdb settings. If you want to emulate a full regular login, you can execute ---%<------------------------------------------------------------------------- /usr/local/libexec/dovecot/imap -u username ---%<------------------------------------------------------------------------- Of course, you may need to run the above command as root if it needs to change process's uid/gid. 
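For example, once the process is running you can type IMAP commands directly
on stdin, without a LOGIN command, since the session is already authenticated
(the commands below are just an illustration):

---%<-------------------------------------------------------------------------
/usr/local/libexec/dovecot/imap -u username
A select INBOX
B fetch 1 (flags envelope)
C logout
---%<-------------------------------------------------------------------------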
(This file was created from the wiki on 2013-11-24 04:42)

dovecot-2.2.9/doc/wiki/PasswordDatabase.ExtraFields.AllowNets.txt0000644000175000017500000000150312244263655021740 00000000000000

Allow_nets extra field
----------------------

This is a comma separated list of IPs and/or networks where the user is
allowed to log in from. If the user tries to log in from elsewhere, the
authentication fails the same way as if a wrong password had been given.

Example: 'allow_nets=127.0.0.0/8,192.168.0.0/16,1.2.3.4,4.5.6.7'.

IPv6 addresses are also allowed. IPv6-mapped IPv4 addresses (eg.
'::ffff:1.2.3.4') are converted to standard IPv4 addresses before matching.
Example: 'allow_nets=::1,2001:abcd:abcd::0:0/80,1.2.3.4'

passwd-file example
-------------------

---%<-------------------------------------------------------------------------
user:{plain}password::::::allow_nets=192.168.0.0/24
---%<-------------------------------------------------------------------------

(This file was created from the wiki on 2013-11-24 04:42)

dovecot-2.2.9/doc/wiki/AuthDatabase.SQL.txt0000644000175000017500000002003512244263637015356 00000000000000

SQL
===

SQL can be used for both passdb and userdb lookups. If the args parameter in
passdb sql and userdb sql contains the exact same filename, only one SQL
connection is used for both passdb and userdb lookups.

Contents

1. SQL
   1. Password database lookups
   2. Password verification by SQL server
   3. User database lookups
   4. User iteration
   5. Prefetching
   6. High availability
   7. Examples

Password database lookups
-------------------------

The 'password_query' setting contains the SQL query to look up the password.
It must return a field named "password". If you have it by any other name in
the database, you can use SQL's "AS" keyword ('SELECT pw AS password ..').

You can use all the normal [Variables.txt] such as '%u' in the SQL query.

If all the passwords are in the same format, you can use
'default_pass_scheme' to specify it. Otherwise each password needs to be
prefixed with "{password-scheme}", for example "{plain}plaintext-password".
See for a list of supported password schemes.

By default MySQL does case-insensitive string comparisons, so you may have a
problem if your users log in with different variations of the username, such
as "user", "User" and "uSer". To fix this, you can make the SQL database
return a "user" field [PasswordDatabase.ExtraFields.User.txt], which makes
Dovecot modify the username to the returned value.

Note that if you're using separate user and domain fields, a common problem
is that you're returning only the "user" field from the database. *This drops
out the domain from the username.* So make sure you're returning a
concatenated user@domain string, or the username and domain fields
separately. See the examples below.

The query can also return other [PasswordDatabase.ExtraFields.txt] which have
special meaning.

You can't use multiple statements in one query, but you could use a stored
procedure. If you want something like a last login update, use instead.

Password verification by SQL server
-----------------------------------

If the passwords are stored in the SQL server in some special format that
Dovecot doesn't recognize, it's still possible to use them. Change the SQL
query to return NULL as the password and return the row only if the password
matches. You'll also need to return a non-NULL "nopassword" field. The
password given by the user is available in the '%w' variable.
For example: ---%<------------------------------------------------------------------------- password_query = SELECT NULL AS password, 'Y' as nopassword, userid AS user \ FROM users WHERE userid = '%u' AND mysql_pass = password('%w') ---%<------------------------------------------------------------------------- This of course makes the verbose logging a bit wrong, since password mismatches are also logged as "unknown user". User database lookups --------------------- Usually your SQL database contains also the userdb information. This means user's UID, GID and home directory. If you're using only static UID and GID, and your home directory can be specified with a template, you could use [UserDatabase.Static.txt] instead. It is also a bit faster since it avoids doing the userdb SQL query. 'user_query' setting contains the SQL query to look up the userdb information. The commonly returned userdb fields are uid, gid, home and mail. See for more information about these and other fields that can be returned. If you're using a single UID and GID for all users, you can set them in dovecot.conf with: ---%<------------------------------------------------------------------------- mail_uid = vmail mail_gid = vmail ---%<------------------------------------------------------------------------- User iteration -------------- Some commands, such as 'doveadm -A' need to get a list of users. With SQL userdb this is done with 'iterate_query' setting. You can either return * "user" field containing either user or user@domain style usernames, or * "username" and "domain" fields Any other fields are ignored. Prefetching ----------- If you want to avoid doing two SQL queries when logging in with IMAP/POP3, you can make the 'password_query' return all the necessary userdb fields and use prefetch userdb to use those fields. If you're using Dovecot's deliver you'll still need to have the 'user_query' working. See for example configuration High availability ----------------- You can add multiple "host" parameters to the SQL connect string. Dovecot will do round robin load balancing between them. If one of them goes down, the others will handle the traffic. Examples -------- Note that "user" can have a special meaning in some SQL databases, so we're using "userid" instead. SQL table creation command: ---%<------------------------------------------------------------------------- CREATE TABLE users ( userid VARCHAR(128) NOT NULL, domain VARCHAR(128) NOT NULL, password VARCHAR(64) NOT NULL, home VARCHAR(255) NOT NULL, uid INTEGER NOT NULL, gid INTEGER NOT NULL ); ---%<------------------------------------------------------------------------- MySQL ----- Add to your 'dovecot-sql.conf' file: ---%<------------------------------------------------------------------------- driver = mysql # The mysqld.sock socket may be in different locations in different systems. # Use "host= ... pass=foo#bar" with double-quotes if your password has '#' character. 
connect = host=/var/run/mysqld/mysqld.sock dbname=mails user=admin password=pass # Alternatively you can connect to localhost as well: #connect = host=localhost dbname=mails user=admin password=pass password_query = SELECT userid AS username, domain, password \ FROM users WHERE userid = '%n' AND domain = '%d' user_query = SELECT home, uid, gid FROM users WHERE userid = '%n' AND domain = '%d' # For using doveadm -A: iterate_query = SELECT userid AS username, domain FROM users ---%<------------------------------------------------------------------------- PostgreSQL ---------- Add to your 'dovecot-sql.conf' file: ---%<------------------------------------------------------------------------- # You can also set up non-password authentication by modifying PostgreSQL's pg_hba.conf driver = pgsql # Use "host= ... pass=foo#bar" if your password has '#' character connect = host=localhost dbname=mails user=admin password=pass password_query = SELECT userid AS username, domain, password \ FROM users WHERE userid = '%n' AND domain = '%d' user_query = SELECT home, uid, gid FROM users WHERE userid = '%n' AND domain = '%d' # For using doveadm -A: iterate_query = SELECT userid AS username, domain FROM users ---%<------------------------------------------------------------------------- SQLite ------ Add to your 'dovecot-sql.conf' file: ---%<------------------------------------------------------------------------- driver = sqlite connect = /path/to/sqlite.db password_query = SELECT userid AS username, domain, password \ FROM users WHERE userid = '%n' AND domain = '%d' user_query = SELECT home, uid, gid FROM users WHERE userid = '%n' AND domain = '%d' # For using doveadm -A: iterate_query = SELECT userid AS username, domain FROM users ---%<------------------------------------------------------------------------- PostgreSQL/Horde ---------------- I used the following in devocot-sql.conf file to authenticate directly against the Horde user/password database (with static userdb) on PostgreSQL: ---%<------------------------------------------------------------------------- driver = pgsql connect = host=localhost dbname=horde user=dovecot password= default_pass_scheme = MD5-CRYPT password_query = SELECT user_uid AS username, user_pass AS password \ FROM horde_users WHERE user_uid = '%u' iterate_query = SELECT user_uid AS username FROM users ---%<------------------------------------------------------------------------- Note that you will have to change the password encryption in Horde to MD5-CRYPT. Also, the example above requires a 'dovecot' user in PostgreSQL with read (SELECT) privileges on the 'horde_users' table. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Migration.MailFormat.txt0000644000175000017500000002555612244263650016365 00000000000000Converting between mailbox formats ================================== If you want a transparent migration, the biggest problem is preserving message UIDs. See for the problems this may cause. If you do the conversion with dsync, it preserves the UIDs. dsync ----- With dsync you can convert between any two mailbox formats that Dovecot supports. As much of the mailbox state is preserved as possible. Typically it's everything. See for full documentation, here are only a couple of examples: * mbox -> maildir migration. Set 'mail_location=maildir:~/Maildir' and run 'dsync -u username mirror mbox:~/mail:INBOX=/var/mail/username' * maildir -> mdbox migration. 
Set 'mail_location=mdbox:~/mdbox' and run 'dsync -u username mirror maildir:~/Maildir' If you can successfully use dsync, you can skip the rest of this page. Converting from mbox to Maildir ------------------------------- * mb2md.pl with Dovecot modifications [http://dovecot.org/tools/mb2md.pl] can convert mails, preserving UIDs and keywords. * See also [attachment:migrateuser.sh script to drive the full migration for a user]. * This script requires patched 'mailutil' that supports Maildir. One working 'mailutil' binary is RHEL4 PINE RPM [http://dag.wieers.com/rpm/packages/pine/pine-4.64-3.el4.rf.i386.rpm] from the DAG RPM Repository [http://dag.wieers.com/rpm/]. You could also patch any version of pine/c-client and patch it with the Maildir patches from http://staff.washington.edu/chappa/pine/info/maildir.html. * mb2md.py [http://dovecot.org/list/dovecot/2008-March/029736.html] can convert also message UIDs. * Yet another way to fix the UIDL migration problem. If you can generate an uildlist ("messagenumber uidl" pairs), use my new -U uidllist option to inject X-UILD: headers in the converted Maildir-file. The modified mb2md script is avalable here:mb2md.xuidl.pl.gz [http://www.chaos.dk/~sch/mb2md.xuidl/mb2md.xuidl.pl.gz]. I used this to convert a cucipop installation to dovecot. pop3_reuse_xuidl=yes will do the rest./-- 2009-02-13/ Check also the *User-Contributed Maildir Support* section on the qmail community site [http://www.qmail.org/top.html#usersoft] for more choices. Example (user's mail in '~someuser/mail' and INBOX in '/var/mail/someuser'): ---%<------------------------------------------------------------------------- cd ~someuser mb2md-3.20.pl -s mail -R mb2md-3.20.pl -m -s /var/mail/someuser mv mail mail.old ---%<------------------------------------------------------------------------- Now the mail will be in '~someuser/Maildir'. Do not forget to migrate the subscriptions as well, otherwise the new maildir will seem to have only an inbox when viewed through a mail client that supports them. This can be as simple as copying the old '~someuser/mail/.subscriptions' file to '~someuser/Maildir/subscriptions' (warning: I have not tested this extensively, my subscription list and folder hierarchy was very simplistic). Hierarchy separator change -------------------------- The default hierarchy separator with Maildir is '.' instead of '/' which is common with mboxes. To keep the migration transparent to users, you can keep the '/' separator by using [Namespaces.txt]. In any case you need to replace the '/' with '.' in the subscriptions file: * ---%<---------------------------------------------------------------------- sed 's:/:.:g' subscriptions > subscriptions.new mv subscriptions.new subscriptions ---%<---------------------------------------------------------------------- UW-IMAP's subscriptions file is in '~/.mailboxlist'. Dovecot's mbox subscriptions is in '/.subscriptions'. Dovecot's Maildir subscriptions is in '/subscriptions'. Also if you're migrating from UW-IMAP, you probably had "mail/" prefixes in the mailbox names. You can again use [Namespaces.txt] to let clients use the prefix, or you can tell your users to remove the namespace prefix from their clients and change the subscriptions file: ---%<------------------------------------------------------------------------- sed 's/^mail\.//' subscriptions > subscriptions.new mv subscriptions.new subscriptions ---%<------------------------------------------------------------------------- Note that because Maildir uses '.' 
as the hierarchy separator in filesystem, it's not possible to have mailbox names containing '.' characters, even if you changed the separator in namespaces. If you really want to have dots, the only way to do this is by modifying the filesystem separator in 'MAILDIR_FS_SEP' and 'MAILDIR_FS_SEP_S' defines in 'src/lib-storage/index/maildir/maildir-storage.h' file in the sources. Do not be tempted to change 'MAILDIR_FS_SEP' et al to '/'; it won't work. Converting from Maildir to mbox ------------------------------- This is especially helpful if you want to archive your mail to a single file for storage on a CD, a PC, etc. But it can also be helpful if you want to use [MailboxFormat.mbox.txt] with Dovecot. Use the reformail program that comes with maildrop [http://www.courier-mta.org/maildrop/]. You can also use the formail program that comes with procmail [http://www.procmail.org/]. Here is a simple script showing how this works. To use it, adjust the script to invoke the right command according to your system. Then 'cd' to the user's home directory (one level above 'Maildir') and run the script with two arguments: the mailbox name (You can use "." for the top-level folder), and the output mbox filename, for example: ---%<------------------------------------------------------------------------- cd ~hans perl dw-maildirtombox.pl . >/tmp/hans-inbox perl dw-maildirtombox.pl Sent >/tmp/hans-sent ---%<------------------------------------------------------------------------- ---%<------------------------------------------------------------------------- #!/usr/bin/env perl # dw-maildirtombox.pl # dw = Dovecot Wiki :-) # NOTE! The output file must not contain single quotes (')! # figure out which program to run $cmd="reformail -f1"; system("$cmd /dev/null 2>/dev/null") == 0 or $cmd="formail"; system("$cmd /dev/null 2>/dev/null") == 0 or die "cannot find reformail or formail on your \$PATH!\nAborting"; $dir=$ARGV[0]; $outputfile=$ARGV[1]; if (($outputfile eq '') || ($dir eq '')) { die "Usage: ./archivemail.pl mailbox outputfile\nAborting"; } if (!stat("Maildir/$dir/cur") || !stat("Maildir/$dir/new")) { die "Maildir/$dir is not a maildir.\nAborting"; } @files = (,); foreach $file (@files) { next unless -f $file; # skip non-regular files next unless -s $file; # skip empty files next unless -r $file; # skip unreadable files $file =~ s/'/'"'"'/; # escape ' (single quote) $run = "cat '$file' | $cmd >>'$outputfile'"; system($run) == 0 or warn "cannot run \"$run\"."; } ---%<------------------------------------------------------------------------- Converting from MBX to Maildir ------------------------------ See the uw2dovecot.pl [http://wiki.dovecot.org/Migration/UW?action=AttachFile&do=view&target=uw2dovecot.pl] as mentioned on the page. Converting from MBX to mbox --------------------------- If you are using UW-IMAP and using the [MailboxFormat.mbx.txt], you will need to convert it to [MailboxFormat.mbox.txt] format. The conversion process isn't pretty, but here is a script that works. You will need to get and compile the mailutil program from the UW-IMAP web site [http://www.washington.edu/imap/]. ---%<------------------------------------------------------------------------- #! /bin/sh # Written by Marc Perkel - public domain # overhauled by Matthias Andree, 2006 # Usage: mbx-convert # This code assumes there a user named "marc" with the primary group "marc". # Change to any real user on your system. 
# Yes - it look bizzare - but it gets the job done # abort on error set -e user=marc group=marc homedir=/home/$user if [ $# -ne 1 ] ; then echo >&2 "Usage: $0 " exit 1 fi # set up automatic cleanup trap 'rm -f "${homedir}"/in.$$ "${homedir}"/out.$$' 0 # First copy to users home dir and make the user the owner cp "$1" "${homedir}/in.$$" chown "$user":"$group" "${homedir}/in.$$" # Run mailutil to convert as the user other than root # mailutil requires this su "$user" -c "mailutil copy in.$$ \#driver.unix/out.$$" # create new file with same permissions/owner as old cp -p "$1" "${1}.new" # cat instead of copy leaves the original owner and permissions alone if cat "${homedir}/out.$$" >"${1}.new" ; then # cat succeeded, rename file into place mv "${1}.new" "$1" else # cat failed, remove temp file rm -f "${1}.new" exit 1 fi ---%<------------------------------------------------------------------------- Make a copy of some folders and test it first. Once you are satisfied that it works then: * Write a script to convert all your files. * Shut down your email system so files can't be modified * Copy all your email to a backup location in case you have to revert * Run the script * Turn on Dovecot and test to verify it is working * Important - make sure that you changed your SMTP configuration to write mbox and not MBX. * Turn on SMTP and verify it is all working *User comments:* * Is this hassle actually necessary? I have run mailutil as root like this (mailutil as provided by the PINE 4.61 package for SUSE Linux 10.0):'mailutil copy /tmp/foo.mbx.orig '#driver.unix//tmp/foo.test'' and did not encounter any problems./-- , 2006-05-18/ * I did the same (using 'mailutil') but it doesn't maintain UIDs or UIDVALIDITY. So I hacked[attachment:mbx2mbox.tgz this] together to do the migration./-- , 2008-08-02/ * Tried to do 'mailutil -v copy /tmp/foo '#driver.unix'/tmp/foo.unix', but mailutil argues 'Can't open mailbox /tmp/foo: no such mailbox'. (mailutil as from the Debian package uw-mailutils in Debian 5.0 Lenny) strace shows that it searches for /tmp/foo/cur, i.e. a Maildir format mailbox. (WTF?) No idea yet how to get this working. And I'm really glad when I got rid of MBX. -- , 2009-06-19 * As Mark Crispin always says: Don't use UW-IMAP with buggy (Maildir) patches, compile your own. * Same problem here - just reinstalled uw-imapd, used thunderbird to create a "normal" mailfolder and copied the old folder contents - F.Fernandez * At least with ubuntu mailutil from uw-mailutils it starts search from root *ALWAYS* and if you start with slash it tries to convert from maildir format 'mailutil -v copy /tmp/foo '#driver.unix'/tmp/foo.unix' = convert maildir formated folder /tmp/foo/cur to mbox and 'mailutil -v copy tmp/foo '#driver.unix'/tmp/foo.unix' = convert /tmp/foo file to mbox - Manwe, 2010-03-09 (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/LDA.Postfix.txt0000644000175000017500000002277112244263647014437 00000000000000Dovecot LDA with Postfix ======================== This page contains only information specific to using LDA with Postfix, see for more information about using the LDA itself. 
System users ------------ If you wish you use 'dovecot-lda' for all system users on a single domain mail host you can do it by editing 'mailbox_command' parameter in '/etc/postfix/main.cf' (postconf(5) [http://www.postfix.org/postconf.5.html]): ---%<------------------------------------------------------------------------- mailbox_command = /usr/local/libexec/dovecot/dovecot-lda -f "$SENDER" -a "$RECIPIENT" # or mailbox_command = /usr/libexec/dovecot/dovecot-lda -f "$SENDER" -a "$RECIPIENT" # or mailbox_command = /usr/lib/dovecot/dovecot-lda -f "$SENDER" -a "$RECIPIENT" # or wherever it was installed in your system. ---%<------------------------------------------------------------------------- Then run 'postfix reload'. * This command doesn't do a [UserDatabase.txt] lookup. If you want that (e.g. for per-user quota lookups) you need to add '-d "$USER"' parameter. * Be sure that /var/mail or wherever you deliver mail is writable by the dovecot-lda process, which does not run as root. * Postfix's 'mailbox_size_limit' setting applies to all files that are written via dovecot-lda. The default is 50 MB, so dovecot-lda can't write *any* files larger than that, including mbox files or log files. This shows up only in Dovecot's logs: ---%<---------------------------------------------------------------------- dovecot-lda(user): write() failed with mbox file /home/user/mail/foo: File too large (process was started with ulimit -f limit) ---%<---------------------------------------------------------------------- * If you have trouble seeing anything in Dovecot's logs, see [LDA.txt]. Virtual users ------------- Dovecot LDA is very easy to use on large scale installations with Postfix virtual domains support, just add a 'dovecot' service in '/etc/postfix/master.cf' (master(5) [http://www.postfix.org/master.5.html]): ---%<------------------------------------------------------------------------- dovecot unix - n n - - pipe flags=DRhu user=vmail:vmail argv=/usr/local/libexec/dovecot/dovecot-lda -f ${sender} -d ${recipient} ---%<------------------------------------------------------------------------- An example using address extensions (ie user+extension@domain.com (don't forget to define the proper recipient_delimiter in Postfix's main.cf)) to deliver to the folder 'extension' in your maildir (If you wish to preserve the case of ${extension}, remove the 'hu'flags [http://www.postfix.org/pipe.8.html], and be sure to utilize [Variables.txt] in your dovecot.conf for mail locations and other configuration parameters that are expecting lower case): ---%<------------------------------------------------------------------------- dovecot unix - n n - - pipe flags=DRhu user=vmail:vmail argv=/usr/local/libexec/dovecot/dovecot-lda -f ${sender} -d ${user}@${nexthop} -m ${extension} # or if you have a INBOX/ namespace prefix: dovecot unix - n n - - pipe flags=DRhu user=vmail:vmail argv=/usr/local/libexec/dovecot/dovecot-lda -f ${sender} -d ${user}@${nexthop} -m INBOX/${extension} ---%<------------------------------------------------------------------------- This example ignores address extensions (ie user+extension@domain.com delivers just like user@domain.com ), but still shows the original address for Sieve: ---%<------------------------------------------------------------------------- dovecot unix - n n - - pipe flags=DRhu user=vmail:vmail argv=/usr/lib/dovecot/dovecot-lda -f ${sender} -a ${recipient} -d ${user}@${nexthop} ---%<------------------------------------------------------------------------- Replace 'vmail' 
above with your virtual mail user account. Then set 'virtual_transport' to
'dovecot' in '/etc/postfix/main.cf':

---%<-------------------------------------------------------------------------
dovecot_destination_recipient_limit = 1
virtual_mailbox_domains = your.domain.here
virtual_transport = dovecot
---%<-------------------------------------------------------------------------

And remember to run

---%<-------------------------------------------------------------------------
postfix reload
---%<-------------------------------------------------------------------------

Virtual users with multiple uids/gids
-------------------------------------

If you need multiple uids/gids you'll need to set dovecot-lda setuid root or
invoke it through sudo. See [LDA.txt] for how to do this securely.

Postfix with an NFS mail store
------------------------------

If you are experiencing problems with dovecot-lda processes hanging when
delivering to an NFS mail store, it's likely that the dovecot-lda process is
hanging while waiting for free locks. The occurrence of this can be greatly
reduced, if not eradicated, by forcing Postfix to deliver only one message at
a time to the same recipient.

---%<-------------------------------------------------------------------------
dovecot_destination_concurrency_limit = 1
---%<-------------------------------------------------------------------------

Prevent backscatter
-------------------

To prevent backscatter you should configure Postfix to reject mail for
non-existent recipients. This is the default behaviour
(smtpd_reject_unlisted_recipient = yes), so there's no need to set
"reject_unlisted_recipient" in any of your restrictions. But: Postfix must
know whether a recipient exists. Depending on how you've configured Dovecot
and Postfix, this can be done several ways.

System users
------------

If you only use local system users this is no problem - all valid recipients
can be found in the local password or alias database.

Virtual users (static)
----------------------

When you use virtual users and domains you should maintain a list of valid
recipients. The relevant settings are: *virtual_alias_maps,
virtual_mailbox_maps*

For static verification you can maintain the content of the files yourself.
For every recipient or alias you need one entry. Example:

*virtual_alias_maps*

---%<-------------------------------------------------------------------------
name_recipient@example.com   external@example.net
---%<-------------------------------------------------------------------------

*virtual_mailbox_maps*

---%<-------------------------------------------------------------------------
name@example.com        OK
recipient@example.com   available
---%<-------------------------------------------------------------------------

Don't forget to run "postmap" afterwards.

*Info:* if you use the Dovecot LDA or LMTP it doesn't matter what value you
put behind the recipient address. Use "OK", the full name of the user, or
anything else.

Virtual users (dynamic)
-----------------------

Do you already use a database (MySQL, PostgreSQL) for Dovecot? Use the same
source for Postfix. You only have to define a valid SQL query for Postfix.
Example:

---%<-------------------------------------------------------------------------
virtual_mailbox_maps = proxy:mysql:/etc/postfix/virtual_mailbox_maps.cf
---%<-------------------------------------------------------------------------

*virtual_mailbox_maps.cf*

---%<-------------------------------------------------------------------------
user = mysql-user
password = mysql-password
hosts = unix:/var/run/mysql/mysqld.sock
dbname = mailserver
query = SELECT name FROM mailbox WHERE email='%s'
---%<-------------------------------------------------------------------------

This query will return the value of the field "name" from table "mailbox" if
the email address of the recipient matches the email from the field "email".
This is enough for Postfix, because Postfix only needs to know whether the
recipient exists. The value doesn't matter. When you use a database (or LDAP)
there's no need to manually maintain a file with valid recipients.

*Info:* If you use "relay_domains" instead of "virtual_mailbox_domains" you
have to use "relay_recipient_maps" instead of "virtual_mailbox_maps".

Dynamic address verification with LMTP
--------------------------------------

With Dovecot 2.0 you can also use LMTP and the Postfix setting
"reject_unverified_recipient" for dynamic address verification. It's really
nice because Postfix doesn't need to query an external data source (MySQL,
LDAP...). Postfix maintains a local database with existing/non-existing
addresses (you can configure how long positive/negative results should be
cached).

To use LMTP and dynamic address verification you must first get Dovecot
working. Then you can configure Postfix to use LMTP and set
"reject_unverified_recipient" in the smtpd_recipient_restrictions. On every
incoming email Postfix will probe whether the recipient address exists. You
will see similar entries in your logfile:

---%<-------------------------------------------------------------------------
Recipient address rejected: undeliverable address: host
tux.example.com[private/dovecot-lmtp] said: 550 5.1.1 < tzknvtr@example.com >
User doesn't exist: tzknvtr@example.com (in reply to RCPT TO command);
from=< cnrilrgfclra@spammer.org > to=< tzknvtr@example.com >
---%<-------------------------------------------------------------------------

If the recipient address exists (status=deliverable) Postfix accepts the mail.

*Info:* you cannot use "reject_unverified_recipient" with "pipe", so this
doesn't work with the Dovecot LDA "deliver".

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Design.Storage.Mailbox.Save.txt0000644000175000017500000000527712244263644017506 00000000000000Mailbox Saving
==============

Both saving and copying messages begin by calling 'mailbox_save_alloc()'.
After that you can set the message's metadata fields:

 * 'mailbox_save_set_flags()' sets flags and keywords.
 * 'mailbox_save_set_received_date()' sets message's received date (IMAP
   INTERNALDATE). It also supports specifying timezone, but most backends
   don't support saving it.
 * 'mailbox_save_set_dest_mail()' specifies a mail that can be used to access
   the partially saved mail after save/copy is finished (but not committed).
   You should be able to do pretty much anything with the mail, but its UID is
   usually 0 at this point.
 * 'mailbox_save_set_from_envelope()' sets the envelope sender. Currently this
   is only used by mbox format to specify the address in From_-line.

When copying, most of the metadata fields are automatically copied from the
source message.
The only exception is message's flags and keywords. If you want to preserve them, the easiest way is to call 'mailbox_save_copy_flags()'. Some metadata fields are mainly useful when you're replicating or restoring an existing mailbox and want to preserve metadata: * 'mailbox_save_set_min_modseq()' sets message's modseq to at least the specified modseq. If the modseqs are already higher in the mailbox, the resulting modseq is what it would have been without this call. * 'mailbox_save_set_uid()' sets message's UID. If mailbox's next_uid is already higher than the specified UID, the UID request is ignored. * 'mailbox_save_set_guid()' sets message's globally unique ID. A new GUID is generated by default, and if there already exists a message with the same GUID a different one may or may not be given. For example with maildir the GUID is same as the base filename, while dbox has an explicit GUID metadata field. * 'mailbox_save_set_pop3_uidl()' sets POP3 UIDL value. Not all backends support this. * 'mailbox_save_set_save_date()' sets "message saved" date, i.e. the date/time when the message was saved to this mailbox. The default is the current time. Once you're done with setting the metadata fields, you can either copy an existing mail with 'mailbox_copy()' or provide message body with: * 'mailbox_save_begin()' starts saving from given input stream. * 'mailbox_save_continue()' saves all data from input stream. If input stream is blocking, typically a single call to this function should be enough. If input stream is non-blocking, you need to call this function until you're done. In any case call this until 'i_stream_is_eof()' returns TRUE. * 'mailbox_save_finish()' finishes saving the mail, or 'mailbox_save_cancel()' aborts it. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Services.txt0000644000175000017500000003751312244263660014162 00000000000000Service configuration ===================== Contents 1. Service configuration 1. Service basics 2. Service privileges 3. Service limits 4. Service listeners 1. unix_listeners and fifo_listeners 2. inet_listeners 2. Default services 1. anvil 2. auth 3. auth-worker 4. config 5. dict 6. director 7. dns_client 8. doveadm 9. imap, pop3, managesieve 10. imap-login, pop3-login, managesieve-login 11. indexer 12. indexer-worker 13. ipc 14. lmtp 15. log 16. ssl-params 17. stats This page describes Dovecot's services comprehensively. Most admins don't need to know these details. The important service settings are described in the 'example-config/conf.d/10-master.conf' file. Service basics -------------- executable: The binary path to execute and its parameters. If the path doesn't begin with '/', it's relative to *base_dir*. type: Type of this service: * "" is the default. * "startup" creates one process at startup. For example SSL parameters are generated at startup because of this, instead of only after the first SSL connection arrives. * "login" is used by login processes. The login processes have "all processes full" notification fd. It's used by the processes to figure out when no more client connections can be accepted because client and process limits have been reached. The login processes can then kill some of their oldest connections that haven't logged in yet. * "log", "config" and "anvil" are treated specially by these specific processes. protocol: If non-empty, this service is enabled only when the protocol name is listed in *protocols* setting. 
idle_kill: If a process doesn't appear to be doing anything after this much time, notify it that it should kill itself if it's not doing anything.*process_min_avail* setting overrides this. If set to 0,*default_idle_kill* is used. Service privileges ------------------ user: UNIX user (UID) which runs this process. *default_login_user* setting's value should be used for type=login processes and *default_internal_user* should be used for other processes that don't require root privileges. group: The primary UNIX group (GID) which runs this process. extra_groups: Secondary UNIX groups that this process belongs to. privileged_group: Secondary UNIX group, which is disabled by default, but can be enabled by the process. This setting is probably never needed directly.*mail_privileged_group* setting is a more user friendly way to use this setting for mail processes. chroot: The processes are chrooted to this directory at startup. Relative to *base_dir*. drop_priv_before_exec: Drop all privileges after forking, but before executing the binary. This is mainly useful for dumping core files on non-Linux OSes, since the processes are no longer in "setuid" mode. This setting can't be used with non-empty chroot. Service limits -------------- client_limit: Maximum number of simultaneous client connections. If set to 0, *default_client_limit* is used instead. service_count: Number of client connections to handle until the process kills itself. 0 means unlimited. process_limit: Maximum number of processes that can exist for this service. If set to 0, *default_process_limit* is used instead. process_min_avail: Minimum number of processes that always should be available to accept more client connections. For service_count=1 processes this decreases the latency for handling new connections. For service_count!=1 processes it could be set to the number of CPU cores on the system to balance the load among them. vsz_limit: Limit the process's address space (both RLIMIT_DATA and RLIMIT_AS if available). When the space is reached, some memory allocations may start failing with "Out of memory", or the kernel may kill the process with signal 9. This setting is mainly intended to prevent memory leaks from eating up all of the memory, but there can be also legitimate reasons why the process reaches this limit. For example a huge mailbox may not be accessed if this limit is too low. The default value (18446744073709551615 = 2^64-1) sets the limit to *default_vsz_limit*, while 0 disables the limit entirely. Service listeners ----------------- unix_listeners and fifo_listeners --------------------------------- path: Path to the file, relative to *base_dir* setting. This is also used as the section name. user: Owner of the file. Defaults to 0 (root). group: Group of the file. Defaults to 0 (root/wheel). mode: Mode of the file. Defaults to 0700. Note that 0700 is an octal value, while 700 is a different decimal value. Setting mode to 0 disables the listener. inet_listeners -------------- name: Section name of this listener. It is meant to be descriptive for humans (e.g. "imap", "imaps"). address: Space separated list of IP addresses / host names to listen on. "*" means all IPv4 addresses, "::" means all IPv6 addresses. Defaults to *listen* setting. port: Port number where to listen. 0 disables the listener. ssl: If yes, the listener does an immediate SSL/TLS handshake after accepting a connection. This is needed for the legacy imaps and pop3s ports. 
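As a rough illustration of how the pieces above fit together, here is a small
sketch of service blocks in 'dovecot.conf'. The numbers and the "vmail" user
are only placeholders, not recommendations; see
'example-config/conf.d/10-master.conf' for the shipped defaults:

---%<-------------------------------------------------------------------------
service imap-login {
  inet_listener imap {
    port = 143
  }
  inet_listener imaps {
    port = 993
    ssl = yes
  }
  service_count = 1
  process_min_avail = 4
}

service auth {
  unix_listener auth-userdb {
    mode = 0600
    user = vmail
  }
}
---%<-------------------------------------------------------------------------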
Default services ================ anvil ----- The anvil process tracks state of users and their connections. * *chroot=empty* and *user=$default_internal_user*, because anvil doesn't need access to anything. * *process_limit=1*, because there can be only one. * *idle_kill=4294967295s*, because it should never die or all of its tracked state would be lost. * "doveadm who" and some other doveadm commands connect to anvil's UNIX listener and request its state. auth ---- The master auth process. There are 4 types of auth client connections: 1. client: Only SASL authentication is allowed. This can be safely exposed to entire world. 2. userdb: userdb lookups and passdb lookups (without the password itself) can be done for any user, and a list of users can be requested. This may or may not be a security issue. Access to userdb lookup is commonly needed by dovecot-lda, doveadm and other tools. 3. login: Starts a two phase user login by performing authenticating (same as "client" type). Used by login processes. 4. master: Finishes the two phase user login by performing a userdb lookup (similar to "userdb" type). Used by post-login processes (e.g. imap, pop3). With UNIX listeners the client type is selected based on the filename after the last "-" in the filename. For example "anything-userdb" is of "userdb" type. The default type is "client" for inet insteners and unrecognized UNIX listeners. You can add as many client and userdb listeners as you want (and you probably shouldn't touch the login/master listeners). * *client_limit* should be large enough to handle all the simultaneous connections. Typically only login processes use long lasting auth connections, while other processes do only quick lookups and disconnect afterwards. * *process_limit=1*, because there can be only one auth master process. * *user=$default_internal_user*, because it typically doesn't need permissions to do anything (PAM lookups are done by auth-workers). * *chroot* could be set (to e.g. "empty") if passdb/userdb doesn't need to read any files (e.g. SQL, LDAP config is read before chroot) auth-worker ----------- Auth master process connects to auth worker processes. It is mainly used by passdbs and userdbs that do potentially long running lookups. For example MySQL supports only synchronous lookups, so each query is run in a separate auth worker process that does nothing else during the query. PostgreSQL and LDAP supports asynchronous lookups, so those don't use worker processes at all. With some passdbs and userdbs you can select if worker processes should be used. * *client_limit=1*, because only the master auth process connects to auth worker. * *service_count=1*, because auth master stops extra idling workers by disconnecting from them. * *process_limit* should be a bit higher than *auth_worker_max_count* setting. * *user=root* by default, because by default PAM authentication is used, which usually requires reading '/etc/shadow'. If this isn't needed, it's a good idea to change this to something else, such as *$default_internal_user*. * *chroot* could also be set if possible. config ------ Config process reads and parses the 'dovecot.conf' file, and exports the parsed data in simpler format to config clients. * *user=root*, because the process needs to be able to reopen the config files during a config reload, and often some parts of the config having secrets are readable only by root. * Only root should be able to connect to its UNIX listener, unless there are no secrets in the configuration. 
Passwords are obviously secrets, but less obviously *ssl_key* is also a secret, since it contains the actual SSL key data instead of only a filename. dict ---- Dovecot has a "lib-dict" API for doing simple key-value lookups/updates in various backends (SQL, file, others in future). This is optionally used by things like quota, expire plugin and other things in future. It would be wasteful for each mail process to separately create a connection to SQL, so usually they go through the "proxy" dict backend. These proxy connections are the client connections of dict processes. * *client_limit=1*, because dict lookups are synchronous and the client is supposed to disconnect immediately after the lookup. * *user=$default_internal_user*, because the proxy dict lookups are typically SQL lookups, which require no filesystem access. (The SQL config files are read while still running as root.) * The dict clients can do any kind of dict lookups and updates for all users, so they can be rather harmful if exposed to an attacker. That's why by default only root can connect to dict socket. Unfortunately that is too restrictive for all setups, so the permissions need to be changed so that Dovecot's mail processes (and only them) can connect to it. director -------- tracker process, which hooks into all auth-client and auth-userdb connections. * *process_limit=1*, because only one process can keep track of everyone's state. * *user=$default_internal_user*, because director doesn't access any files. * *chroot* can't be set, because it still needs to be connect to auth process. * Connections are basically proxying auth connections, so they have similar security considerations. dns_client ---------- Used by "lib-dns" library to perform asynchronous DNS lookups. The dns-client processes internally use the synchronous 'gethostbyname()' function. * *client_limit=1*, because the DNS lookup is synchronous. * *user=$default_internal_user*, because typically no special privileged files need to be read. * *chroot* can be used only if it contains 'etc/resolv.conf' and other files necessary for DNS lookups. doveadm ------- It's possible to run doveadm mail commands via doveadm server processes. This is useful for running doveadm commands for multiple users simultaneously, and it's also useful in a multiserver system where doveadm can automatically connect to the correct backend to run the command. * *client_limit=1*, because doveadm command execution is synchronous. * *service_count=1* just in case there were any memory leaks. This could be set to some larger value (or 0) for higher performance. * *user=root*, but the privileges are (temporarily) dropped to the mail user's privileges after userdb lookup. If only a single UID is used, user can be set to the mail UID for higher security, because the process can't gain root privileges anymore. imap, pop3, managesieve ----------------------- Post-login process for handling IMAP/POP3/ManageSieve client connections. * *client_limit* may be increased from the default 1 to save some CPU and memory, but it also increases the latency when one process serving multiple clients it waiting for a long time for a lock or disk I/O. In future these waits may be reduced or avoided completely, but for now it's not safe to set this value higher than 1 in enterprise mail systems. For small mostly-idling hobbyist servers a larger number may work without problems. * *service_count* can be changed from 1 if only a single UID is used for mail users. 
This improves performance, but it's less secure, because bugs in code may leak
email data from another user's earlier connection.
 * *process_limit* defaults to 1024, which means that the number of
   simultaneous IMAP (or POP3 or ManageSieve) connections is limited by this
   setting. If you expect more connections, increase this value (see the
   example at the end of this page).

imap-login, pop3-login, managesieve-login
-----------------------------------------

See the login processes documentation.

indexer
-------

Indexer master process, which tracks and prioritizes indexing requests from
mail processes. The actual indexing is done by indexer-worker processes. The
indexing means both updating Dovecot's internal index and cache files with new
messages and, more importantly, updating full text search indexes (if
enabled). The indexer master process guarantees that the FTS index is never
modified by more than one process.

 * *process_limit=1*, because only one process can keep the FTS guarantee.
 * *user=$default_internal_user*, because the process doesn't need any
   permissions.
 * *chroot* could be set to *$base_dir* for extra security. It still needs to
   be able to connect to the indexer-worker socket.

indexer-worker
--------------

Indexer worker process.

 * *client_limit=1*, because indexing is a synchronous operation.
 * *process_limit* defaults to 10, because the FTS index updating can eat a
   lot of CPU and disk I/O. You may need to adjust this value depending on
   your system.
 * *user=root*, but the privileges are (temporarily) dropped to the mail
   user's privileges after userdb lookup. If only a single UID is used, user
   can be set to the mail UID for higher security, because the process can't
   gain root privileges anymore.

ipc
---

IPC hub process.

 * *process_limit=1*, because there can be only one hub.
 * *chroot=empty* and *user=$default_internal_user*, because it doesn't need
   any files and there are no outbound connections.
 * The "ipc" UNIX socket can be used to send any commands to other processes,
   such as killing a specific user's connection. It is somewhat security
   sensitive.

lmtp
----

LMTP process for delivering new mails.

 * *client_limit=1*, because most of the time spent on an LMTP client is spent
   waiting for disk I/O and other blocking operations. There's no point in
   having more clients waiting around, doing nothing, during that time.
 * *user=root*, but the privileges are (temporarily) dropped to the mail
   user's privileges after userdb lookup. If only a single UID is used, user
   can be set to the mail UID for higher security, because the process can't
   gain root privileges anymore.

log
---

All processes started via the Dovecot master process log their messages via
the "log" process. This allows some nice features compared to directly logging
via syslog.

 * *process_limit=1*, because the log process keeps track of all the other
   logging processes.
 * *user=root*, because it guarantees being able to write to the syslog socket
   and to the log files directly.

ssl-params
----------

Build SSL parameters every n days, based on the *ssl_parameters_regenerate*
setting.

 * *type=startup* so that the (re)generation can be started immediately at
   startup when needed, instead of waiting until the first SSL handshake
   starts.

stats
-----

Mail process statistics tracking. Its behavior is very similar to the anvil
process, but anvil's data is of higher importance and lower traffic than
stats, so stats are tracked in a separate process.
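As a recap of the per-service limits discussed above, the following sketch
shows how individual services can be tuned in 'dovecot.conf'. The numbers are
purely illustrative and depend entirely on your hardware and user count:

---%<-------------------------------------------------------------------------
service imap {
  # allow more simultaneous IMAP connections than the default 1024
  process_limit = 2048
}

service indexer-worker {
  # limit how many FTS indexing processes may run at the same time
  process_limit = 5
  vsz_limit = 1G
}
---%<-------------------------------------------------------------------------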
(This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/SSL.DovecotConfiguration.txt0000644000175000017500000003513712244263660017172 00000000000000Dovecot SSL configuration ========================= The most important SSL settings are (in 'conf.d/10-ssl.conf'): ---%<------------------------------------------------------------------------- ssl = yes # Preferred permissions: root:root 0444 ssl_cert = [Authentication.Mechanisms.txt] only when SSL/TLS is used first. * 'ssl = required' requires SSL/TLS also for [Authentication.Mechanisms.txt]. * If you have only plaintext mechanisms enabled ('auth { mechanisms = plain login }'), you can use either (or both) of the above settings. They behave exactly the same way then. Multiple SSL certificates ------------------------- Different certificates per IP and protocol ------------------------------------------ If you have multiple IPs available, this method is guaranteed to work with all clients. ---%<------------------------------------------------------------------------- local 192.0.2.10 { # instead of IP you can also use hostname, which will be resolved protocol imap { ssl_cert = for list of clients known to (not) support SNI. ---%<------------------------------------------------------------------------- local_name imap.example.org { ssl_cert = /var/lib/dovecot/ssl-parameters.ssl'. After the initial creation they're by default regenerated every week. With newer computers the generation shouldn't take more than a few seconds, but with older computers it can take as long as half an hour. The extra security gained by the regeneration is quite small, so with slower computers you might want to disable it: ---%<------------------------------------------------------------------------- ssl_parameters_regenerate = 0 ---%<------------------------------------------------------------------------- By default Dovecot's allowed ciphers list contains: ---%<------------------------------------------------------------------------- ssl_cipher_list = ALL:!LOW:!SSLv2:!EXP:!aNULL ---%<------------------------------------------------------------------------- Disallowing more won't really gain any security for those using better ciphers, but it does prevent people from accidentally using insecure ciphers. See http://www.openssl.org/docs/apps/ciphers.html for a list of the ciphers. SSL verbosity ------------- ---%<------------------------------------------------------------------------- verbose_ssl = yes ---%<------------------------------------------------------------------------- This will make Dovecot log all the problems it sees with SSL connections. Some errors might be caused by dropped connections, so it could be quite noisy. Client certificate verification/authentication ---------------------------------------------- If you want to require clients to present a valid SSL certificate, you'll need these settings: ---%<------------------------------------------------------------------------- ssl_ca = class3-revoke.pem ---%<------------------------------------------------------------------------- With the above settings if a client connects which doesn't present a certificate signed by one of the CAs in the 'ssl_ca' file, Dovecot won't let the user log in. This could present a problem if you're using Dovecot to provide SASL authentication for an MTA (such as Postfix) which is not capable of supplying client certificates for SASL authentication. 
If you need Dovecot to provide SASL authentication to an MTA without requiring client certificates and simultaneously provide IMAP service to clients while requiring client certificates, you can put 'auth_ssl_require_client_cert = yes' inside of a protocol block as shown below to make an exemption for SMTP SASL clients (such as Postfix). ---%<------------------------------------------------------------------------- protocol !smtp { auth_ssl_require_client_cert = yes } ---%<------------------------------------------------------------------------- You may also force the username to be taken from the certificate by setting 'ssl_username_from_cert = yes'. * The text is looked up from subject DN's specified field using OpenSSL's 'X509_NAME_get_text_by_NID()' function. * By default the 'CommonName' field is used. * You can change the field with 'ssl_cert_username_field = name' setting (parsed using OpenSSL's 'OBJ_txt2nid()' function). 'x500UniqueIdentifier' is a common choice. You may also want to disable the password checking completely. Doing this currently circumvents Dovecot's security model so it's not recommended to use it, but it is possible by making the [PasswordDatabase.txt] allow logins using any password (typically requiring <"nopassword" extra field> [PasswordDatabase.ExtraFields.txt] to be returned). Testing ------- Try out your new setup: ---%<------------------------------------------------------------------------- openssl s_client -connect mail.sample.com:pop3s ---%<------------------------------------------------------------------------- You should see something like this: ---%<------------------------------------------------------------------------- CONNECTED(00000003) depth=2 /O=Root CA/OU=http://www.cacert.org/CN=CA Cert Signing Authority/emailAddress=support@cacert.org verify error:num=19:self signed certificate in certificate chain verify return:0 --- Certificate chain 0 s:/CN=mail.example.com i:/O=CAcert Inc./OU=http://www.CAcert.org/CN=CAcert Class 3 Root 1 s:/O=CAcert Inc./OU=http://www.CAcert.org/CN=CAcert Class 3 Root i:/O=Root CA/OU=http://www.cacert.org/CN=CA Cert Signing Authority/emailAddress=support@cacert.org 2 s:/O=Root CA/OU=http://www.cacert.org/CN=CA Cert Signing Authority/emailAddress=support@cacert.org i:/O=Root CA/OU=http://www.cacert.org/CN=CA Cert Signing Authority/emailAddress=support@cacert.org --- Server certificate -----BEGIN CERTIFICATE----- MIIE1DCCArygAwIBAgIDAMBPMA0GCSqGSIb3DQEBBAUAMFQxFDASBgNVBAoTC0NB Y2VydCBJbmMuMR4wHAYDVQQLExVodHRwOi8vd3d3LkNBY2VydC5vcmcxHDAaBgNV BAMTE0NBY2VydCBDbGFzcyAzIFJvb3QwHhcNMTAxMjIwMTM1NDQ1WhcNMTIxMjE5 MTM1NDQ1WjAmMSQwIgYDjksadnjkasndjksandjksandjksandj5YXJlYS5vcmcw ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3jOX3FC8wVqnb2r65Sfvk cYUpJhlbhCfqPdN41c3WS0y1Jwwum1q4oMAJvdRnD5TMff1+fqTFy3lS1sYxIXiD kBRo478eNqzXHMpBOqbvKjYp/UZgWUNA9ebI1nQtwd7rnjmm/GrtyItjahCsgzDS qPAie+mXYzuT49ZoG+Glg7/R/jDcLMcJY0d5eJ7kufB1RLhvRitZD4FEbJVehqhY aevf5bLk1BNFhzRBfLXmv6u/kfvWf2HjGAf0aFhaQyiAldDgnZrvaZOFjkToJk27 p9MguvwGmbciao0DmMjcJhQ0smclFwy8Kj98Tz+nTkfAlU8jJdb1J/tIatJdpSRh AgMBAAGjgdwwgdkwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAgYI KwYBBQUHAwEGCWCGSAGG+EIEAQYKKwYBBAGCNwoDAzALBgNVHQ8EBAMCBaAwMwYI KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhadodHRwOi8vb2NzcC5jYWNlcnQub3Jn LzBRBgNVknsadkjasnjdksandjksandjsnNlY3VyaXR5YXJlYS5vcmegKQYIKwYB BQUHCAWgHQwbbWFpbC5qb2ludC5zZWN1cml0eWFyZWEub3JnMA0GCSqGSIb3DQEB BQUAA4ICAQAX8ceObvUZNKYTlNQ/cv0BiA1XweRsVNca1ILACNLdVPR9mvf+aXCh ODkHaZAmGngj1DfD4fJsTbaydGWSPeVH91Qi9F+Pi6szhsxylI83NKbuXihcenuG 
twnte8aIb5FelVHttLQPSKRR62E8YmDWk3KYivuFAuZqDaGnWc5yeneTBpsGter/ 4awqsgymBK2YEg1HIWMPaRBvwzCVN/yUyWhFH9Nj11f/xgZE87VXrjLHWT/73i2Z S4uIZ2KHQUYuxMGldgpXm+QxFM8DGA6z1T1oPCVfW85cezlfr8QVvX6SXZrAUNL0 3D5YPzQuevW+5CrqnGA+F5ff4mBMl8R8Sg0+0LoLqt5PbpGyTt9vS1INZCdfvtIA /d7Ae7Xp9W8FVRqd7tvNMIy3ZA0/wNMDUczkhC/YtvHfMELpjtMJAGF15OtO7Vik V+FZnBP1Yd7760dtEmd6bF8vjcXCvDdxwGtcAehAUpIgAWvkHHOt8+H56tkFENAP /ZpJ+Wr+K3lxkkG+BN1bucxMuAdVyTpFyZfKDHRXIO/5e0hpPOaTO+obD3kifzdh yy7KmdKvDclHTiPuonJBzEXeM3JQBjcDHbMSyA6+38yBcso27h9VqCQJB2cZmSlW ArS/9wt2X21KgeuGHlTZ/8z9gXAjQKXhDYECWWd6LkWl98ZDBihslQ== -----END CERTIFICATE----- subject=/CN=mail.example.com issuer=/O=CAcert Inc./OU=http://www.CAcert.org/CN=CAcert Class 3 Root --- No client certificate CA names sent --- SSL handshake has read 5497 bytes and written 293 bytes --- New, TLSv1/SSLv3, Cipher is DHE-RSA-AES256-SHA Server public key is 2048 bit Secure Renegotiation IS supported Compression: zlib compression Expansion: zlib compression SSL-Session: Protocol : TLSv1 Cipher : DHE-RSA-AES256-SHA Session-ID: 114A22BE4625B33F6893124ACF640AE0628B48B5039E90B3B9A20ADF7FA691F3 Session-ID-ctx: Master-Key: B8A55EC91A060575CFB29503FBF7160C2DC8BCBFE02D20A7F704882F72D8D00272D8D002CE5CCC4B94A492F43ED8F Key-Arg : None TLS session ticket: 0000 - 86 c7 46 63 a5 b6 48 74-16 d8 e0 a7 e2 64 e8 89 ..Fc..Ht.....d.. 0010 - 97 90 59 4b 57 f3 e2 b3-e2 d2 88 90 a8 aa b4 44 ..YKW..........D 0020 - ea 24 08 5e b4 14 7f e1-2a 1a 1c 40 ca 85 e7 41 .$.^....*..@...A 0030 - 9d 0d a8 4c f7 e3 db 1e-ef da 53 9c fe 43 cc 62 ...L......S..C.b 0040 - 79 b6 ad ea 9d cf ca b2-37 41 b7 0f ea 7d 59 e8 y.......7A...}Y. 0050 - 10 01 a0 eb dc c2 63 66-56 54 6a e8 3a 4b 93 49 ......cfVTj.:K.I 0060 - 77 da e4 4b 21 e8 30 7e-bf 10 91 3a 2c f9 59 80 w..K!.0~...:,.Y. 0070 - 01 1f 36 0b 92 85 67 55-c8 86 1d 44 b1 6f 0d ae ..6...gU...D.o.. 0080 - 15 36 b6 49 3a ef 94 9a-ef 6d 27 f0 80 20 43 09 .6.I:....m'.. C. 0090 - be 70 c5 30 15 3b 93 c6-c1 4c e9 7f 5c 34 98 dd .p.0.;...L..\4.. Compression: 1 (zlib compression) Start Time: 1292857721 Timeout : 300 (sec) Verify return code: 19 (self signed certificate in certificate chain) --- +OK Dovecot ready. ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Pigeonhole.ManageSieve.Troubleshooting.txt0000644000175000017500000002277412244263656022051 00000000000000ManageSieve Troubleshooting =========================== Like Dovecot itself, *the ManageSieve service always logs a detailed error message* if something goes wrong at the server (refer to [Logging.txt] for more details): the logs are the first place to look if you suspect something is wrong. To get additional debug messages in your log file, you should set 'mail_debug=yes' in dovecot.conf (inside 'protocol sieve {...} 'if you want to enable this forManageSieve only). If the client commits protocol violations or sends invalid scripts, an error response is provided to the client which is not necessarily logged on the server. A goodManageSieve client presents such error messages to the user. Keep in mind that the the ManageSieve service only provides the Sieve /protocol/, which may be somewhat confusing. This protocol can only be used to /upload/ Sieve scripts and /activate/ them for execution. Performing the steps below therefore only verifies that this functionality is working and *not* whether Sieve scripts are correctly being executed upon delivery. 
The execution of Sieve scripts is performed by the Dovecot [LDA.txt] or its
[LMTP.txt] service using the Sieve plugin [Pigeonhole.Sieve.txt]. If you have
problems with Sieve script execution upon delivery, you are referred to
[Pigeonhole.Sieve.Troubleshooting.txt].

Manual Login and Script Upload
------------------------------

If you fail to login or upload scripts to the server, it is not necessarily
caused by Dovecot or your configuration. It is often best to test your
ManageSieve server manually first. This also provides you with the direct
error messages from the server without the intermediation of your client.

If you do not use TLS, you can connect using a simple 'telnet' or 'netcat'
connection to the configured port (typically 4190, or 2000 for older setups).
Otherwise you must use a TLS-capable text protocol client like 'gnutls-cli' as
described below. Upon connection, the server presents the initial greeting
with its capabilities:

---%<-------------------------------------------------------------------------
"IMPLEMENTATION" "dovecot"
"SASL" "PLAIN"
"SIEVE" "comparator-i;ascii-numeric fileinto reject vacation imapflags notify include envelope body relational regex subaddress copy"
"STARTTLS"
OK "Dovecot ready."
---%<-------------------------------------------------------------------------

Note that the reported 'STARTTLS' capability means that the server accepts
TLS, but, since you are using telnet/netcat, you cannot use this (refer to
Manual TLS Login below). The 'SASL' capability lists the available SASL
authentication mechanisms. If this list is empty and 'STARTTLS' is available,
it probably means that the server forces you to initiate TLS first (as
dictated by ''disable_plaintext_auth=yes'' in dovecot.conf).

Now you need to log in. Although potentially multiple SASL mechanisms are
available, only 'PLAIN' is described here. Authentication is performed using
the ManageSieve 'AUTHENTICATE' command. This command typically looks as
follows when the 'PLAIN' mechanism is used:

---%<-------------------------------------------------------------------------
AUTHENTICATE "PLAIN" "<base64-encoded credentials>"
---%<-------------------------------------------------------------------------

The credentials are the base64-encoded version of the string
'"\0<username>\0<password>"', where '\0' represents the NUL byte and
<username> and <password> stand for the actual login name and password.
Produce this string with any base64 encoding tool and paste the result into
your protocol session, e.g.:

---%<-------------------------------------------------------------------------
AUTHENTICATE "PLAIN" "AHVzZXJuYW1lAHBhc3N3b3Jk"
OK "Logged in."
---%<-------------------------------------------------------------------------

Now that you are logged in, you can upload a script. This is done using the
'PUTSCRIPT' command. Its first argument is the name for the script and its
second argument is a string literal. A string literal starts with a length
specification '{<size>+}' followed by a newline. Thereafter the server expects
<size> bytes of script data. The following uploads a trivial 6 byte long sieve
script that keeps every message (the 6th byte is the newline character):

---%<-------------------------------------------------------------------------
PUTSCRIPT "hutsefluts" {6+}
keep;
OK "Putscript completed."
---%<-------------------------------------------------------------------------

Upon successful upload, you should find a file called 'hutsefluts.sieve' in
your 'sieve_dir' directory.
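For example, assuming 'sieve_dir' points to '~/sieve' (an assumption; the
actual path depends on your configuration), a quick check on the server might
look like this:

---%<-------------------------------------------------------------------------
ls -l ~/sieve/
---%<-------------------------------------------------------------------------

The 'hutsefluts.sieve' file should show up in that listing.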
The script should also be listed by the server as follows when the
'LISTSCRIPTS' command is issued:

---%<-------------------------------------------------------------------------
LISTSCRIPTS
"hutsefluts"
OK "Listscripts completed."
---%<-------------------------------------------------------------------------

You can check whether your script is uploaded correctly by downloading it
using the 'GETSCRIPT' command. This command accepts the name of the downloaded
script as its only parameter:

---%<-------------------------------------------------------------------------
GETSCRIPT "hutsefluts"
{6}
keep;
OK "Getscript completed."
---%<-------------------------------------------------------------------------

To let the Sieve plugin use your newly uploaded script, you must activate it
using the 'SETACTIVE' command (only one script can be active at any time). The
active script is indicated by 'ACTIVE' in the 'LISTSCRIPTS' output, e.g.:

---%<-------------------------------------------------------------------------
SETACTIVE "hutsefluts"
OK "Setactive completed."
LISTSCRIPTS
"hutsefluts" ACTIVE
OK "Listscripts completed."
---%<-------------------------------------------------------------------------

The symbolic link configured with the 'sieve' setting should now point to the
activated script in the 'sieve_dir' directory. If no script is active, this
symbolic link is absent.

Manual TLS Login
----------------

When TLS needs to be used during manual testing, 'gnutls-cli' provides the
means to do so. This command-line utility is part of the GNUTLS distribution
and on most systems it should be easy to install. It is used to connect to
ManageSieve as follows:

---%<-------------------------------------------------------------------------
gnutls-cli --starttls -p <port> <host>
---%<-------------------------------------------------------------------------

This starts the client in plain text mode first. As shown in the previous
section, the server presents a greeting with all of its capabilities. If
'STARTTLS' is listed, you can issue the 'STARTTLS' command as follows:

---%<-------------------------------------------------------------------------
STARTTLS
OK "Begin TLS negotiation now."
---%<-------------------------------------------------------------------------

If an OK response is given by the server you can press 'Ctrl-D' to make
'gnutls-cli' start the TLS negotiation. Upon pressing 'Ctrl-D', 'gnutls-cli'
will show information on the negotiated TLS session and finally the first
response of the server is shown:

---%<-------------------------------------------------------------------------
"IMPLEMENTATION" "dovecot"
"SASL" "PLAIN"
"SIEVE" "comparator-i;ascii-numeric fileinto reject vacation imapflags notify include envelope body relational regex subaddress copy"
OK "TLS negotiation successful."
---%<-------------------------------------------------------------------------

Hereafter, you can continue to authenticate and upload a script as described
in the previous section.

Client Problems
---------------

If manual efforts to upload a script are successful, but your client still
fails, you need to obtain a view of what the client communicates to the
server. A common method is to sniff the client protocol session using a tool
like 'ngrep'. However, this will not work when TLS is active. If the problem
is not specific to TLS, you are advised to temporarily turn off TLS and sniff
the plain text protocol.
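For instance, a minimal 'ngrep' invocation could look like this (assuming the
default ManageSieve port 4190; adjust the interface and port to your setup):

---%<-------------------------------------------------------------------------
ngrep -d any -W byline port 4190
---%<-------------------------------------------------------------------------

This prints the ManageSieve commands and responses passing over the wire,
which usually makes it obvious where the client and server disagree.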
If TLS is part of the issue, you can use Dovecot's [Debugging.Rawlog.txt] facility to see what is going on if the client is logged in. If the authentication is the problem, there is no real nice way to obtain a transcript of the protocol. One way is to run managesieve from inetd, wrapping it into a script that writes the protocol messages somewhere (*FIXME*: This needs some checking and explanation). Alternatively, if possible, the client can be altered to write its protocol messages somewhere. Refer to the [Pigeonhole.ManageSieve.Clients.txt] for information on known client problems. Known Server Issues and Protocol Deviations ------------------------------------------- * The ANONYMOUS authentication mechanism is currently not supported and explicitly denied. *NOTE*: If you add new issues to this list, notify the author or send an e-mail to the Dovecot mailing list [http://dovecot.org/mailinglists.html]. In any case, you must make sure that the issue is properly explained and that the author can contact you for more information. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MboxProblems.txt0000644000175000017500000001377412244263650015012 00000000000000mbox problems ============= External modifications ---------------------- In general Dovecot doesn't mind if you modify the mbox file externally. It's fine if external software expunges messages or appends new ones. However moving around existing messages, inserting messages in the middle of the file or modifying existing messages isn't allowed. Especially modifying existing messages (eg. removing attachments) may cause all kinds of problems. If you do that, at the minimum go and delete 'dovecot.index.cache' file from the mailbox, otherwise weird things may happen. However IMAP protocol guarantees that messages don't change at all, and deleting Dovecot's cache file doesn't clear clients' local caches, so it still may not work right. If you insert messages, or if you "undelete" messages (eg. replace mbox from a backup), you may see errors in Dovecot's logs: ---%<------------------------------------------------------------------------- mbox sync: UID inserted in the middle of mailbox /home/tss/mail/inbox (817 > 787, seq=18, idx_msgs=32) ---%<------------------------------------------------------------------------- This is normal. Dovecot just assigned new UIDs for the messages. See below for other reasons why UID insertions could happen. Debugging UID insertions ------------------------ The above error message can be read as: "18th message in the mbox file contained X-UID: 787 header, however the index file at that position told the message was supposed to have UID 817. There are 32 messages currently in the index file." There are four possibilities why the error message could happen: 1. Message with a X-UID: 787 header really was inserted in the mbox file. For example you replaced mbox from a backup. 2. Something changed the X-UID headers. I don't think any existing software can cause this. 3. The message was expunged from the index file, but for some reason it wasn't expunged from the mbox file. The index file is updated only after a successful mbox file modification, so this shouldn't really happen either. 4. If this problem happens constantly, it could mean that you're sharing the same index file for multiple different mboxes! * This could happen if you let Dovecot do mailbox autodetection and it sometimes uses '/var/mail/%u' (when it exists) and other times '~/mail/inbox'. 
Use an explicit [MailLocation.txt] setting to make sure the same INBOX is used. * Another possibility is that you're sharing index files between multiple users. Each user must have their own home directory. It's possible that broken X-UID headers in mails and 'mbox_lazy_writes=yes' combination has some bugs. If you're able to reproduce such an error, please let me know how. Dovecot versions earlier than 1.0.rc27 have some known bugs. UIDVALIDITY changes ------------------- UIDVALIDITY is stored in X-IMAPbase: or X-IMAP: header of the first message in mbox file. This is done by both Dovecot and UW-IMAP (and Pine). It's also stored in 'dovecot.index' file. It shouldn't normally change, because if it does it means that client has to download all the messages for the mailbox again. If the UIDVALIDITY in mbox file doesn't match the one in 'dovecot.index' file, Dovecot logs an error: ---%<------------------------------------------------------------------------- UIDVALIDITY changed (1100532544 -> 1178155834) in mbox file /home/user/mail/mailbox ---%<------------------------------------------------------------------------- This can happen when the following happens: 1. Dovecot accesses the mailbox saving the current UIDVALIDITY to 'dovecot.index' file. 2. The UIDVALIDITY gets lost from the mbox file * X-IMAP: or X-IMAPbase: header gets lost because something else than Dovecot or UW-IMAP deletes the first message * The whole file gets truncated * Something else than Dovecot deletes or renames the mbox 3. The mailbox is accessed (or created if necessary) by UW-IMAP or Pine. It notices that the mailbox is missing UIDVALIDITY, so it assigns a new UIDVALIDITY and writes the X-IMAPbase: or X-IMAP: header. * Also Dovecot that's configured to not use index files behaves the same. 4. Dovecot accesses again the mailbox. UIDVALIDITY in the mbox file's header doesn't match the one in 'dovecot.index' file. It logs an error and updates the UIDVALIDITY in the index file to the new one. Crashes ------- Dovecot's mbox code is a bit fragile because of the way it works. However instead of just corrupting the mbox file, it usually assert-crashes whenever it notices an inconsistency. You may see crashes such as: ---%<------------------------------------------------------------------------- Panic: mbox /home/user/mail/mailbox: seq=2 uid=45 uid_broken=0 originally needed 12 bytes, now needs 27 bytes ---%<------------------------------------------------------------------------- This is a bit difficult problem to fix. Usually this crash has been related to Dovecot rewriting some headers that were broken. If you see these crashes, it would really help if you were able to reproduce the crash. If you have such a mailbox which crashes every time when it's tried to be opened, please put the mbox through mbox anonymizer [http://dovecot.org/tools/mbox-anonymize.pl] and send it, the mailbox's 'dovecot.index' and 'dovecot.index.log' files to tss@iki.fi. None of those files contain any actual message contents so it's be safe to send them. Avoiding crashes and errors --------------------------- Since the problems usually have been related to broken headers, you should be able to avoid them by filtering out all the Dovecot's internal metadata headers. This is a good idea to do in any case. If you use [LDA.txt] it does this filtering automatically. Otherwise you could do this in your SMTP server. 
The headers that you should filter out are: * Content-Length * Status * X-IMAP * X-IMAPbase * X-Keywords * X-Status * X-UID * X-UIDL (if you're using 'pop3_reuse_xuidl=yes') (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/MTA.txt0000644000175000017500000000463712244263647013026 00000000000000MTA === MTA is an acronym for _M_ail _T_ransport _A_gent. It is the software that works behind the scenes to transport E-Mail messages from one computer to another. MUAs (such as mutt, thunderbird, sylpheed, evolution, kmail) hand off newly sent messages to an MTA. MTAs talk to other MTAs, and either deliver mail locally or hand it off for delivery to an [MTA.txt] if it was destined to the local system. MTA is a generic term and usually refers to one of these popular software packages: * Postfix [http://www.postfix.org/] is Wietse Venema's secure, fast and flexible mailer. Default on SUSE Linux and NetBSD. * Exim [http://www.exim.org/] is Philip Hazel's flexible mailer. Default on Debian GNU/Linux. * Sendmail [http://www.sendmail.org/] the original BSD mailer. Default on FreeBSD; a Sun spinoff is used on Solaris. * Courier [http://www.courier-mta.org/] was inspired by qmail, but intends to do things right. * qmail [http://cr.yp.to/qmail.html] is an obsolete and unmaintained server. Its POP3 part can be taken over by Dovecot. Qmail started off boasting about speed and security in the mid-1990s, but has lots of unfixed bugs (this document includes patches where known), [http://home.pages.de/~mandree/qmail-bugs.html] among them security bugs that remain unfixed, and the security guarantee (500 USD) denied. If you really intend to continue using it, read Dave Sill's Life with qmail [http://www.lifewithqmail.org/] which contains instructions to work around some of qmail's security issues. Some people also subsume mail fetching utilities under the MTA category, among them: * fetchmail [http://www.fetchmail.info/] a fast mail retriever for the POP2, POP3/KPOP/SDPS, IMAP2/IMAP4, ODMR and ETRN protocols, SSL and Kerberos capable. It forwards the retrieved messages to SMTP/ESMTP, LMTP servers or into an . Developed out of Carl Harris's popclient by Eric S. Raymond. Fetchmail is now maintained by and Rob F. Funk. * getmail [http://pyropus.ca/software/getmail/] a POP3/SDPS and IMAP4 (with SSL) enabled mail retrieval utility written in Python. Developed by Chales Cazabon. These mail fetching utilities can be used to store mail for later retrieval by dovecot. Contrast this to the [MDA.txt], sometimes called local delivery agent (LDA). (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Plugins.MailFilter.txt0000644000175000017500000000510212244263657016042 00000000000000Mail filter plugin ================== Mail filter plugin can be used to filter written and/or read mails via a script, for example to encrypt/decrypt mails. Currently the filtering must not modify the message in any way: mail -> write filter -> read filter -> must produce exactly the original mail back. (TODO: Modifying the mail during writing would be possible with some code changes.) Note that IMAP protocol requires that emails never change, so the read filter must always produce the same output for the message. If the output changes you'll probably see some errors about Dovecot's cache file being corrupted and the IMAP client may also become confused if it has already cached some of the mail data. 
Configuration ------------- Add to 'dovecot.conf': ---%<------------------------------------------------------------------------- mail_plugins = $mail_plugins mail_filter plugin { # Read filter: mail_filter = mail-filter %u # %u = username given to the script as first parameter # Write filter: mail_filter_out = mail-filter-out %u } service mail-filter { executable = script /usr/local/bin/mail-filter.sh user = dovecot # run unprivileged unix_listener mail-filter { # enough permissions to give imap/pop3/etc processes access to this socket mode = 0600 user = vmail } } service mail-filter-out { executable = script /usr/local/bin/mail-filter-out.sh user = dovecot # run unprivileged unix_listener mail-filter { # enough permissions to give imap/pop3/etc processes access to this socket mode = 0600 user = vmail } } ---%<------------------------------------------------------------------------- Example scripts --------------- Here's a minimal example of how gpg could be used to encrypt and decrypt mails. All the key handling details are left out. The mail is read from stdin and written to stdout. Note that the plugin currently can't handle asynchronously reading+writing data, so the script cannot write any data to stdout before it has read everything from stdin. This is most easily done by first saving the stdin to a temporary file. 'mail-filter.sh': ---%<------------------------------------------------------------------------- cat > tempfile gpg -d tempfile rm -f tempfile ---%<------------------------------------------------------------------------- 'mail-filter-out.sh': ---%<------------------------------------------------------------------------- USER=$1 cat > tempfile gpg -e -r $USER tempfile rm -f tempfile ---%<------------------------------------------------------------------------- (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/SharedMailboxes.txt0000644000175000017500000000124212244263660015437 00000000000000Shared mailboxes ================ Dovecot can support mailbox sharing in several different ways: * [SharedMailboxes.Public.txt]: Shared mailboxes created by administrators (ACLs can still restrict who sees them) * [SharedMailboxes.Shared.txt]: Users sharing their mailboxes to other users (using IMAP ACL commands) (v1.2+) * [SharedMailboxes.Symlinks.txt]: Quick and dirty way of sharing a few mailboxes. See for common filesystem related permission problems that are common with all the sharing methods. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/PasswordDatabase.ExtraFields.Proxy.txt0000644000175000017500000001742112244263656021160 00000000000000Proxying ======== Dovecot supports proxying IMAP, POP3 and connections to other hosts. The proxying can be done for all users, or only for some specific users. There are two ways to do the authentication: 1. Forward the password to the remote server. The proxy may or may not perform authentication itself. This requires that the client uses only plaintext authentication, or alternatively the proxy has access to users' passwords in plaintext. 2. Let Dovecot proxy perform the authentication and login to remote server using the proxy's [MasterPassword.txt]. This allows client to use also non-plaintext authentication. The proxy is configured pretty much the same way as [PasswordDatabase.ExtraFields.Host.txt], with the addition of 'proxy' field. The common fields to use for both proxying ways are: * 'proxy' and 'proxy_maybe': Enables the proxying. Either one of these fields is required. 
* 'proxy_maybe' can be used to implement "automatic proxying". If the proxy destination matches the current connection, the user gets logged in normally instead of being proxied. If the same happens with 'proxy', the login fails with "Proxying loops" error. * 'proxy_maybe' with LMTP requires v2.1.0+ * 'proxy_maybe' with 'host=' requires v2.1.2+. * 'proxy_always' can be used with 'proxy_maybe' to conditionally do proxying to specified remote host (host isn't self) or to let director assign a backend host (host is self). So basically this setting just always sends the 'proxy' extra field to login process, but not necessarily the 'host'. Useful when dividing users across multiple director clusters. * 'host=s': The destination server's *IP address*. This field is required. * 'port=s': The destination server's port. The default is 143 with IMAP and 110 with POP3. * 'destuser=s': Tell client to use a different username when logging in. * 'proxy_timeout': Abort connection after this many seconds. You can use SSL/TLS connection to destination server by returning: * 'ssl=yes': Use SSL and require a valid verified remote certificate. *WARNING: Unless used carefully, this is an insecure setting!* Before v2.0.16/v2.1.beta1 the host name isn't checked in any way against the certificate's CN. The only way to use this securely is to only use and allow your own private CA's certs, anything else is exploitable by a man-in-the-middle attack. * 'ssl=any-cert': Use SSL, but don't require a valid remote certificate. * 'starttls': Use STARTTLS command instead of doing SSL handshake immediately after connected. * 'starttls=any-cert': Combine starttls and ssl=any-cert. * Additionally you can also tell Dovecot to send SSL client certificate to the remote server using 'ssl_client_cert' and 'ssl_client_key' settings in 'dovecot.conf' (v2.0.17+). Set 'login_trusted_networks' to point to the proxies in the backends. This way you'll get the clients' actual IP addresses logged instead of the proxy's. The destination servers don't need to be running Dovecot, but you should make sure that the Dovecot proxy doesn't advertise more capabilities than the destination server can handle. For IMAP you can do this by changing 'imap_capability' setting. For POP3 you'll have to modify Dovecot's sources for now ('src/pop3/capability.h'). Dovecot also automatically sends updated untagged CAPABILITY reply if it detects that the remote server has different capabilities than what it already advertised to the client, but some clients simply ignore the updated CAPABILITY reply. Password forwarding ------------------- If you don't want proxy itself to do authentication, you can configure it to succeed with any given password. You can do this by returning an empty password and 'nopassword' field. Master password --------------- This way of forwarding requires the destination server to support master user feature. The users will be normally authenticated in the proxy and the common proxy fields are returned, but you'll need to return two fields specially: * 'master=s': This contains the master username (e.g. "proxy"). It's used as SASL auhentication ID. * Alternatively you could return 'destuser=user*master' and set 'auth_master_user_separator = *'. * 'pass=s': This field contains the master user's password. See for more information how to configure this. 
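As a rough sketch of what such a passdb lookup could return when using SQL,
the query below authenticates the user normally and additionally hands back
the proxy fields together with the master credentials. The table layout, the
master username "proxy" and its password are placeholders for this example
only:

---%<-------------------------------------------------------------------------
password_query = SELECT password, host, 'Y' AS proxy, \
  'proxy' AS master, 'master-secret' AS pass \
  FROM users WHERE user = '%u'
---%<-------------------------------------------------------------------------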
Example password forwarding SQL configuration
---------------------------------------------

Create the SQL table:

---%<-------------------------------------------------------------------------
CREATE TABLE proxy (
  user varchar(255) NOT NULL,
  host varchar(16) default NULL,
  destuser varchar(255) NOT NULL default '',
  PRIMARY KEY (user)
);
---%<-------------------------------------------------------------------------

Insert data into SQL corresponding to your users. Working data could look
like this:

+------+-------------+-----------------+
| user | host        | destuser        |
+------+-------------+-----------------+
| john | 192.168.0.1 |                 |
+------+-------------+-----------------+
| joe  | 192.168.0.2 | joe@example.com |
+------+-------------+-----------------+

The important parts of 'dovecot.conf':

---%<-------------------------------------------------------------------------
# If you want to trade a bit of security for higher performance, change these settings:
service imap-login {
  service_count = 0
}
service pop3-login {
  service_count = 0
}

# If you are not moving mailboxes between hosts on a daily basis you can
# use authentication cache pretty safely.
auth_cache_size = 4096

auth_mechanisms = plain
passdb {
  driver = sql
  args = /usr/local/etc/dovecot/dovecot-sql.conf.ext
}
---%<-------------------------------------------------------------------------

The important parts of 'dovecot-sql.conf.ext':

---%<-------------------------------------------------------------------------
driver = mysql
connect = host=sqlhost1 host=sqlhost2 dbname=mail user=dovecot password=secret
password_query = SELECT NULL AS password, 'Y' as nopassword, host, destuser, 'Y' AS proxy FROM proxy WHERE user = '%u'
---%<-------------------------------------------------------------------------

Example proxy_maybe SQL configuration
-------------------------------------

Create the SQL table:

---%<-------------------------------------------------------------------------
CREATE TABLE users (
  user varchar(255) NOT NULL,
  domain varchar(255) NOT NULL,
  password varchar(100) NOT NULL,
  host varchar(16) NOT NULL,
  home varchar(100) NOT NULL,
  PRIMARY KEY (user)
);
---%<-------------------------------------------------------------------------

The important parts of 'dovecot.conf':

---%<-------------------------------------------------------------------------
# user/group who owns the message files:
mail_uid = vmail
mail_gid = vmail

auth_mechanisms = plain
passdb {
  driver = sql
  args = /usr/local/etc/dovecot/dovecot-sql.conf.ext
}
userdb {
  driver = sql
  args = /usr/local/etc/dovecot/dovecot-sql.conf.ext
}
---%<-------------------------------------------------------------------------

The important parts of 'dovecot-sql.conf.ext':

---%<-------------------------------------------------------------------------
driver = mysql
password_query = \
  SELECT concat(user, '@', domain) AS user, password, host, 'Y' AS proxy_maybe \
  FROM users WHERE user = '%n' AND domain = '%d'
user_query = SELECT user AS username, domain, home \
  FROM users WHERE user = '%n' AND domain = '%d'
---%<-------------------------------------------------------------------------

Example proxy LDAP configuration
--------------------------------

See for more information and a worked-out example.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Clients.NegativeUIDs.txt0000644000175000017500000000372512244263643016265 00000000000000
Negative UIDs
=============

---%<-------------------------------------------------------------------------
Invalid messageset:
1181461470:-1181461446.
---%<-------------------------------------------------------------------------

IMAP uses unsigned 32bit integers for unique message identifiers.
Unfortunately a lot of IMAP clients use 32bit signed integers, which means
that if the UIDs go higher than 2147483647, they'll wrap to negative
integers. This causes errors such as the one above. However, normally the
UIDs should never go that high, so it's possible to avoid this problem.

mbox
----

Earlier Dovecot versions had bugs which could cause X-UID: headers in
incoming messages to grow the UIDs too high. Some spam messages especially
contained these intentionally broken X-UID: headers.

With newer Dovecot versions these broken X-UID: headers aren't practically
ever used. It happens only if the mail has a valid X-IMAPbase: header, an
X-UID: header and the mail is written to an empty mbox file. Note that this
can happen only with new mboxes, because expunging all messages in a mailbox
causes Dovecot to create a metadata message at the beginning of the mbox
file.

In any case it's still a good idea to filter out X-UID: and other metadata
headers in your MDA. [LDA.txt] does this internally. See for a list of
headers to filter out.

Fixing
------

Fixing is done by letting Dovecot update the UIDVALIDITY value and recreate
the UIDs beginning from one. This means that the client's local cache will be
invalidated and the client will be required to download all the messages
again.

mbox
----

Delete Dovecot's index files (e.g. '.imap/INBOX/') and the X-IMAP: and
X-IMAPbase: headers from the mbox file.

Maildir
-------

This should really never be a problem with Maildir. If, however, you have
managed to cause it somehow (by receiving 2 billion mails?), you can recreate
the UIDs by deleting the 'dovecot-uidlist' file.

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/AuthDatabase.txt0000644000175000017500000000135312244263637014722 00000000000000
Authentication Databases
========================

These databases can be used as both [PasswordDatabase.txt] and
[UserDatabase.txt]:

 * [AuthDatabase.Passwd.txt]: System users (NSS, '/etc/passwd', or similar)
 * [AuthDatabase.PasswdFile.txt]: '/etc/passwd'-like file in specified
   location
 * [AuthDatabase.LDAP.txt]: Lightweight Directory Access Protocol
 * [AuthDatabase.SQL.txt]: SQL database (PostgreSQL, MySQL, SQLite)
 * [AuthDatabase.Dict.txt]: Dict key-value database (Redis, memcached, etc.)
 * [AuthDatabase.VPopMail.txt]: External software used to handle virtual
   domains

(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/Pigeonhole.ManageSieve.Install.txt0000644000175000017500000000007312244263656020254 00000000000000
(This file was created from the wiki on 2013-11-24 04:42)
dovecot-2.2.9/doc/wiki/MailboxFormat.dbox.txt0000644000175000017500000002601112244263650016064 00000000000000
dbox
====

dbox is Dovecot's own high-performance mailbox format. The original version
was introduced in v1.0 alpha4, but since then it has been completely
redesigned in the v1.1 series and improved even further in v2.0.

dbox can be used in two ways:

 1. *single-dbox* ('sdbox' in [MailLocation.txt]): One message per file,
    similar to [MailboxFormat.Maildir.txt]. For backwards compatibility,
    'dbox' is an alias to 'sdbox' in [MailLocation.txt].
 2. *multi-dbox* ('mdbox' in [MailLocation.txt]): Multiple messages per file,
    but unlike [MailboxFormat.mbox.txt], multiple files per mailbox.
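For example (the paths here are only illustrative, not a recommendation), the
two variants are selected with the 'sdbox' or 'mdbox' prefix in
[MailLocation.txt]:

---%<-------------------------------------------------------------------------
# single-dbox: one message per file
mail_location = sdbox:~/sdbox

# multi-dbox: multiple messages per m.* file
mail_location = mdbox:~/mdbox
---%<-------------------------------------------------------------------------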
One of the main reasons for dbox's high performance is that it uses Dovecot's index files as the only storage for message flags and keywords, so the indexes don't have to be "synchronized". Dovecot trusts that they're always up-to-date (unless it sees that something is clearly broken). This also means that *you must not lose the dbox index files, they can't be regenerated without data loss*. dbox has a feature for transparently moving message data to an alternate storage area. See [MailboxFormat.dbox.txt] below. dbox storage is extensible. Single instance attachment storage was already implemented as such extension. Layout ------ By default, the dbox filesystem layout will be as follows. Data which isn't the actual message content is stored in a layout common to both *single-dbox* and *multi-dbox*: * '/mailboxes/INBOX/dbox-Mails/dovecot.index*' - Index files for INBOX * '/mailboxes/foo/dbox-Mails/dovecot.index*' - Index files for mailbox "foo" * '/mailboxes/foo/bar/dbox-Mails/dovecot.index*' - Index files for mailbox "foo/bar" * '/dovecot.mailbox.log*' - Mailbox changelog * '/subscriptions' - subscribed mailboxes list * '/dovecot-uidvalidity*' - IMAP UID validity Note that with dbox the Index files actually contain significant data which is held nowhere else. Index files for both *single-dbox* and *multi-dbox* contain message flags and keywords. For *multi-dbox*, the index file also contains the map_uids which link (via the "map index") to the actual message data. This data cannot be automatically recreated, so it is important that Index files are treated with the same care as message data files. Index files can be stored in a different location by using the INDEX parameter in the mail location specification. If the INDEX parameter is specified, it will make Dovecot look for the Index files as follows: * '/mailboxes/INBOX/dbox-Mails/dovecot.index*' - Index files for INBOX * '/mailboxes/foo/dbox-Mails/dovecot.index*' - Index files for mailbox "foo" * '/mailboxes/foo/bar/dbox-Mails/dovecot.index*' - Index files for mailbox "foo/bar" Actual message content is stored differently depending on whether it is *single-dbox* or *multi-dbox*. Under *single-dbox* we have: * '/mailboxes/INBOX/dbox-Mails/u.*' - Numbered files ('u.1','u.2', ...) each containing one message of INBOX * '/mailboxes/foo/dbox-Mails/u.*' - Files each containing one message for mailbox "foo" * '/mailboxes/foo/bar/dbox-Mails/u.*' - Files each containing one message for for mailbox "foo/bar" Under *multi-dbox* we have: * '/storage/dovecot.map.index*' - "Map index" containing a record for each message stored * '/storage/m.*' - Numbered files ('m.1', 'm.2', ...) each containing one or multiple messages With Dovecot versions 2.0.4 and later, setting the INDEX parameter sets the location of the "map index" as well as the location of the mailbox indexes. 
So this would make the "map index" be stored as follows: * '/storage/dovecot.map.index*' - "Map index" containing a record for each message stored Multi-dbox ---------- You can enable multi-dbox with: ---%<------------------------------------------------------------------------- mail_location = mdbox:~/mdbox ---%<------------------------------------------------------------------------- The directory layout (under '~/mdbox/') is: * '~/mdbox/storage/' contains the actual mail data for all mailboxes * '~/mdbox/mailboxes/' contains directories for mailboxes and their index files The storage directory has files: * 'dovecot.map.index*' files contain the "map index" * 'm.*' files contain the mail data Each m.* file contains one or more messages. 'mdbox_rotate_size' setting can be used to configure how large the files can grow. The map index contains a record for each message: * map_uid: Unique growing 32 bit number for the message. * refcount: 16 bit reference counter for this message. Each time the message is copied the refcount is increased. * file_id: File number containing the message. For example if file_id=5, the message is in file 'm.5'. * offset: Offset to message within the file. * size: Space used by the message in the file, including all metadata. Mailbox indexes refer to messages only using map_uids. This allows messages to be moved to different files by updating only the map index. Copying is done simply by appending a new record to mailbox index containing the existing map_uid and increasing its refcount. If refcount grows over 32768, currently Dovecot gives an error message. It's unlikely anyone really wants to copy the same message that many times. Expunging a message only decreases the message's refcount. The space is later freed in "purge" step. This is typically done in a nightly cronjob when there's less disk I/O activity. The purging first finds all files that have refcount=0 mails. Then it goes through each file and copies the refcount>0 mails to other mdbox files (to the same files as where newly saved messages would also go), updates the map index and finally deletes the original file. So there is never any overwriting or file truncation. The purging can be invoked explicitly running [Tools.Doveadm.Purge.txt]. There are several safety features built into dbox to avoid losing messages or their state if map index or mailbox index gets corrupted: * Each message has a 128 bit globally unique identifier (GUID). The GUID is saved to message metadata in m.* files and also to mailbox indexes. This allows Dovecot to find messages even if map index gets corrupted. * Whenever index file is rewritten, the old index is renamed to 'dovecot.index.backup'. If the main index becomes corrupted, this backup index is used to restore flags and figure out what messages belong to the mailbox. * Initial mailbox where message was saved to is stored in the message metadata in m.* files. So if all indexes get lost, the messages are put to their initial mailboxes. This is better than placing everything into a single mailbox. Alternate storage ----------------- Unlike Maildir, with dbox the message file names don't change. This makes it possible to support storing files in multiple directories or mount points. dbox supports looking up files from "altpath" if they're not found from the primary path. This means that it's possible to move older mails that are rarely accessed to cheaper (slower) storage. To enable this functionality, use the 'ALT' parameter in the mail location. 
For example, specifying the mail location as:

---%<-------------------------------------------------------------------------
mail_location = mdbox:/var/vmail/%d/%n:ALT=/altstorage/vmail/%d/%n
---%<-------------------------------------------------------------------------

will make Dovecot look for message data first under '/var/vmail/%d/%n'
("primary storage"), and if it is not found there it will look under
'/altstorage/vmail/%d/%n' ("alternate storage") instead. There's no problem
having the same (identical) file in both storages.

Keep the unmounted '/altstorage' directory permissions such that Dovecot mail
processes can't create directories under it (e.g. root:root 0755). This way,
if the alt storage isn't mounted for some reason, Dovecot won't think that
all the messages in alt storage were deleted and lose their flags. With v2.1
this isn't strictly required anymore, because it keeps track of missing
mountpoints.

When messages are moved from primary storage to alternate storage, only the
actual message data (stored in files 'u.*' under *single-dbox* and 'm.*'
under *multi-dbox*) is moved to alternate storage; everything else remains in
the primary storage.

Message data can be moved from primary storage to alternate storage using
[Tools.Doveadm.Altmove.txt]. (In theory you could also do this with some
combination of cp/mv, but better not to go there unless you really need to.
The updates must be atomic in any case, so a direct cp won't work.)

The granularity at which data is moved to alternate storage is individual
messages. This is true even for *multi-dbox* when multiple messages are
stored in a single 'm.*' storage file. If individual messages from an 'm.*'
storage file need to be moved to alternate storage, the message data is
written out to a different 'm.*' storage file (either new or existing) in the
alternate storage area and the "map index" is updated accordingly.

Alternate storage is completely transparent at the IMAP/POP level. Users
accessing mail through IMAP or POP cannot normally tell if any given message
is stored in primary storage or alternate storage. Conceivably users might be
able to measure a performance difference; the point is that there is no
IMAP/POP command which could be used to expose this information. It is
entirely possible to have a mail folder which contains a mix of messages
stored in primary storage and alternate storage.

dbox and mail header metadata
-----------------------------

Unlike when using [MailboxFormat.mbox.txt] as the [MailboxFormat.txt], where
mail headers (for example 'Status', 'X-UID', etc.) are used to store metadata
([MailboxFormat.mbox.txt]), the mail headers within dbox files are (usually)
*not* used for this purpose by Dovecot; neither when mails are
created/moved/etc. via IMAP nor when dboxes are placed (e.g. copied or moved
in the filesystem) in a mail location (and then "imported" by Dovecot).
Therefore, it is (usually) *not* necessary to strip any such mail headers at
the MTA, MDA or LDA (as it is recommended with [MailboxFormat.mbox.txt]).

There is one exception, though, namely when 'pop3_reuse_xuidl=yes' (which is
however rather deprecated): in this case 'X-UIDL' is used for the POP3 UIDLs.
Therefore, *in this case, it is recommended to strip the 'X-UIDL' mail
headers _case-insensitively_ at the MTA, MDA or LDA*.

Mail delivery
=============

Some MTA configurations have the MTA directly dropping mail into Maildirs or
mboxes. Since most MTAs don't understand the dbox format, this option is not
available.
Instead, the MTA could be set up to use [LMTP.txt] or [LDA.txt]. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Variables.txt0000644000175000017500000002750012244263664014306 00000000000000Variables ========= You can use special variables in several places: * [MailLocation.txt] setting and [Namespaces.txt] locations * [UserDatabase.Static.txt] and [AuthDatabase.PasswdFile.txt] template strings * [AuthDatabase.LDAP.txt] and [AuthDatabase.SQL.txt] userdb query strings * log prefix for imap/pop3 process * [Plugins.txt] settings The variables that work (almost) everywhere are: * +------------+----------+-----------------------------------------------------+ | *Variable* | *Long | *Description* | | | name* | | +------------+----------+-----------------------------------------------------+ | %% | | '%' character. See for | | | | further information about %% variables | +------------+----------+-----------------------------------------------------+ | %u | user | full username (e.g. user@domain) | +------------+----------+-----------------------------------------------------+ | %n | username | user part in user@domain, same as %u if there's no | | | | domain | +------------+----------+-----------------------------------------------------+ | %d | domain | domain part in user@domain, empty if user there's no| | | | domain | +------------+----------+-----------------------------------------------------+ | %s | service | imap, pop3, smtp, lda (and doveadm, dsync, etc.) | +------------+----------+-----------------------------------------------------+ | %p | pid | PID of the current process (login or imap/pop3 | | | | process) | +------------+----------+-----------------------------------------------------+ | %l | lip | local IP address | +------------+----------+-----------------------------------------------------+ | %r | rip | remote IP address | +------------+----------+-----------------------------------------------------+ | | session | session ID for this client connection (unique for 9 | | | | years) (v2.1.6+) | +------------+----------+-----------------------------------------------------+ These variables work almost everywhere else except in Dovecot-auth (userdb queries/templates): * +------------+----------+-----------------------------------------------------+ | *Variable* | *Long | *Description* | | | name* | | +------------+----------+-----------------------------------------------------+ | %h | home | home directory. Use of ~/ is better whenever | | | | possible. | +------------+----------+-----------------------------------------------------+ | %i | uid | UNIX UID of the user | +------------+----------+-----------------------------------------------------+ | | gid | UNIX group identifier of the user (v2.0.17+) | +------------+----------+-----------------------------------------------------+ These variables work only in Dovecot-auth and 'login_log_format_elements' setting: * +----+---------------+--------------------------------------------------------+ | %m | mech | | | | | [Authentication.Mechanisms.txt], e.g. PLAIN | +----+---------------+--------------------------------------------------------+ | %a | lport | Local port | +----+---------------+--------------------------------------------------------+ | %b | rport | Remote port | +----+---------------+--------------------------------------------------------+ | %c | secured | "secured" string with SSL, TLS and localhost | | | | connections. Otherwise empty. 
| +----+---------------+--------------------------------------------------------+ | | real_rip | Same as %{rip}, except in proxy setups contains the | | | | remote proxy's IP instead of the client's IP (v2.1.10+)| +----+---------------+--------------------------------------------------------+ | | real_lip | Same as %{lip}, except in proxy setups contains the | | | | local proxy's IP instead of the remote proxy's IP | | | | (v2.2+) | +----+---------------+--------------------------------------------------------+ | | real_rport | Similar to %{real_rip} except for port instead of IP | | | | (v2.2+) | +----+---------------+--------------------------------------------------------+ | | real_lport | Similar to %{real_lip} except for port instead of IP | | | | (v2.2+) | +----+---------------+--------------------------------------------------------+ | | orig_user | Same as %{user}, except using the original username the| | | | client sent before any changes by auth process (v2.2.6+)| +----+---------------+--------------------------------------------------------+ | | orig_username | Same as %{username}, except using the original username| | | | (v2.2.6+) | +----+---------------+--------------------------------------------------------+ | | orig_domain | Same as %{domain}, except using the original username | | | | (v2.2.6+) | +----+---------------+--------------------------------------------------------+ These variables work only in Dovecot-auth: * +------------+----------------+-----------------------------------------------+ | *Variable* | *Long name* | *Description* | +------------+----------------+-----------------------------------------------+ | %w | password | plaintext password from plaintext | | | | authentication mechanism | +------------+----------------+-----------------------------------------------+ | %k | cert | "valid" if client had sent a valid client | | | | certificate, otherwise empty. | +------------+----------------+-----------------------------------------------+ | | login_user | For master user logins: Logged in user@domain | +------------+----------------+-----------------------------------------------+ | | login_username | For master user logins: Logged in user | +------------+----------------+-----------------------------------------------+ | | login_domain | For master user logins: Logged in domain | +------------+----------------+-----------------------------------------------+ | | session_pid | For user logins: The PID of the IMAP/POP3 | | | | process handling the session. (v2.2.7+) | +------------+----------------+-----------------------------------------------+ These variables work only in 'login_log_format_elements' setting: * +------------+--------------+-------------------------------------------------+ | *Variable* | *Long name* | *Description* | +------------+--------------+-------------------------------------------------+ | %k | ssl_security | SSL protocol and cipher information, e.g. 
"TLSv1| | | | with cipher DHE-RSA-AES256-SHA (256/256 bits)" | +------------+--------------+-------------------------------------------------+ | %e | mail_pid | Mail process (imap/pop3) PID that handles the | | | | post-login connection | +------------+--------------+-------------------------------------------------+ These variables work only in 'deliver_log_format' setting: * +------------+---------------+----------------------+ | *Variable* | *Long name* | *Description* | +------------+---------------+----------------------+ | %$ | | Log entry | +------------+---------------+----------------------+ | %m | msgid | Message-ID | +------------+---------------+----------------------+ | %s | subject | Subject | +------------+---------------+----------------------+ | %f | from | From address | +------------+---------------+----------------------+ | %e | from_envelope | Envelope sender | +------------+---------------+----------------------+ | %p | size | Message size | +------------+---------------+----------------------+ | %w | vsize | Virtual message size | +------------+---------------+----------------------+ * Long variable names can be used like '%{long_name} ' or with L modifier: '%L{long_name}'. * Environment variables can be accessed with '%{env:ENVIRONMENT_VARIABLE} '. * Additionally, the (self-explanatory) variables '%{pid} ' and '%{hostname} ' are available. Modifiers --------- You can apply a modifiers for each variable (e.g. %Us = POP3): * %L - lowercase * %U - uppercase * %E - escape '"', "'" and '\' characters by inserting '\' before them. Note that variables in SQL queries are automatically escaped, you don't need to use this modifier for them. * %X - parse the variable as a base-10 number, and convert it to base-16 (hexadecimal) * %R - reverse the string * %H - take a 32bit hash of the variable and return it as hex. You can also limit the hash value. For example %256Hu gives values 0..ff. You might want padding also, so %2.256Hu gives 00..ff. This can be useful for example in dividing users automatically to multiple partitions. * %H hash function is a bit bad if all the strings end with the same text, so if you're hashing usernames being in user@domain form, you probably want to reverse the username to get better hash value variety, e.g. %3RHu. * %N - "New hash" - same as %H, but based on MD5 to give better distribution of values (no need for any string reversing kludges either). (v2.2.3+) * %M - return the string's MD5 sum as hex. * %D - return "sub.domain.org" as "sub,dc=domain,dc=org" (for LDAP queries) * %T - Trim trailing whitespace You can take a substring of the variable by giving optional offset followed by '.' and width after the '%' character. For example %2u gives first two characters of the username. %2.1u gives third character of the username. If the offset is negative, it counts from the end, for example %-2.2i gives the UID mod 100 (last two characters of the UID printed in a string). If a positive offset points outside the value, empty string is returned, if a negative offset does then the string is taken from the start. If the width is prefixed with zero, the string isn't truncated, but only padded with '0' character if the string is shorter. For example %04i may return "0001", "1000" and "12345". %1.04i for the same string would return "001", "000" and "2345". The modifiers are applied from left-to-right order, except the substring is always taken from the final string. 
(This file was created from the wiki on 2013-11-24 04:43) dovecot-2.2.9/doc/wiki/FinishBasicConfiguration.txt0000644000175000017500000000070612244263645017306 00000000000000Finishing Basic Configuration ============================= Unless you're going to have only virtual users and you don't care about their passwords,*switch back to disable_plaintext_only = yes* and [SSL.txt]. Plaintext authentication is still allowed from localhost, so you can have your webmail application to connect to Dovecot without using SSL or even having to configure it. (This file was created from the wiki on 2013-11-24 04:42) dovecot-2.2.9/doc/wiki/Makefile.am0000644000175000017500000001400712244505156013662 00000000000000if BUILD_DOCS wikidir = $(docdir)/wiki wiki_DATA = $(wikifiles) endif EXTRA_DIST = \ $(wikifiles) wikifiles = ACL.txt \ AixPluginsSupport.txt \ AuthDatabase.CheckPassword.txt \ AuthDatabase.Dict.txt \ AuthDatabase.LDAP.AuthBinds.txt \ AuthDatabase.LDAP.PasswordLookups.txt \ AuthDatabase.LDAP.txt \ AuthDatabase.LDAP.Userdb.txt \ AuthDatabase.PasswdFile.txt \ AuthDatabase.Passwd.txt \ AuthDatabase.SQL.txt \ AuthDatabase.txt \ AuthDatabase.VPopMail.txt \ Authentication.Caching.txt \ Authentication.Kerberos.txt \ Authentication.MasterUsers.txt \ Authentication.Mechanisms.DigestMD5.txt \ Authentication.Mechanisms.NTLM.txt \ Authentication.Mechanisms.txt \ Authentication.Mechanisms.Winbind.txt \ Authentication.MultipleDatabases.txt \ Authentication.PasswordSchemes.txt \ Authentication.RestrictAccess.txt \ Authentication.txt \ BasicConfiguration.txt \ Chrooting.txt \ Clients.NegativeUIDs.txt \ Clients.txt \ CompilingSource.txt \ Debugging.Authentication.txt \ Debugging.ProcessTracing.txt \ Debugging.Rawlog.txt \ Debugging.Thunderbird.txt \ Design.Arrays.txt \ Design.AuthProcess.txt \ Design.AuthProtocol.txt \ Design.Buffers.txt \ Design.Code.txt \ Design.DoveadmProtocol.txt \ Design.Dsync.txt \ Design.Indexes.Cache.txt \ Design.Indexes.MailIndexApi.txt \ Design.Indexes.MainIndex.txt \ Design.Indexes.TransactionLog.txt \ Design.Indexes.txt \ Design.InputStreams.txt \ Design.MailProcess.txt \ Design.Memory.txt \ Design.OutputStreams.txt \ Design.Plugins.txt \ Design.Processes.txt \ Design.Storage.ErrorHandling.txt \ Design.Storage.MailboxList.txt \ Design.Storage.Mailbox.Save.txt \ Design.Storage.Mailbox.Search.txt \ Design.Storage.Mailbox.Sync.txt \ Design.Storage.Mailbox.Transaction.txt \ Design.Storage.Mailbox.txt \ Design.Storage.MailNamespace.txt \ Design.Storage.MailStorage.txt \ Design.Storage.Mail.txt \ Design.Storage.MailUser.txt \ Design.Storage.Plugins.txt \ Design.Strings.txt \ Design.txt \ Dict.txt \ Director.txt \ DomainLost.txt \ Errors.ChgrpNoPerm.txt \ FindMailLocation.txt \ FinishBasicConfiguration.txt \ HowTo.EximAndDovecotSASL.txt \ HowTo.ImapcProxy.txt \ HowTo.PopBSMTPAndDovecot.txt \ HowTo.PopRelay.txt \ HowTo.PostfixAndDovecotSASL.txt \ HowTo.Rootless.txt \ HowTo.SimpleVirtualInstall.txt \ HowTo.txt \ IndexFiles.txt \ LDA.Exim.txt \ LDA.Indexing.txt \ LDA.Postfix.txt \ LDA.Qmail.txt \ LDA.Sendmail.txt \ LDA.txt \ LMTP.Exim.txt \ LMTP.txt \ Logging.txt \ LoginProcess.txt \ MailboxFormat.Cydir.txt \ MailboxFormat.dbox.txt \ MailboxFormat.Maildir.txt \ MailboxFormat.mailstore.txt \ MailboxFormat.mbox.txt \ MailboxFormat.mbx.txt \ MailboxFormat.MH.txt \ MailboxFormat.txt \ MailboxSettings.txt \ maildrop.txt \ MailLocation.dbox.txt \ MailLocation.LocalDisk.txt \ MailLocation.Maildir.txt \ MailLocation.mbox.txt \ MailLocation.SharedDisk.txt \ MailLocation.txt \ MboxChildFolders.txt \ 
MboxLocking.txt \ MboxProblems.txt \ MDA.txt \ Migration.BincIMAP.txt \ Migration.Courier.txt \ Migration.Cyrus.txt \ Migration.Dsync.txt \ Migration.Gmail.txt \ Migration.Linuxconf.txt \ Migration.MailFormat.txt \ Migration.Online.txt \ Migration.Teapop.txt \ Migration.txt \ Migration.UW.txt \ Migration.Vm-pop3d.txt \ MissingMailboxes.txt \ Mountpoints.txt \ MTA.txt \ mutt.txt \ Namespaces.txt \ NFS.txt \ OSCompatibility.txt \ PasswordDatabase.BSDAuth.txt \ PasswordDatabase.ExtraFields.AllowNets.txt \ PasswordDatabase.ExtraFields.Host.txt \ PasswordDatabase.ExtraFields.NoDelay.txt \ PasswordDatabase.ExtraFields.NoLogin.txt \ PasswordDatabase.ExtraFields.Proxy.txt \ PasswordDatabase.ExtraFields.txt \ PasswordDatabase.ExtraFields.User.txt \ PasswordDatabase.IMAP.txt \ PasswordDatabase.PAM.txt \ PasswordDatabase.Shadow.txt \ PasswordDatabase.Static.txt \ PasswordDatabase.txt \ PerformanceTuning.txt \ Pigeonhole.Installation.txt \ Pigeonhole.ManageSieve.Clients.txt \ Pigeonhole.ManageSieve.Configuration.txt \ Pigeonhole.ManageSieve.Install.txt \ Pigeonhole.ManageSieve.Troubleshooting.txt \ Pigeonhole.ManageSieve.txt \ Pigeonhole.Sieve.Configuration.txt \ Pigeonhole.Sieve.Examples.txt \ Pigeonhole.Sieve.Extensions.SpamtestVirustest.txt \ Pigeonhole.Sieve.Extensions.txt \ Pigeonhole.Sieve.Extensions.Vacation.txt \ Pigeonhole.Sieve.Plugins.Extdata.txt \ Pigeonhole.Sieve.Plugins.Extprograms.txt \ Pigeonhole.Sieve.Plugins.Pipe.txt \ Pigeonhole.Sieve.Plugins.txt \ Pigeonhole.Sieve.Troubleshooting.txt \ Pigeonhole.Sieve.txt \ Pigeonhole.Sieve.Usage.txt \ Pigeonhole.txt \ Plugins.Autocreate.txt \ Plugins.Compress.txt \ Plugins.Expire.txt \ Plugins.FTS.Lucene.txt \ Plugins.FTS.Solr.txt \ Plugins.FTS.Squat.txt \ Plugins.FTS.txt \ Plugins.Lazyexpunge.txt \ Plugins.Listescape.txt \ Plugins.MailboxAlias.txt \ Plugins.MailFilter.txt \ Plugins.MailLog.txt \ Plugins.Notify.txt \ Plugins.Snarf.txt \ Plugins.Stats.txt \ Plugins.Trash.txt \ Plugins.txt \ Plugins.Virtual.txt \ Plugins.Zlib.txt \ POP3Server.txt \ PostLoginScripting.txt \ PreAuth.txt \ QuickConfiguration.txt \ Quota.Configuration.txt \ Quota.Dict.txt \ Quota.Dirsize.txt \ Quota.FS.txt \ Quota.Maildir.txt \ Quota.txt \ Replication.txt \ RunningDovecot.txt \ Sasl.txt \ SecurityTuning.txt \ Services.txt \ SharedMailboxes.Permissions.txt \ SharedMailboxes.Public.txt \ SharedMailboxes.Shared.txt \ SharedMailboxes.Symlinks.txt \ SharedMailboxes.txt \ SocketUnavailable.txt \ SSL.CertificateClientImporting.txt \ SSL.CertificateCreation.txt \ SSL.DovecotConfiguration.txt \ SSL.SNIClientSupport.txt \ SSL.txt \ Statistics.txt \ SystemUsers.txt \ TestInstallation.txt \ TestPop3Installation.txt \ TimeMovedBackwards.txt \ Upgrading.1.0.txt \ Upgrading.1.1.txt \ Upgrading.1.2.txt \ Upgrading.2.0.txt \ Upgrading.2.1.txt \ Upgrading.2.2.txt \ Upgrading.txt \ UserDatabase.ExtraFields.txt \ UserDatabase.NSS.txt \ UserDatabase.Prefetch.txt \ UserDatabase.Static.txt \ UserDatabase.txt \ UserIds.txt \ uw2dovecot.sh.txt \ Variables.txt \ VirtualUsers.Home.txt \ VirtualUsers.txt \ WhyDoesItNotWork.txt dovecot-2.2.9/doc/wiki/PasswordDatabase.IMAP.txt0000644000175000017500000000151712244263656016353 00000000000000Authentication via remote IMAP server ===================================== Available driver settings: * host=