pyparted-3.6/0000775000076400007640000000000011542323614010226 500000000000000pyparted-3.6/configure.ac0000664000076400007640000000411311542323532012432 00000000000000dnl configure.ac for pyparted dnl dnl Copyright (C) 2007, 2008, 2009 Red Hat, Inc. dnl dnl This copyrighted material is made available to anyone wishing to use, dnl modify, copy, or redistribute it subject to the terms and conditions of dnl the GNU General Public License v.2, or (at your option) any later version. dnl This program is distributed in the hope that it will be useful, but WITHOUT dnl ANY WARRANTY expressed or implied, including the implied warranties of dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General dnl Public License for more details. You should have received a copy of the dnl GNU General Public License along with this program; if not, write to the dnl Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA dnl 02110-1301, USA. Any Red Hat trademarks that are incorporated in the dnl source code or documentation are not subject to the GNU General Public dnl License and may only be used or replicated with the express permission of dnl Red Hat, Inc. dnl dnl Red Hat Author(s): David Cantrell m4_define(libparted_required_version, 2.3) m4_define(python_required_version, 2.7) AC_PREREQ(2.59) AC_INIT([pyparted], [3.6], [pyparted-devel@redhat.com]) AM_INIT_AUTOMAKE([color-tests]) AC_CONFIG_SRCDIR([src/_pedmodule.c]) AC_CONFIG_HEADER([config.h]) AC_CONFIG_FILES([Makefile include/Makefile include/docstrings/Makefile include/typeobjects/Makefile src/Makefile src/parted/Makefile tests/Makefile tests/_ped/Makefile tests/parted/Makefile]) AC_CONFIG_MACRO_DIR([m4]) AC_DEFINE_UNQUOTED([BUILD_DATE], ["`date +%m%d%Y`"], [Date of pyparted build]) AC_DISABLE_STATIC AC_PROG_CPP AC_PROG_CC AC_HEADER_STDC AC_PROG_LIBTOOL dnl Check for Python AM_PATH_PYTHON(python_required_version) AM_CHECK_PYTHON_HEADERS(,[AC_MSG_ERROR(could not find Python headers or library)]) dnl Check for GNU parted AM_CHECK_LIBPARTED(libparted_required_version) AM_CHECK_PED_PARTITION_LEGACY_BOOT AC_OUTPUT pyparted-3.6/depcomp0000755000076400007640000004426711542323606011537 00000000000000#! /bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2009-04-28.21; # UTC # Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009 Free # Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try \`$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. 
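Example (illustrative only -- the compiler, flags and file names here are
hypothetical; the variables are described below):

  depmode=gcc3 source=foo.c object=foo.o libtool=no \
  depfile=.deps/foo.Po tmpdepfile=.deps/foo.TPo \
  ./depcomp gcc -I. -c -o foo.o foo.c

This compiles foo.c as usual and, as a side effect, leaves the header
dependencies of foo.o in .deps/foo.Po for make to include on later runs.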
Environment variables: depmode Dependency tracking mode. source Source file read by `PROGRAMS ARGS'. object Object file output by `PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputing dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u="sed s,\\\\\\\\,/,g" depmode=msvisualcpp fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ## The second -e expression handles DOS-style file names with drive letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the `deleted header file' problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). 
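## For illustration only: the depfile this stanza ultimately wants to
## produce for a hypothetical sub/foo.c looks roughly like
##   sub/foo.o : sub/foo.c sub/foo.h
##   sub/foo.h :
## i.e. every header is also listed as an empty target of its own, so a
## vanished header no longer stops make cold.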
We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. tr ' ' ' ' < "$tmpdepfile" | ## Some versions of gcc put a space before the `:'. On the theory ## that the space means something, we add a space to the output as ## well. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like `#:fec' to the end of the # dependency line. tr ' ' ' ' < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ tr ' ' ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' ' ' < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile # "include basename.Plo" scheme. echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts `$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then # Each line is of the form `foo.o: dependent.h'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" # That's a tab and a space in the []. sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile # "include basename.Plo" scheme. echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; icc) # Intel's C compiler understands `-MD -MF file'. However on # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c # ICC 7.0 will fill foo.d with something like # foo.o: sub/foo.c # foo.o: sub/foo.h # which is wrong. 
We want: # sub/foo.o: sub/foo.c # sub/foo.o: sub/foo.h # sub/foo.c: # sub/foo.h: # ICC 7.1 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using \ : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile" # Add `dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in `foo.d' instead, so we check for that too. # Subdirectories are respected. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then # With Tru64 cc, shared objects can also be used to make a # static library. This mechanism is used in libtool 1.4 series to # handle both shared and static libraries in a single compilation. # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. # # With libtool 1.5 this exception was removed, and libtool now # generates 2 separate objects for the 2 libraries. These two # compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 tmpdepfile2=$dir$base.o.d # libtool 1.5 tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.o.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d tmpdepfile4=$dir$base.d "$@" -MD fi stat=$? 
if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" # That's a tab and a space in the []. sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove `-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for `:' # in the target name. This is to cope with DOS-style filenames: # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. "$@" $dashmflag | sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" tr ' ' ' ' < "$tmpdepfile" | \ ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" sed '1,2d' "$tmpdepfile" | tr ' ' ' ' | \ ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove `-o $object'. 
IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" echo " " >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: pyparted-3.6/Makefile.in0000664000076400007640000006324411542323606012225 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted # # Copyright (C) 2007, 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
# # Red Hat Author(s): David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = . DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in $(srcdir)/config.h.in \ $(top_srcdir)/configure AUTHORS COPYING ChangeLog NEWS TODO \ config.guess config.sub depcomp install-sh ltmain.sh missing \ py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_HEADER = config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir dist dist-all distcheck ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ { test ! -d "$(distdir)" \ || { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ && rm -fr "$(distdir)"; }; } am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best distuninstallcheck_listfiles = find . -type f -print distcleancheck_listfiles = find . 
-type f -print ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ ACLOCAL_AMFLAGS = -I m4 SUBDIRS = include src tests EXTRA_DIST = AUTHORS BUGS COPYING NEWS README TODO ChangeLog MAINTAINERCLEANFILES = Makefile.in config.guess config.h.in config.sub \ depcomp install-sh ltmain.sh missing ABOUT-NLS \ INSTALL aclocal.m4 configure CLEANFILES = *~ ChangeLog MOSTLYCLEANDIRS = m4 all: config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: am--refresh: @: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case 
'$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): config.h: stamp-h1 @if test ! -f $@; then \ rm -f stamp-h1; \ $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ else :; fi stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 cd $(top_builddir) && $(SHELL) ./config.status config.h $(srcdir)/config.h.in: $(am__configure_deps) ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) rm -f stamp-h1 touch $@ distclean-hdr: -rm -f config.h stamp-h1 mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs distclean-libtool: -rm -f libtool config.lt # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$(top_distdir)" distdir="$(distdir)" \ dist-hook -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 $(am__remove_distdir) dist-lzma: distdir tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma $(am__remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz $(am__remove_distdir) dist-tarZ: distdir tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__remove_distdir) dist-shar: distdir shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__remove_distdir) dist dist-all: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lzma*) \ lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir); chmod a+w $(distdir) mkdir $(distdir)/_build mkdir $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build \ && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ $(DISTCHECK_CONFIGURE_FLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @$(am__cd) '$(distuninstallcheck_dir)' \ && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . ; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile config.h installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f Makefile distclean-am: clean-am distclean-generic distclean-hdr \ distclean-libtool distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \ ctags-recursive install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am am--refresh check check-am clean clean-generic \ clean-libtool ctags ctags-recursive dist dist-all dist-bzip2 \ dist-gzip dist-hook dist-lzma dist-shar dist-tarZ dist-xz \ dist-zip distcheck distclean distclean-generic distclean-hdr \ distclean-libtool distclean-tags distcleancheck distdir \ distuninstallcheck dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-recursive uninstall uninstall-am dist-hook: rm -rf `find $(distdir) -type f -name .gitignore` ChangeLog: (GIT_DIR=.git git log > .changelog.tmp && mv .changelog.tmp ChangeLog; rm -f .changelog.tmp) || (touch ChangeLog; echo 'git directory not found: installing possibly empty ChangeLog.' >&2) pychecker: all $(MAKE) -C src/parted pychecker tag: dist-gzip @if [ -z "$(GPGKEY)" ]; then \ echo "GPGKEY environment variable missing, please set this to the key ID" ; \ echo "you want to use to tag the repository." ; \ exit 1 ; \ fi @git tag -u $(GPGKEY) -m "Tag as $(PACKAGE)-$(VERSION)" -f $(PACKAGE)-$(VERSION) @echo "Tagged as $(PACKAGE)-$(VERSION) (GPG signed)" bumpver: @NEWSUBVER=$$((`echo $(PACKAGE_VERSION) |cut -d . -f 2` + 1)) ; \ NEWVERSION=`echo $(PACKAGE_VERSION).$$NEWSUBVER |cut -d . -f 1,3` ; \ sed -i "s/AC_INIT(\[$(PACKAGE_NAME)\], \[$(PACKAGE_VERSION)\], \[$(PACKAGE_BUGREPORT\])/AC_INIT(\[$(PACKAGE_NAME)\], \[$$NEWVERSION\], \[$(PACKAGE_BUGREPORT\])/" configure.ac release: tag rm -rf $(PACKAGE)-$(VERSION) gzip -dc $(PACKAGE)-$(VERSION).tar.gz | tar -xvf - ( cd $(PACKAGE)-$(VERSION) && ./configure && make ) || exit 1 @echo @echo "$(PACKAGE)-$(VERSION).tar.gz is now ready to upload." 
@echo "Do not forget to push changes to the repository with:" @echo " git push" @echo " git push --tags" @echo @echo "Do not forget to add a new Version entry on the Trac site:" @echo " https://fedorahosted.org/pyparted/admin/ticket/versions" @echo rpmlog: @prevtag="$$(git tag -l | grep -v "^start$$" | tail -n 2 | head -n 1)" ; \ git log --pretty="format:- %s (%ae)" $${prevtag}.. | \ sed -e 's/@.*)/)/' | \ sed -e 's/%/%%/g' | \ grep -v "New version" | \ fold -s -w 77 | \ while read line ; do \ if [ ! "$$(echo $$line | cut -c-2)" = "- " ]; then \ echo " $$line" ; \ else \ echo "$$line" ; \ fi ; \ done # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/config.h.in0000664000076400007640000000340611542323604012173 00000000000000/* config.h.in. Generated from configure.ac by autoheader. */ /* Date of pyparted build */ #undef BUILD_DATE /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the header file. */ #undef HAVE_PARTED_PARTED_H /* Define if libparted has 'PED_PARTITION_LEGACY_BOOT' constant. */ #undef HAVE_PED_PARTITION_LEGACY_BOOT /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Version number of package */ #undef VERSION pyparted-3.6/AUTHORS0000644000076400007640000000015711536235213011217 00000000000000David Cantrell Chris Lumens David Campbell pyparted-3.6/TODO0000664000076400007640000000654511514547631010656 00000000000000- Methods in the parted module that just return data and take in no parameters...make those read-only properties and get rid of the method. Since we have more or less established the API now, mark the methods as Deprecated and leave them around for a release before removing them. - add parted.Device.toSectors() method that takes in a size specification, such as 10.5MB, as a string and converts that size to a sector count based on the sector size of that Device - use disttools from Python to do as much work as possible - Walk through all of the src/py*.c files and make sure libparted exceptions are captured and filtered back up through Python. Will need to define some sane Python exception classes for libparted's exceptions. - Handle exceptions from libparted and pass to Python as necessary. 
The PED_ASSERT things are where libparted aborts, so we may want to catch things before it goes in to libparted so we can throw an exception rather than letting the library abort(). The ped_exception_throw() instances are all libparted's own exception-like system. - Handle exceptions throughout the _ped module code. Unique exceptions as much as possible. - Figure out what, if anything, we can do with timers. They are optional in libparted, but do we want to support them in pyparted? - Error handling in the get and set methods. - Free memory in error handling cases. - Exception handling: - Audit error messages to make them more useful. - All test cases with '# TODO' in the runTest() method. Be sure to uncomment them once you have written the test. - Make sure PyTypeObjects that have a tp_init are allocating memory that the garbage collector knows about. I'm not sure if PyType_GenericAlloc or PyType_GenericNew do this. - Coding policy that we need to make sure we're doing: If object creation fails, we need to use PyObject_GC_Del() to destroy it before throwing an exception at the user. For all other instances where we need to delete or destroy the object, use Py_XDECREF(). Once the ref count is zero, the GC will take over and run dealloc() for that object, which will eventually run PyObject_GC_Del(). Basically, we should only be using PyObject_GC_Del() in the convert functions or in __init__ constructors where we are making a new object for the user. NOTE: If we need to destroy an object due to creation failure and the object we are creating has other PyObject members, call Py_XDECREF on those members rather than destroing them. We can't ensure that there will be no other references to those members, so let the normal object destructor handle PyObject members, but forcefully destroy the object we are trying to create. - destroy() methods don't seem to be destroying the Python object. - Make sure this new code works in Python 3000 - Look through all PyTypeObject objects and see where we can expand their functionality. Can we add str() support, for instance. - Move the constraint functions presently in _ped to be in the __init_() method for _ped.Constraint, similar to what was done for _ped_Disk_init: constraint_new_from_min_max constraint_new_from_min constraint_new_from_max constraint_any constraint_exact - parted module: - Write docstrings for everything - autoconf - More detailed API checks for libparted and Python ... and much much more pyparted-3.6/tests/0000775000076400007640000000000011542323614011370 500000000000000pyparted-3.6/tests/Makefile.in0000664000076400007640000004362011542323606013363 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted # # Copyright (C) 2008, 2009 Red Hat, Inc. 
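#
# A sketch of how this test tree is normally exercised from the top of
# the build tree, assuming the per-directory Makefile.am files list
# their test scripts in TESTS:
#
#   ./configure && make
#   make check
#
# The `color-tests' option passed to AM_INIT_AUTOMAKE in configure.ac
# only affects how the PASS/FAIL summary of `make check' is colorized.
#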
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tests DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = 
@CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = _ped parted MAINTAINERCLEANFILES = Makefile.in all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tests/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tests/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/tests/parted/0000775000076400007640000000000011542323615012650 500000000000000pyparted-3.6/tests/parted/test_filesystem.py0000775000076400007640000001023711540274274016377 00000000000000# # Test cases for the methods in the parted.filesystem module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import parted import unittest # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. 
@unittest.skip("Unimplemented test case.") class FileSystemNewTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemGetSetTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemClobberTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemOpenTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCreateTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCloseTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCheckTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCopyTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemResizeTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemGetResizeConstraintTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemGetPedFileSystemTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(FileSystemNewTestCase()) suite.addTest(FileSystemGetSetTestCase()) suite.addTest(FileSystemClobberTestCase()) suite.addTest(FileSystemOpenTestCase()) suite.addTest(FileSystemCreateTestCase()) suite.addTest(FileSystemCloseTestCase()) suite.addTest(FileSystemCheckTestCase()) suite.addTest(FileSystemCopyTestCase()) suite.addTest(FileSystemResizeTestCase()) suite.addTest(FileSystemGetResizeConstraintTestCase()) suite.addTest(FileSystemGetPedFileSystemTestCase()) suite.addTest(FileSystemStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/test_parted.py0000775000076400007640000001526111542260753015473 00000000000000# # Test cases for the methods in the parted module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # from __future__ import division import _ped import parted import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class FormatBytesTestCase(unittest.TestCase): def runTest(self): self.assertRaises(SyntaxError, parted.formatBytes, 57, "GIB") self.assertEqual(1e-24, parted.formatBytes(1, "YB")) self.assertEqual(1/2**80, parted.formatBytes(1, "YiB")) self.assertEqual(1, parted.formatBytes(1, 'B')) self.assertEqual(1, parted.formatBytes(1e24, 'YB')) self.assertEqual(1, parted.formatBytes(2**80, 'YiB')) class BytesToSectorsTestCase(unittest.TestCase): def runTest(self): self.assertRaises(SyntaxError, parted.sizeToSectors, 9, "yb", 1) self.assertEqual(int(parted.sizeToSectors(7777.0, "B", 512)), parted.sizeToSectors(7777.0, "B", 512)) class GetLabelsTestCase(unittest.TestCase): def runTest(self): self.assertGreater(len(parted.getLabels()), 0) self.assertSetEqual(parted.getLabels('ppcc'), set()) self.assertSetEqual(parted.getLabels('sparc6'), set()) self.assertSetEqual(parted.getLabels('i586'), {'gpt', 'msdos'}) self.assertSetEqual(parted.getLabels('s390'), {'dasd', 'msdos'}) self.assertSetEqual(parted.getLabels('s390x'), {'dasd', 'msdos'}) self.assertSetEqual(parted.getLabels('sparc'), {'sun'}) self.assertSetEqual(parted.getLabels('sparc64'), {'sun'}) self.assertSetEqual(parted.getLabels('ppc'), {'amiga', 'gpt', 'mac', 'msdos'}) self.assertSetEqual(parted.getLabels('ppc64'), {'amiga', 'gpt', 'mac', 'msdos'}) self.assertSetEqual(parted.getLabels('alpha'), {'bsd', 'msdos'}) self.assertSetEqual(parted.getLabels('ia64'), {'gpt', 'msdos'}) class GetDeviceTestCase(RequiresDeviceNode): def runTest(self): # Check that a DiskException is raised for an invalid path self.assertRaises(parted.DeviceException, parted.getDevice, None) self.assertRaises(parted.IOException, parted.getDevice, "") self.assertRaises(parted.IOException, parted.getDevice, "/dev/whatever") # Check that we get a parted.Device back self.assert_(isinstance(parted.getDevice(self.path), parted.Device)) # Make sure the device node paths match self.assertTrue(parted.getDevice(self.path).path == self.path) class GetAllDevicesTestCase(unittest.TestCase): def setUp(self): self.devices = parted.getAllDevices() def runTest(self): # Check self.devices and see that it's a list self.assertTrue(type(self.devices).__name__ == 'list') # And make sure each element of the list is a parted.Device map(lambda s: self.assert_(isinstance(s, parted.Device)), self.devices) @unittest.skip("Unimplemented test case.") class ProbeForSpecificFileSystemTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ProbeFileSystemTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class FreshDiskTestCase(RequiresDevice): def runTest(self): # Make sure we get SyntaxError when using an invalid disk type self.assertRaises(KeyError, parted.freshDisk, self._device, 'cheese') self.assertRaises(KeyError, parted.freshDisk, self._device, 'crackers') # Create a new disk for each disk type key, verify each one # XXX: Skip over dvh 
for now (SGI disk label), which doesn't seem to have # working libparted support. If anyone with an SGI cares, patches welcome. for key in parted.diskType.keys(): if key == 'dvh': continue disk = parted.freshDisk(self._device, key) self.assert_(isinstance(disk, parted.Disk)) self.assertTrue(disk.type == key) # Create a new disk each disk type value, verify each one for value in parted.diskType.values(): if value.name == 'dvh': continue disk = parted.freshDisk(self._device, value) self.assert_(isinstance(disk, parted.Disk)) self.assertTrue(parted.diskType[disk.type] == value) @unittest.skip("Unimplemented test case.") class IsAlignToCylindersTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ToggleAlignToCylindersTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class VersionTestCase(unittest.TestCase): def runTest(self): ver = parted.version() self.assertEquals(ver['libparted'], _ped.libparted_version()) self.assertEquals(ver['pyparted'], _ped.pyparted_version()) # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(FormatBytesTestCase()) suite.addTest(BytesToSectorsTestCase()) suite.addTest(GetLabelsTestCase()) suite.addTest(GetDeviceTestCase()) suite.addTest(GetAllDevicesTestCase()) suite.addTest(ProbeForSpecificFileSystemTestCase()) suite.addTest(ProbeFileSystemTestCase()) suite.addTest(FreshDiskTestCase()) suite.addTest(IsAlignToCylindersTestCase()) suite.addTest(ToggleAlignToCylindersTestCase()) suite.addTest(VersionTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/test_constraint.py0000775000076400007640000001601411542323322016365 00000000000000# # Test cases for the methods in the parted.constraint module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import _ped import parted import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. 
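
# ---------------------------------------------------------------------------
# Editorial sketch -- not part of the original pyparted 3.6 sources.  The
# isSolution/solveNearest cases further down are still stubs; assuming the
# method name implied by ConstraintIsSolutionTestCase (Constraint.isSolution
# taking a parted.Geometry), an implemented case could plausibly look like
# this.  The exact signature is an assumption, not verified API.
class ConstraintIsSolutionSketchTestCase(RequiresDevice):
    def runTest(self):
        geom = parted.Geometry(device=self._device, start=0, length=50)
        c = parted.Constraint(exactGeom=geom)
        # A constraint built from an exact geometry should at least accept
        # that same geometry as a solution.
        self.assertTrue(c.isSolution(geom))
# ---------------------------------------------------------------------------
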
class ConstraintNewTestCase(RequiresDevice): def runTest(self): align1 = parted.Alignment(offset=10, grainSize=5) align2 = parted.Alignment(offset=10, grainSize=5) geom1 = parted.Geometry(device=self._device, start=0, length=50) geom2 = parted.Geometry(device=self._device, start=0, length=100) # Check that not passing enough args to parted.Constraint.__init__ # is caught. self.assertRaises(parted.ConstraintException, parted.Constraint) self.assertRaises(parted.ConstraintException, parted.Constraint, startAlign=align1, endAlign=align2) # And then the correct ways of creating a _ped.Constraint. c = parted.Constraint(minGeom=geom1, maxGeom=geom2) self.assert_(isinstance(c, parted.Constraint)) c = parted.Constraint(minGeom=geom1) self.assert_(isinstance(c, parted.Constraint)) c = parted.Constraint(maxGeom=geom2) self.assert_(isinstance(c, parted.Constraint)) c = parted.Constraint(exactGeom=geom1) self.assert_(isinstance(c, parted.Constraint)) c = parted.Constraint(device=self._device) self.assert_(isinstance(c, parted.Constraint)) c = parted.Constraint(startAlign=align1, endAlign=align2, startRange=geom1, endRange=geom2, minSize=10, maxSize=100) self.assert_(isinstance(c, parted.Constraint)) # Use a _ped.Constraint as the initializer pc = _ped.Constraint(align1.getPedAlignment(), align2.getPedAlignment(), geom1.getPedGeometry(), geom2.getPedGeometry(), 10, 100) c = parted.Constraint(PedConstraint=pc) self.assert_(isinstance(c, parted.Constraint)) self.assertTrue(c.getPedConstraint() == pc) class ConstraintGetSetTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) align1 = parted.Alignment(offset=10, grainSize=5) align2 = parted.Alignment(offset=10, grainSize=5) geom1 = parted.Geometry(device=self._device, start=0, length=50) geom2 = parted.Geometry(device=self._device, start=25, length=50) self.c = parted.Constraint(startAlign=align1, endAlign=align2, startRange=geom1, endRange=geom2, minSize=10, maxSize=100) def runTest(self): # Test that properties work self.assert_(self.c.minSize == 10) self.assert_(self.c.maxSize == 100) self.assert_(isinstance(self.c.startAlign, parted.Alignment)) self.assert_(isinstance(self.c.endAlign, parted.Alignment)) self.assert_(isinstance(self.c.startRange, parted.Geometry)) self.assert_(isinstance(self.c.endRange, parted.Geometry)) # Test that setting directly and getting with getattr works. self.c.minSize = 15 self.c.maxSize = 75 self.assert_(getattr(self.c, "minSize") == 15) self.assert_(getattr(self.c, "maxSize") == 75) self.assert_(isinstance(getattr(self.c, "startAlign"), parted.Alignment)) self.assert_(isinstance(getattr(self.c, "endAlign"), parted.Alignment)) self.assert_(isinstance(getattr(self.c, "startRange"), parted.Geometry)) self.assert_(isinstance(getattr(self.c, "endRange"), parted.Geometry)) # Test that setting with setattr and getting directly works. setattr(self.c, "minSize", 10) setattr(self.c, "maxSize", 90) self.assert_(self.c.minSize == 10) self.assert_(self.c.maxSize == 90) # Test that values have the right type. self.assertRaises(TypeError, setattr, self.c, "minSize", "string") # Test that looking for invalid attributes fails properly. 
self.assertRaises(AttributeError, getattr, self.c, "blah") self.assertRaises(AttributeError, setattr, self.c, "startRange", 47) self.assertRaises(AttributeError, setattr, self.c, "endRange", 47) @unittest.skip("Unimplemented test case.") class ConstraintIntersectTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ConstraintSolveMaxTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ConstraintSolveNearestTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ConstraintIsSolutionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ConstraintGetPedConstraintTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class ConstraintStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(ConstraintNewTestCase()) suite.addTest(ConstraintGetSetTestCase()) suite.addTest(ConstraintIntersectTestCase()) suite.addTest(ConstraintSolveMaxTestCase()) suite.addTest(ConstraintSolveNearestTestCase()) suite.addTest(ConstraintIsSolutionTestCase()) suite.addTest(ConstraintGetPedConstraintTestCase()) suite.addTest(ConstraintStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/test_disk.py0000775000076400007640000002473611540274274015156 00000000000000# # Test cases for the methods in the parted.disk module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import parted import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. 
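
# ---------------------------------------------------------------------------
# Editorial sketch -- not part of the original pyparted 3.6 sources.  Many
# cases below are stubs; since RequiresDisk (baseclass.py) hands each test a
# fresh, in-memory msdos disk label, a minimal implemented case can simply
# verify the invariants of that starting state.  disk.type and
# disk.partitions are used the same way as in test_parted.py and
# test_partition.py.
class DiskFreshStateSketchTestCase(RequiresDisk):
    def runTest(self):
        # A brand new msdos label should report its type and hold no
        # partitions yet.
        self.assertEqual(self._disk.type, "msdos")
        self.assertEqual(len(self._disk.partitions), 0)
# ---------------------------------------------------------------------------
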
@unittest.skip("Unimplemented test case.") class DiskNewTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetSetTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskClobberTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDuplicateTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDestroyTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskCommitTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskCommitToDeviceTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskCommitToOSTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskCheckTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskSupportsFeatureTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskAddPartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskRemovePartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDeletePartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDeleteAllPartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskSetPartitionGeometryTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskMaximizePartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskCalculateMaxPartitionGeometryTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskMinimizeExtendedPartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetPartitionBySectorTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetMaxLogicalPartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetMaxSupportedPartitionCountTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class DiskMaxPartitionLengthTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk self.assertEquals(self._disk.maxPartitionLength, 
4294967295L) class DiskMaxPartitionStartSectorTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk self.assertEquals(self._disk.maxPartitionStartSector, 4294967295L) class DiskGetFlagTestCase(RequiresDisk): def runTest(self): flag = self._disk.getFlag(parted.DISK_CYLINDER_ALIGNMENT) self.assertTrue(isinstance(flag, bool)) class DiskSetFlagTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk self._disk.setFlag(parted.DISK_CYLINDER_ALIGNMENT) flag = self._disk.getFlag(parted.DISK_CYLINDER_ALIGNMENT) self.assertEquals(flag, True) class DiskUnsetFlagTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk self._disk.unsetFlag(parted.DISK_CYLINDER_ALIGNMENT) flag = self._disk.getFlag(parted.DISK_CYLINDER_ALIGNMENT) self.assertEquals(flag, False) class DiskIsFlagAvailableTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk available = self._disk.isFlagAvailable(parted.DISK_CYLINDER_ALIGNMENT) self.assertEquals(available, True) @unittest.skip("Unimplemented test case.") class DiskGetExtendedPartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetLogicalPartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetPrimaryPartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetRaidPartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetLVMPartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetFreeSpaceRegionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetFreeSpacePartitionsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetFirstPartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetPartitionByPathTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetPedDiskTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. 
def suite(): suite = unittest.TestSuite() suite.addTest(DiskNewTestCase()) suite.addTest(DiskGetSetTestCase()) suite.addTest(DiskClobberTestCase()) suite.addTest(DiskDuplicateTestCase()) suite.addTest(DiskDestroyTestCase()) suite.addTest(DiskCommitTestCase()) suite.addTest(DiskCommitToDeviceTestCase()) suite.addTest(DiskCommitToOSTestCase()) suite.addTest(DiskCheckTestCase()) suite.addTest(DiskSupportsFeatureTestCase()) suite.addTest(DiskAddPartitionTestCase()) suite.addTest(DiskRemovePartitionTestCase()) suite.addTest(DiskDeletePartitionTestCase()) suite.addTest(DiskDeleteAllPartitionsTestCase()) suite.addTest(DiskSetPartitionGeometryTestCase()) suite.addTest(DiskMaximizePartitionTestCase()) suite.addTest(DiskCalculateMaxPartitionGeometryTestCase()) suite.addTest(DiskMinimizeExtendedPartitionTestCase()) suite.addTest(DiskGetPartitionBySectorTestCase()) suite.addTest(DiskGetMaxLogicalPartitionsTestCase()) suite.addTest(DiskGetMaxSupportedPartitionCountTestCase()) suite.addTest(DiskMaxPartitionLengthTestCase()) suite.addTest(DiskMaxPartitionStartSectorTestCase()) suite.addTest(DiskGetFlagTestCase()) suite.addTest(DiskSetFlagTestCase()) suite.addTest(DiskUnsetFlagTestCase()) suite.addTest(DiskIsFlagAvailableTestCase()) suite.addTest(DiskGetExtendedPartitionTestCase()) suite.addTest(DiskGetLogicalPartitionsTestCase()) suite.addTest(DiskGetPrimaryPartitionsTestCase()) suite.addTest(DiskGetRaidPartitionsTestCase()) suite.addTest(DiskGetLVMPartitionsTestCase()) suite.addTest(DiskGetFreeSpaceRegionsTestCase()) suite.addTest(DiskGetFreeSpacePartitionsTestCase()) suite.addTest(DiskGetFirstPartitionTestCase()) suite.addTest(DiskGetPartitionByPathTestCase()) suite.addTest(DiskGetPedDiskTestCase()) suite.addTest(DiskStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/Makefile.in0000664000076400007640000003437311542323606014647 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for tests/parted # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tests/parted DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = # If stdout is a non-dumb tty, use colors. If test -t is not supported, # then this fails; a conservative approach. Of course do not redirect # stdout here, just stderr. am__tty_colors = \ red=; grn=; lgn=; blu=; std=; \ test "X$(AM_COLOR_TESTS)" != Xno \ && test "X$$TERM" != Xdumb \ && { test "X$(AM_COLOR_TESTS)" = Xalways || test -t 1 2>/dev/null; } \ && { \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ std=''; \ } DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ 
am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = baseclass.py $(TESTS) MAINTAINERCLEANFILES = Makefile.in *.pyc TESTS_ENVIRONMENT = PYTHONPATH=$(top_builddir)/src/.libs:$(top_builddir)/src $(PYTHON) TESTS = test_parted.py \ test_alignment.py \ test_constraint.py \ test_device.py \ test_disk.py \ test_filesystem.py \ test_geometry.py \ test_partition.py all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tests/parted/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tests/parted/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/tests/parted/baseclass.py0000664000076400007640000000361011536234551015105 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import _ped import parted import os import tempfile import unittest # Base class for any test case that requires a temp device node class RequiresDeviceNode(unittest.TestCase): def setUp(self): (fd, self.path) = tempfile.mkstemp(prefix="temp-device-") f = os.fdopen(fd) f.seek(140000) os.write(fd, "0") def tearDown(self): os.unlink(self.path) # Base class for any test case that requires a parted.Device object first. 
class RequiresDevice(RequiresDeviceNode): def setUp(self): RequiresDeviceNode.setUp(self) self._device = parted.getDevice(self.path) # Base class for any test case that requires a parted.Disk. class RequiresDisk(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) pd = _ped.disk_new_fresh(self._device.getPedDevice(), parted.diskType["msdos"]) self._disk = parted.Disk(PedDisk=pd) pyparted-3.6/tests/parted/test_partition.py0000775000076400007640000001242511540274274016225 00000000000000# # Test cases for the methods in the parted.partition module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import parted import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. 
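
# ---------------------------------------------------------------------------
# Editorial sketch -- not part of the original pyparted 3.6 sources.  Most
# cases below are stubs; reusing the add-partition flow from
# PartitionGetLengthTestCase further down, a minimal implemented case could
# check that a freshly added partition keeps the geometry it was created
# with.  partition.geometry is used as in that test; partition.disk is
# assumed to be a plain attribute of parted.Partition.
class PartitionBasicAttributesSketchTestCase(RequiresDisk):
    def runTest(self):
        geom = parted.Geometry(self._device, start=100, length=100)
        part = parted.Partition(self._disk, parted.PARTITION_NORMAL,
                                geometry=geom)
        constraint = parted.Constraint(exactGeom=geom)
        self._disk.addPartition(part, constraint)
        # The partition added above should keep the requested geometry and
        # point back at the disk it now belongs to.
        self.assertEqual(part.geometry.length, 100)
        self.assertTrue(isinstance(part.disk, parted.Disk))
# ---------------------------------------------------------------------------
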
@unittest.skip("Unimplemented test case.") class PartitionNewTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetSetTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetFlagTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionSetFlagTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionUnsetFlagTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetMaxGeometryTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionIsFlagAvailableTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionNextPartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetSizeTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class PartitionGetLengthTestCase(RequiresDisk): def runTest(self): length = 100 geom = parted.Geometry(self._device, start=100, length=length) part = parted.Partition(self._disk, parted.PARTITION_NORMAL, geometry=geom) constraint = parted.Constraint(exactGeom=geom) self._disk.addPartition(part, constraint) self._disk.commit() part = self._disk.partitions[0] self.assertEqual(part.getLength(), part.geometry.length) self.assertEqual(part.getLength(), length) @unittest.skip("Unimplemented test case.") class PartitionGetFlagsAsStringTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetMaxAvailableSizeTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetDeviceNodeNameTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetPedPartitionTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. 
def suite(): suite = unittest.TestSuite() suite.addTest(PartitionNewTestCase()) suite.addTest(PartitionGetSetTestCase()) suite.addTest(PartitionGetFlagTestCase()) suite.addTest(PartitionSetFlagTestCase()) suite.addTest(PartitionUnsetFlagTestCase()) suite.addTest(PartitionGetMaxGeometryTestCase()) suite.addTest(PartitionIsFlagAvailableTestCase()) suite.addTest(PartitionNextPartitionTestCase()) suite.addTest(PartitionGetSizeTestCase()) suite.addTest(PartitionGetLengthTestCase()) suite.addTest(PartitionGetFlagsAsStringTestCase()) suite.addTest(PartitionGetMaxAvailableSizeTestCase()) suite.addTest(PartitionGetDeviceNodeNameTestCase()) suite.addTest(PartitionGetPedPartitionTestCase()) suite.addTest(PartitionStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/Makefile.am0000664000076400007640000000257111536234551014634 00000000000000# # Makefile.am for tests/parted # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # EXTRA_DIST = baseclass.py $(TESTS) MAINTAINERCLEANFILES = Makefile.in *.pyc TESTS_ENVIRONMENT = PYTHONPATH=$(top_builddir)/src/.libs:$(top_builddir)/src $(PYTHON) TESTS = test_parted.py \ test_alignment.py \ test_constraint.py \ test_device.py \ test_disk.py \ test_filesystem.py \ test_geometry.py \ test_partition.py pyparted-3.6/tests/parted/test_geometry.py0000775000076400007640000001213511540274274016045 00000000000000# # Test cases for the methods in the parted.geometry module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import parted import unittest from baseclass import * # One class per method, multiple tests per class. 
For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. @unittest.skip("Unimplemented test case.") class GeometryNewTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryGetSetTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryCheckTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryContainsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryContainsSectorTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryEqualTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryGetSizeTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class GeometryGetLengthTestCase(RequiresDevice): def runTest(self): length = 137 geom = parted.Geometry(self._device, start=100, length=length) self.assertEqual(geom.getLength(), geom.length) self.assertEqual(geom.getLength(), length) @unittest.skip("Unimplemented test case.") class GeometryIntersectTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryMapTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryOverlapsWithTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryReadTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometrySyncTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryWriteTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryGetPedGeometryTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class GeometryStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. 
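# But first, a rough sketch -- deliberately not wired into that suite -- of
# how the skipped GeometryContainsSectorTestCase above might be filled in.
# It reuses the RequiresDevice fixture like GeometryGetLengthTestCase does
# and assumes containsSector() takes an absolute sector number on the
# device, as the method name implies.
class GeometryContainsSectorSketchTestCase(RequiresDevice):
    def runTest(self):
        # Sectors 10 through 109 inclusive are inside this geometry.
        geom = parted.Geometry(self._device, start=10, length=100)
        self.assertTrue(geom.containsSector(10))
        self.assertTrue(geom.containsSector(109))
        self.assertFalse(geom.containsSector(9))
        self.assertFalse(geom.containsSector(110))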
def suite(): suite = unittest.TestSuite() suite.addTest(GeometryNewTestCase()) suite.addTest(GeometryGetSetTestCase()) suite.addTest(GeometryCheckTestCase()) suite.addTest(GeometryContainsTestCase()) suite.addTest(GeometryContainsSectorTestCase()) suite.addTest(GeometryEqualTestCase()) suite.addTest(GeometryGetSizeTestCase()) suite.addTest(GeometryGetLengthTestCase()) suite.addTest(GeometryIntersectTestCase()) suite.addTest(GeometryMapTestCase()) suite.addTest(GeometryOverlapsWithTestCase()) suite.addTest(GeometryReadTestCase()) suite.addTest(GeometrySyncTestCase()) suite.addTest(GeometryWriteTestCase()) suite.addTest(GeometryGetPedGeometryTestCase()) suite.addTest(GeometryStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/test_alignment.py0000775000076400007640000001376311540274274016200 00000000000000# # Test cases for the methods in the parted.alignment module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import _ped import parted import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class AlignmentNewTestCase(unittest.TestCase): def setUp(self): self.pa = _ped.Alignment(0, 100) def runTest(self): # Check that not passing args to parted.Alignment.__init__ is caught. self.assertRaises(parted.AlignmentException, parted.Alignment) # And then the correct ways of creating a parted.Alignment a = parted.Alignment(offset=0, grainSize=100) self.assert_(isinstance(a, parted.Alignment)) b = parted.Alignment(PedAlignment=self.pa) self.assert_(isinstance(b, parted.Alignment)) # Test for _ped.Alignment equality self.assertTrue(b.getPedAlignment() == self.pa) class AlignmentGetSetTestCase(unittest.TestCase): def setUp(self): self.a = parted.Alignment(offset=27, grainSize=49) def runTest(self): # Test that passing the args to __init__ works. self.assert_(isinstance(self.a, parted.Alignment)) self.assert_(self.a.offset == 27) self.assert_(self.a.grainSize == 49) # Test that setting directly and getting with getattr works. self.a.offset = 10 self.a.grainSize = 90 self.assert_(getattr(self.a, "offset") == 10) self.assert_(getattr(self.a, "grainSize") == 90) # Check that setting with setattr and getting directly works. setattr(self.a, "offset", 20) setattr(self.a, "grainSize", 80) self.assert_(self.a.offset == 20) self.assert_(self.a.grainSize == 80) # Check that values have the right type. 
self.assertRaises(TypeError, setattr, self.a, "offset", "string") # Check that looking for invalid attributes fails properly. self.assertRaises(AttributeError, getattr, self.a, "blah") @unittest.skip("Unimplemented test case.") class AlignmentIntersectTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class AlignmentAlignUpTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class AlignmentAlignDownTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class AlignmentAlignNearestTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class AlignmentIsAlignedTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = parted.Geometry(device=self._device, start=0, length=100) self.a = parted.Alignment(offset=10, grainSize=0) def runTest(self): # Test a couple ways of passing bad arguments. self.assertRaises(TypeError, self.a.isAligned, None, 12) self.assertRaises(TypeError, self.a.isAligned, self.g, None) # Sector must be inside the geometry. self.assert_(self.a.isAligned(self.g, 400) == False) # If grain_size is 0, sector must be the same as offset. self.assert_(self.a.isAligned(self.g, 10) == True) self.assert_(self.a.isAligned(self.g, 0) == False) self.assert_(self.a.isAligned(self.g, 47) == False) # If grain_size is anything else, there's real math involved. self.a.grainSize = 5 self.assert_(self.a.isAligned(self.g, 20) == True) self.assert_(self.a.isAligned(self.g, 23) == False) class AlignmentGetPedAlignmentTestCase(unittest.TestCase): def setUp(self): self.pa = _ped.Alignment(0, 100) self.alignment = parted.Alignment(PedAlignment=self.pa) def runTest(self): # Test to make sure we get a _ped.Alignment self.assert_(isinstance(self.alignment.getPedAlignment(), _ped.Alignment)) # Test for _ped.Alignment equality self.assertTrue(self.alignment.getPedAlignment() == self.pa) @unittest.skip("Unimplemented test case.") class AlignmentStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(AlignmentNewTestCase()) suite.addTest(AlignmentGetSetTestCase()) suite.addTest(AlignmentIntersectTestCase()) suite.addTest(AlignmentAlignUpTestCase()) suite.addTest(AlignmentAlignDownTestCase()) suite.addTest(AlignmentAlignNearestTestCase()) suite.addTest(AlignmentIsAlignedTestCase()) suite.addTest(AlignmentGetPedAlignmentTestCase()) suite.addTest(AlignmentStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/parted/test_device.py0000775000076400007640000001455211540274274015456 00000000000000# # Test cases for the methods in the parted.device module itself # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. 
You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import _ped import parted import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. @unittest.skip("Unimplemented test case.") class DeviceNewTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceGetSetTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceOpenTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceCloseTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceDestroyTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceRemoveFromCacheTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceBeginExternalAccessTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceEndExternalAccessTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceReadTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceWriteTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceSyncTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceCheckTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceStartSectorToCylinderTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceEndSectorToCylinderTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceStartCylinderToSectorTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceEndCylinderToSectorTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceGetSizeTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class DeviceGetLengthTestCase(RequiresDevice): def runTest(self): self.assertEqual(self._device.getLength(), 
self._device.length) @unittest.skip("Unimplemented test case.") class DeviceGetSizeAsSectorsTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceGetConstraintTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceGetPedDeviceTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(DeviceNewTestCase()) suite.addTest(DeviceGetSetTestCase()) suite.addTest(DeviceOpenTestCase()) suite.addTest(DeviceCloseTestCase()) suite.addTest(DeviceDestroyTestCase()) suite.addTest(DeviceRemoveFromCacheTestCase()) suite.addTest(DeviceBeginExternalAccessTestCase()) suite.addTest(DeviceEndExternalAccessTestCase()) suite.addTest(DeviceReadTestCase()) suite.addTest(DeviceWriteTestCase()) suite.addTest(DeviceSyncTestCase()) suite.addTest(DeviceCheckTestCase()) suite.addTest(DeviceStartSectorToCylinderTestCase()) suite.addTest(DeviceEndSectorToCylinderTestCase()) suite.addTest(DeviceStartCylinderToSectorTestCase()) suite.addTest(DeviceEndCylinderToSectorTestCase()) suite.addTest(DeviceGetSizeTestCase()) suite.addTest(DeviceGetLengthTestCase()) suite.addTest(DeviceGetSizeAsSectorsTestCase()) suite.addTest(DeviceGetConstraintTestCase()) suite.addTest(DeviceGetPedDeviceTestCase()) suite.addTest(DeviceStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/0000775000076400007640000000000011542323615012300 500000000000000pyparted-3.6/tests/_ped/test_filesystem.py0000775000076400007640000000713011540274274016025 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # import _ped import unittest # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. 
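# Every case in this module is still unimplemented.  As a rough starting
# point (deliberately not added to the suite below), this sketch reuses the
# RequiresFileSystem fixture from baseclass.py -- which this module does not
# import yet -- and checks that probing the fixture's geometry finds the
# ext2 filesystem it creates, mirroring FileSystemProbeTestCase in
# test_ped.py.
from baseclass import RequiresFileSystem

class FileSystemProbeSketchTestCase(RequiresFileSystem):
    def runTest(self):
        fstype = _ped.file_system_probe(self._geometry)
        self.assertTrue(isinstance(fstype, _ped.FileSystemType))
        self.assertEquals(fstype.name, "ext2")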
@unittest.skip("Unimplemented test case.") class FileSystemNewTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemGetSetTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemClobberTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCreateTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCloseTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCheckTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemCopyTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemResizeTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemGetResizeConstraintTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class FileSystemStrTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(FileSystemNewTestCase()) suite.addTest(FileSystemGetSetTestCase()) suite.addTest(FileSystemClobberTestCase()) suite.addTest(FileSystemCreateTestCase()) suite.addTest(FileSystemCloseTestCase()) suite.addTest(FileSystemCheckTestCase()) suite.addTest(FileSystemCopyTestCase()) suite.addTest(FileSystemResizeTestCase()) suite.addTest(FileSystemGetResizeConstraintTestCase()) suite.addTest(FileSystemStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_ped.py0000775000076400007640000004671311540542030014407 00000000000000# # Test cases for the methods in the _ped module itself - just the pyunit # and pynatmath files. # # Copyright (C) 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest import os import tempfile from baseclass import * # One class per method, multiple tests per class. 
For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class PartitionFlagGetNameTestCase(unittest.TestCase): def runTest(self): for f in [_ped.PARTITION_BOOT, _ped.PARTITION_ROOT, _ped.PARTITION_SWAP, _ped.PARTITION_HIDDEN, _ped.PARTITION_RAID, _ped.PARTITION_LVM, _ped.PARTITION_LBA, _ped.PARTITION_HPSERVICE, _ped.PARTITION_PALO, _ped.PARTITION_PREP, _ped.PARTITION_MSFT_RESERVED, _ped.PARTITION_APPLE_TV_RECOVERY, _ped.PARTITION_BIOS_GRUB, _ped.PARTITION_DIAG]: self.assertNotEquals(_ped.partition_flag_get_name(f), "", "Could not get name for flag %s" % f) self.assertRaises(ValueError, _ped.partition_flag_get_name, -1) self.assertRaises(ValueError, _ped.partition_flag_get_name, 1000) class PartitionFlagGetByNameTestCase(unittest.TestCase): def runTest(self): for f in ["boot", "root", "swap", "hidden", "raid", "lvm", "lba", "hp-service", "palo", "prep", "msftres", "bios_grub"]: self.assertNotEquals(_ped.partition_flag_get_by_name(f), "", "Could not get flag %s" % f) self.assertEquals(_ped.partition_flag_get_by_name("nosuchflag"), 0) class PartitionFlagNextTestCase(unittest.TestCase): def runTest(self): # We should get TypeError when the parameter is invalid self.assertRaises(TypeError, _ped.partition_flag_next) self.assertRaises(TypeError, _ped.partition_flag_next, 'blah') # First flag is 0, keep getting flags until we loop back around # to zero. Make sure each flag we get is an integer. flag = _ped.partition_flag_next(0) self.assertEquals(type(flag).__name__, 'int') while True: flag = _ped.partition_flag_next(flag) if not flag: break self.assertEquals(type(flag).__name__, 'int') class DiskFlagGetNameTestCase(unittest.TestCase): def runTest(self): for f in [_ped.DISK_CYLINDER_ALIGNMENT]: self.assertNotEquals(_ped.disk_flag_get_name(f), "", "Could not get name for flag %s" % f) self.assertRaises(ValueError, _ped.disk_flag_get_name, -1) self.assertRaises(ValueError, _ped.disk_flag_get_name, 1000) class DiskFlagGetByNameTestCase(unittest.TestCase): def runTest(self): for f in ["cylinder_alignment"]: self.assertNotEquals(_ped.disk_flag_get_by_name(f), 0, "Could not get flag %s" % f) self.assertEquals(_ped.disk_flag_get_by_name("nosuchflag"), 0) class DiskFlagNextTestCase(unittest.TestCase): def runTest(self): # We should get TypeError when the parameter is invalid self.assertRaises(TypeError, _ped.disk_flag_next) self.assertRaises(TypeError, _ped.disk_flag_next, 'blah') # First flag is 0, keep getting flags until we loop back around # to zero. Make sure each flag we get is an integer. flag = _ped.disk_flag_next(0) self.assertEquals(type(flag).__name__, 'int') while True: flag = _ped.disk_flag_next(flag) if not flag: break self.assertEquals(type(flag).__name__, 'int') class ConstraintNewFromMinMaxTestCase(RequiresDevice): def runTest(self): self.assertRaises(TypeError, _ped.constraint_new_from_min_max, None) # min is required to be within max, so test various combinations of # that not being the case. self.assertRaises(_ped.CreateException, _ped.constraint_new_from_min_max, _ped.Geometry(self._device, 0, 10), _ped.Geometry(self._device, 15, 25)) self.assertRaises(_ped.CreateException, _ped.constraint_new_from_min_max, _ped.Geometry(self._device, 10, 20), _ped.Geometry(self._device, 15, 25)) # Now test a correct call. 
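        # (Semantics being exercised below: a geometry is a solution of a
        #  min/max constraint only if it contains all of min and lies
        #  entirely inside max.  Here min covers sectors 10-29 and max
        #  covers sectors 0-29.)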
min = _ped.Geometry(self._device, 10, 20) max = _ped.Geometry(self._device, 0, 30) constraint = _ped.constraint_new_from_min_max(min, max) self.assertTrue(isinstance(constraint, _ped.Constraint)) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 10, 20))) self.assertFalse(constraint.is_solution(_ped.Geometry(self._device, 11, 20))) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 5, 25))) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 0, 30))) self.assertFalse(constraint.is_solution(_ped.Geometry(self._device, 0, 35))) class ConstraintNewFromMinTestCase(RequiresDevice): def runTest(self): self.assertRaises(TypeError, _ped.constraint_new_from_min, None) min = _ped.Geometry(self._device, 10, 20) constraint = _ped.constraint_new_from_min(min) self.assertTrue(isinstance(constraint, _ped.Constraint)) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 10, 20))) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 5, 25))) self.assertFalse(constraint.is_solution(_ped.Geometry(self._device, 11, 19))) self.assertFalse(constraint.is_solution(_ped.Geometry(self._device, 15, 25))) class ConstraintNewFromMaxTestCase(RequiresDevice): def runTest(self): self.assertRaises(TypeError, _ped.constraint_new_from_max, None) max = _ped.Geometry(self._device, 10, 20) constraint = _ped.constraint_new_from_max(max) self.assertTrue(isinstance(constraint, _ped.Constraint)) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 10, 20))) self.assertFalse(constraint.is_solution(_ped.Geometry(self._device, 5, 25))) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 11, 19))) self.assertFalse(constraint.is_solution(_ped.Geometry(self._device, 15, 25))) class ConstraintAnyTestCase(RequiresDevice): def runTest(self): self.assertRaises(TypeError, _ped.constraint_any, None) constraint = _ped.constraint_any(self._device) self.assertTrue(isinstance(constraint, _ped.Constraint)) for testGeom in [_ped.Geometry(self._device, 0, 5), _ped.Geometry(self._device, 10, 25), _ped.Geometry(self._device, 0, 100)]: self.assertTrue(constraint.is_solution(testGeom)) class ConstraintExactTestCase(RequiresDevice): def runTest(self): geom = _ped.Geometry(self._device, 0, 100) self.assertRaises(TypeError, _ped.constraint_exact, None) constraint = _ped.constraint_exact(geom) self.assertTrue(isinstance(constraint, _ped.Constraint)) for testGeom in [_ped.Geometry(self._device, 1, 100), _ped.Geometry(self._device, 0, 99), _ped.Geometry(self._device, 10, 20), _ped.Geometry(self._device, 50, 101)]: self.assertFalse(constraint.is_solution(testGeom)) self.assertTrue(constraint.is_solution(_ped.Geometry(self._device, 0, 100))) class DeviceGetTestCase(RequiresDevice): def runTest(self): # Try getting the device we just made. self.assertTrue(isinstance(_ped.device_get(self.path), _ped.Device)) # Try getting a device that doesn't exist. self.assertRaises(_ped.IOException, _ped.device_get, "/blah/whatever") self.assertRaises(_ped.IOException, _ped.device_get, "") self.assertRaises(_ped.DeviceException, _ped.device_get, None) class DeviceGetNextTestCase(unittest.TestCase, BuildList): def runTest(self): # Make sure there are some devices in the system first and then # make a list out of them. That's easier to work with. _ped.device_probe_all() lst = self.getDeviceList(_ped.device_get_next) # Now the test cases. 
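        # The probe above is expected to find at least one device on the
        # machine running the tests, so the list should be non-empty; asking
        # for the device after the last one should raise IndexError rather
        # than wrapping around.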
self.assertGreater(len(lst), 0) self.assertRaises(TypeError, _ped.device_get_next, None) for ele in lst: self.assertTrue(isinstance(ele, _ped.Device)) self.assertRaises(IndexError, _ped.device_get_next, lst[-1]) class DeviceProbeAllTestCase(RequiresDevice, BuildList): def runTest(self): # Since we inherit from RequiresDevice, we can test that the temp # device we created is in the results list. I can't really think of # any other way to test this method except by getting a list of devices # via some other mechanism and comparing that to the device_probe_all # results. _ped.device_probe_all() lst = self.getDeviceList(_ped.device_get_next) self.assertGreater(len(lst), 0) self.assertGreater(len(filter(lambda e: e.path.startswith("/tmp/temp-device-"), lst)), 0) class DeviceFreeAllTestCase(RequiresDevice): def runTest(self): _ped.device_probe_all() self.assertEquals(_ped.device_free_all(), None) class DiskTypeGetTestCase(unittest.TestCase): def runTest(self): for d in ["aix", "amiga", "bsd", "dvh", "gpt", "loop", "mac", "msdos", "pc98","sun"]: t = _ped.disk_type_get(d) self.assertTrue(isinstance(t, _ped.DiskType)) self.assertEquals(t.name, d) self.assertRaises(_ped.UnknownTypeException, _ped.disk_type_get, "nosuch") class DiskTypeGetNextTestCase(unittest.TestCase, BuildList): def runTest(self): lst = self.getDeviceList(_ped.disk_type_get_next) self.assertGreater(len(lst), 0) self.assertRaises(TypeError, _ped.device_get_next, None) for ele in lst: self.assertTrue(isinstance(ele, _ped.DiskType)) self.assertRaises(IndexError, _ped.disk_type_get_next, lst[-1]) class FileSystemProbeTestCase(RequiresFileSystem): def runTest(self): type = _ped.file_system_probe(self._geometry) for name in self._fileSystemType.keys(): if name == 'ext2': self.assertEquals(type.name, name) else: self.assertNotEquals(type.name, name) class FileSystemProbeSpecificTestCase(RequiresFileSystem): def runTest(self): for (name, type,) in self._fileSystemType.items(): if name == 'ext2': result = _ped.file_system_probe_specific(type, self._geometry) # XXX: this should work # we're getting # ValueError: object comparing to must be a _ped.Geometry # at runtime. works fine in pdb. 
#self.assertEquals(result, self._geometry) self.assertTrue(isinstance(result, _ped.Geometry)) self.assertEquals(result.start, self._geometry.start) self.assertEquals(result.end, self._geometry.end) self.assertEquals(result.length, self._geometry.length) self.assertEquals(result.dev, self._device) else: result = _ped.file_system_probe_specific(type, self._geometry) self.assertEquals(result, None) class FileSystemTypeGetTestCase(unittest.TestCase): def runTest(self): for f in ["affs0", "amufs", "apfs1", "asfs", "ext2", "ext3", "fat16", "fat32", "hfs", "hfs+", "hfsx", "hp-ufs", "jfs", "linux-swap", "ntfs", "reiserfs", "sun-ufs", "xfs"]: self.assertTrue(isinstance(_ped.file_system_type_get(f), _ped.FileSystemType), "Could not get fs type %s" % f) self.assertRaises(_ped.UnknownTypeException, _ped.file_system_type_get, "nosuch") class FileSystemTypeGetNextTestCase(unittest.TestCase, BuildList): def runTest(self): lst = self.getDeviceList(_ped.file_system_type_get_next) self.assertGreater(len(lst), 0) self.assertRaises(TypeError, _ped.file_system_type_get_next, None) for ele in lst: self.assertTrue(isinstance(ele, _ped.FileSystemType)) self.assertRaises(IndexError, _ped.file_system_type_get_next, lst[-1]) class PartitionTypeGetNameTestCase(unittest.TestCase): def runTest(self): for t in [_ped.PARTITION_METADATA, _ped.PARTITION_FREESPACE, _ped.PARTITION_EXTENDED, _ped.PARTITION_LOGICAL]: self.assertNotEquals(_ped.partition_type_get_name(t), "", "Could not get name for flag %s" % t) class UnitSetDefaultTestCase(unittest.TestCase): def setUp(self): self._initialDefault = _ped.unit_get_default() def tearDown(self): _ped.unit_set_default(self._initialDefault) def runTest(self): for v in [_ped.UNIT_BYTE, _ped.UNIT_CHS, _ped.UNIT_COMPACT, _ped.UNIT_CYLINDER, _ped.UNIT_GIBIBYTE, _ped.UNIT_GIGABYTE, _ped.UNIT_KIBIBYTE, _ped.UNIT_KILOBYTE, _ped.UNIT_MEBIBYTE, _ped.UNIT_MEGABYTE, _ped.UNIT_PERCENT, _ped.UNIT_SECTOR, _ped.UNIT_TEBIBYTE, _ped.UNIT_TERABYTE]: _ped.unit_set_default(v) self.assertEquals(_ped.unit_get_default(), v, "Could not set unit default to %s" % v) self.assertRaises(ValueError, _ped.unit_set_default, -1) self.assertRaises(ValueError, _ped.unit_set_default, 1000) class UnitGetDefaultTestCase(unittest.TestCase): def runTest(self): self.assertGreaterEqual(_ped.unit_get_default(), 0) class UnitGetSizeTestCase(RequiresDevice): def runTest(self): self.assertEquals(self._device.unit_get_size(_ped.UNIT_SECTOR), 512) self.assertEquals(self._device.unit_get_size(_ped.UNIT_BYTE), 1) self.assertEquals(self._device.unit_get_size(_ped.UNIT_KILOBYTE), 1000) self.assertEquals(self._device.unit_get_size(_ped.UNIT_MEGABYTE), 1000000) self.assertEquals(self._device.unit_get_size(_ped.UNIT_GIGABYTE), 1000000000) self.assertEquals(self._device.unit_get_size(_ped.UNIT_TERABYTE), 1000000000000) self.assertEquals(self._device.unit_get_size(_ped.UNIT_KIBIBYTE), 1024) self.assertEquals(self._device.unit_get_size(_ped.UNIT_MEBIBYTE), 1048576) self.assertEquals(self._device.unit_get_size(_ped.UNIT_GIBIBYTE), 1073741824) self.assertEquals(self._device.unit_get_size(_ped.UNIT_TEBIBYTE), 1099511627776) self.assertEquals(self._device.unit_get_size(_ped.UNIT_CYLINDER), 65536) self.assertEquals(self._device.unit_get_size(_ped.UNIT_CHS), 512) self.assertEquals(self._device.unit_get_size(_ped.UNIT_PERCENT), self._device.length * self._device.sector_size / 100) self.assertRaises(ValueError, self._device.unit_get_size, _ped.UNIT_COMPACT) class UnitGetNameTestCase(unittest.TestCase): def runTest(self): 
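        # These are the short unit names libparted uses when printing sizes;
        # unit numbers outside the valid range should raise ValueError.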
self.assertEquals(_ped.unit_get_name(_ped.UNIT_BYTE), 'B') self.assertEquals(_ped.unit_get_name(_ped.UNIT_CHS), 'chs') self.assertEquals(_ped.unit_get_name(_ped.UNIT_COMPACT), 'compact') self.assertEquals(_ped.unit_get_name(_ped.UNIT_CYLINDER), 'cyl') self.assertEquals(_ped.unit_get_name(_ped.UNIT_GIBIBYTE), 'GiB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_GIGABYTE), 'GB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_KIBIBYTE), 'kiB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_KILOBYTE), 'kB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_MEBIBYTE), 'MiB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_MEGABYTE), 'MB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_PERCENT), '%') self.assertEquals(_ped.unit_get_name(_ped.UNIT_SECTOR), 's') self.assertEquals(_ped.unit_get_name(_ped.UNIT_TEBIBYTE), 'TiB') self.assertEquals(_ped.unit_get_name(_ped.UNIT_TERABYTE), 'TB') self.assertRaises(ValueError, _ped.unit_get_name, -1) self.assertRaises(ValueError, _ped.unit_get_name, 1000) class UnitGetByNameTestCase(unittest.TestCase): def runTest(self): self.assertEquals(_ped.unit_get_by_name('B'), _ped.UNIT_BYTE) self.assertEquals(_ped.unit_get_by_name('chs'), _ped.UNIT_CHS) self.assertEquals(_ped.unit_get_by_name('compact'), _ped.UNIT_COMPACT) self.assertEquals(_ped.unit_get_by_name('cyl'), _ped.UNIT_CYLINDER) self.assertEquals(_ped.unit_get_by_name('GiB'), _ped.UNIT_GIBIBYTE) self.assertEquals(_ped.unit_get_by_name('GB'), _ped.UNIT_GIGABYTE) self.assertEquals(_ped.unit_get_by_name('kiB'), _ped.UNIT_KIBIBYTE) self.assertEquals(_ped.unit_get_by_name('kB'), _ped.UNIT_KILOBYTE) self.assertEquals(_ped.unit_get_by_name('MiB'), _ped.UNIT_MEBIBYTE) self.assertEquals(_ped.unit_get_by_name('MB'), _ped.UNIT_MEGABYTE) self.assertEquals(_ped.unit_get_by_name('%'), _ped.UNIT_PERCENT) self.assertEquals(_ped.unit_get_by_name('s'), _ped.UNIT_SECTOR) self.assertEquals(_ped.unit_get_by_name('TiB'), _ped.UNIT_TEBIBYTE) self.assertEquals(_ped.unit_get_by_name('TB'), _ped.UNIT_TERABYTE) self.assertRaises(_ped.UnknownTypeException, _ped.unit_get_by_name, "blargle") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(ConstraintNewFromMinMaxTestCase()) suite.addTest(ConstraintNewFromMinTestCase()) suite.addTest(ConstraintNewFromMaxTestCase()) suite.addTest(ConstraintAnyTestCase()) suite.addTest(ConstraintExactTestCase()) suite.addTest(DeviceGetTestCase()) suite.addTest(DeviceGetNextTestCase()) suite.addTest(DeviceProbeAllTestCase()) suite.addTest(DeviceFreeAllTestCase()) suite.addTest(DiskFlagGetNameTestCase()) suite.addTest(DiskFlagGetByNameTestCase()) suite.addTest(DiskFlagNextTestCase()) suite.addTest(DiskTypeGetTestCase()) suite.addTest(DiskTypeGetNextTestCase()) suite.addTest(FileSystemProbeTestCase()) suite.addTest(FileSystemProbeSpecificTestCase()) suite.addTest(FileSystemTypeGetTestCase()) suite.addTest(FileSystemTypeGetNextTestCase()) suite.addTest(PartitionFlagGetNameTestCase()) suite.addTest(PartitionFlagGetByNameTestCase()) suite.addTest(PartitionFlagNextTestCase()) suite.addTest(PartitionTypeGetNameTestCase()) suite.addTest(UnitSetDefaultTestCase()) suite.addTest(UnitGetDefaultTestCase()) suite.addTest(UnitGetSizeTestCase()) suite.addTest(UnitGetNameTestCase()) suite.addTest(UnitGetByNameTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_constraint.py0000775000076400007640000002450211540274274016027 00000000000000# # Copyright (C) 2009 Red Hat, Inc. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class ConstraintNewTestCase(RequiresDevice): def runTest(self): align1 = _ped.Alignment(10, 5) align2 = _ped.Alignment(10, 5) geom1 = _ped.Geometry(self._device, 0, 50) geom2 = _ped.Geometry(self._device, 25, 50) # Check that not passing enough args to _ped.Constraint.__init__ is caught. self.assertRaises(TypeError, _ped.Constraint) self.assertRaises(TypeError, _ped.Constraint, align1, align2) # Or the parameters in the wrong order. self.assertRaises(TypeError, _ped.Constraint, align1, align2, 10, 100, geom1, geom2) # And then the correct way of creating a _ped.Constraint. c = _ped.Constraint(align1, align2, geom1, geom2, 10, 100) self.assertTrue(isinstance(c, _ped.Constraint)) class ConstraintGetSetTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) align1 = _ped.Alignment(10, 5) align2 = _ped.Alignment(10, 5) geom1 = _ped.Geometry(self._device, 0, 50) geom2 = _ped.Geometry(self._device, 25, 50) self.c = _ped.Constraint(align1, align2, geom1, geom2, min_size=10, max_size=100) def runTest(self): # Test that passing the kwargs to __init__ works. self.assertEquals(self.c.min_size, 10) self.assertEquals(self.c.max_size, 100) self.assertTrue(isinstance(self.c.start_align, _ped.Alignment)) self.assertTrue(isinstance(self.c.end_align, _ped.Alignment)) self.assertTrue(isinstance(self.c.start_range, _ped.Geometry)) self.assertTrue(isinstance(self.c.end_range, _ped.Geometry)) # Test that setting directly and getting with getattr works. self.c.min_size = 15 self.c.max_size = 75 self.assertEquals(getattr(self.c, "min_size"), 15) self.assertEquals(getattr(self.c, "max_size"), 75) self.assertTrue(isinstance(getattr(self.c, "start_align"), _ped.Alignment)) self.assertTrue(isinstance(getattr(self.c, "end_align"), _ped.Alignment)) self.assertTrue(isinstance(getattr(self.c, "start_range"), _ped.Geometry)) self.assertTrue(isinstance(getattr(self.c, "end_range"), _ped.Geometry)) # Test that setting with setattr and getting directly works. setattr(self.c, "min_size", 10) setattr(self.c, "max_size", 90) self.assertEquals(self.c.min_size, 10) self.assertEquals(self.c.max_size, 90) # Test that values have the right type. self.assertRaises(TypeError, setattr, self.c, "min_size", "string") # Test that looking for invalid attributes fails properly. 
self.assertRaises(AttributeError, getattr, self.c, "blah") # We really shouldn't be allowed to overwrite objects stored in a # _ped.Constraint, but for now there's no way to prevent it. self.c.end_range = 47 self.assertEquals(self.c.end_range, 47) class ConstraintDuplicateTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) align1 = _ped.Alignment(10, 0) align2 = _ped.Alignment(10, 0) geom1 = _ped.Geometry(self._device, 0, 50) geom2 = _ped.Geometry(self._device, 25, 50) self.c = _ped.Constraint(align1, align2, geom1, geom2, min_size=10, max_size=100) def runTest(self): self.dup = self.c.duplicate() self.assertEquals(self.c.min_size, self.dup.min_size) self.assertEquals(self.c.max_size, self.dup.max_size) # duplicate methods should do a deepcopy, so self.dup should have # different references, but the same contents. self.assertNotEquals(repr(self.c), repr(self.dup)) self.assertNotEquals(repr(self.c.start_align), repr(self.dup.start_align)) self.assertEquals(self.c.start_align.offset, self.dup.start_align.offset) self.assertEquals(self.c.start_align.grain_size, self.dup.start_align.grain_size) self.assertNotEquals(repr(self.c.end_align), repr(self.dup.end_align)) self.assertEquals(self.c.end_align.offset, self.dup.end_align.offset) self.assertEquals(self.c.end_align.grain_size, self.dup.end_align.grain_size) self.assertNotEquals(repr(self.c.start_range), repr(self.dup.start_range)) self.assertNotEquals(repr(self.c.start_range.dev), repr(self.dup.start_range.dev)) self.assertEquals(self.c.start_range.dev.path, self.dup.start_range.dev.path) self.assertEquals(self.c.start_range.start, self.dup.start_range.start) self.assertEquals(self.c.start_range.length, self.dup.start_range.length) self.assertEquals(self.c.start_range.end, self.dup.start_range.end) self.assertNotEquals(repr(self.c.end_range), repr(self.dup.end_range)) self.assertNotEquals(repr(self.c.end_range.dev), repr(self.dup.end_range.dev)) self.assertEquals(self.c.end_range.dev.path, self.dup.end_range.dev.path) self.assertEquals(self.c.end_range.start, self.dup.end_range.start) self.assertEquals(self.c.end_range.length, self.dup.end_range.length) self.assertEquals(self.c.end_range.end, self.dup.end_range.end) class ConstraintIntersectTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) align1 = _ped.Alignment(10, 0) align2 = _ped.Alignment(10, 0) geom1 = _ped.Geometry(self._device, 0, 50) geom2 = _ped.Geometry(self._device, 25, 50) self.c1 = _ped.Constraint(align1, align2, geom1, geom2, min_size=10, max_size=100) geom3 = _ped.Geometry(self._device, 10, 50) geom4 = _ped.Geometry(self._device, 30, 40) self.c2 = _ped.Constraint(align1, align2, geom3, geom4, min_size=10, max_size=100) def runTest(self): startAlign = self.c1.start_align.intersect(self.c2.start_align) endAlign = self.c1.end_align.intersect(self.c2.end_align) startRange = self.c1.start_range.intersect(self.c2.start_range) endRange = self.c1.end_range.intersect(self.c2.end_range) minSize = max(self.c1.min_size, self.c2.min_size) maxSize = min(self.c1.max_size, self.c2.max_size) if not startAlign or not endAlign or not startRange or not endRange: expected = None else: expected = _ped.Constraint(startAlign, endAlign, startRange, endRange, min_size=minSize, max_size=maxSize) result = self.c1.intersect(self.c2) self.assertEquals(result, expected) class ConstraintSolveMaxTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.c1 = self._device.get_constraint() def runTest(self): result = self.c1.solve_max() 
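        # solve_max should return the largest geometry that the device's
        # default constraint accepts: it lives on the test device and, per
        # the assertion below, covers all but one sector of it.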
self.assertEquals(result.dev, self._device) self.assertEquals(result.length, self._device.length - 1) class ConstraintSolveNearestTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.c1 = self._device.get_constraint() self.g1 = _ped.Geometry(self._device, 1, 8) def runTest(self): result = self.c1.solve_nearest(self.g1) self.assertEquals(result, self.g1) class ConstraintIsSolutionTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.c1 = self._device.get_constraint() self.g1 = _ped.Geometry(self._device, 1, 8) def runTest(self): self.assertTrue(self.c1.is_solution(self.g1)) class ConstraintStrTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) align1 = _ped.Alignment(10, 0) align2 = _ped.Alignment(10, 0) geom1 = _ped.Geometry(self._device, 0, 50) geom2 = _ped.Geometry(self._device, 25, 50) self.c1 = _ped.Constraint(align1, align2, geom1, geom2, min_size=10, max_size=100) def runTest(self): result = str(self.c1).split('\n') self.assertEquals(result[0], '_ped.Constraint instance --') self.assertTrue(result[1].startswith(' start_align: <_ped.Alignment object at ')) self.assertNotEquals(result[1].find(' end_align: <_ped.Alignment object at '), -1) self.assertTrue(result[2].startswith(' start_range: <_ped.Geometry object at ')) self.assertNotEquals(result[2].find(' end_range: <_ped.Geometry object at '), -1) self.assertEquals(result[3], ' min_size: 10 max_size: 100') # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(ConstraintNewTestCase()) suite.addTest(ConstraintGetSetTestCase()) suite.addTest(ConstraintDuplicateTestCase()) suite.addTest(ConstraintIntersectTestCase()) suite.addTest(ConstraintSolveMaxTestCase()) suite.addTest(ConstraintSolveNearestTestCase()) suite.addTest(ConstraintIsSolutionTestCase()) suite.addTest(ConstraintStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_disk.py0000775000076400007640000002363011540274274014576 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. 
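# Most of the cases below that manipulate partitions are still
# unimplemented.  As a rough starting point (deliberately not added to the
# suite), this sketch shows how the skipped DiskDuplicateTestCase might go,
# following the pattern of ConstraintDuplicateTestCase in test_constraint.py:
# the copy should be a distinct object describing the same label on the same
# device.  It assumes _ped.Disk exposes duplicate(), as that TODO implies.
class DiskDuplicateSketchTestCase(RequiresDisk):
    def runTest(self):
        dup = self._disk.duplicate()
        self.assertTrue(isinstance(dup, _ped.Disk))
        self.assertNotEquals(repr(self._disk), repr(dup))
        self.assertEquals(self._disk.type.name, dup.type.name)
        self.assertEquals(self._disk.dev.path, dup.dev.path)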
class DiskNewUnlabeledTestCase(RequiresDevice): def runTest(self): self.assertRaises(_ped.DiskLabelException, _ped.Disk, self._device) class DiskNewLabeledTestCase(RequiresLabeledDevice): def runTest(self): result = _ped.Disk(self._device) self.assertTrue(isinstance(result, _ped.Disk)) self.assertEquals(result.type.name, 'msdos') @unittest.skip("Unimplemented test case.") class DiskGetSetTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskClobberTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskClobberExcludeTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDuplicateTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDestroyTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") class DiskCommitTestCase(RequiresDisk): def runTest(self): self.assertTrue(self._disk.commit()) class DiskCommitToDevTestCase(RequiresDisk): def runTest(self): self.assertTrue(self._disk.commit_to_dev()) class DiskCommitToOsTestCase(RequiresDisk): def runTest(self): self.assertTrue(self._disk.commit_to_os()) class DiskCheckTestCase(RequiresDisk): def runTest(self): self.assertTrue(self._disk.check()) @unittest.skip("Unimplemented test case.") class DiskPrintTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") class DiskGetPrimaryPartitionCountTestCase(RequiresDisk): def runTest(self): # XXX: this could probably test more self.assertEquals(self._disk.get_primary_partition_count(), 0) class DiskGetLastPartitionNumTestCase(RequiresDisk): def runTest(self): # XXX: this could probably test more self.assertEquals(self._disk.get_last_partition_num(), -1) class DiskGetMaxPrimaryPartitionCountTestCase(RequiresDisk): def runTest(self): self.assertEquals(self._disk.get_max_primary_partition_count(), 4) class DiskGetMaxSupportedPartitionCountTestCase(RequiresDisk): def runTest(self): self.assertEquals(self._disk.get_max_supported_partition_count(), 16) class DiskGetPartitionAlignmentTestCase(RequiresDisk): def runTest(self): alignment = self._disk.get_partition_alignment() self.assertTrue(isinstance(alignment, _ped.Alignment)) # These 2 tests assume an MSDOS label as given by RequiresDisk self.assertEquals(alignment.offset, 0) self.assertEquals(alignment.grain_size, 1) class DiskMaxPartitionLengthTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk self.assertEquals(self._disk.max_partition_length(), 4294967295L) class DiskMaxPartitionStartSectorTestCase(RequiresDisk): def runTest(self): # This test assumes an MSDOS label as given by RequiresDisk self.assertEquals(self._disk.max_partition_start_sector(), 4294967295L) class DiskSetFlagTestCase(RequiresDisk): def runTest(self): # These 2 tests assume an MSDOS label as given by RequiresDisk self._disk.set_flag(_ped.DISK_CYLINDER_ALIGNMENT, 1) self.assertEquals(self._disk.get_flag(_ped.DISK_CYLINDER_ALIGNMENT), True) self._disk.set_flag(_ped.DISK_CYLINDER_ALIGNMENT, 0) self.assertEquals(self._disk.get_flag(_ped.DISK_CYLINDER_ALIGNMENT), False) class DiskGetFlagTestCase(RequiresDisk): def runTest(self): flag = self._disk.get_flag(_ped.DISK_CYLINDER_ALIGNMENT) 
self.assertTrue(isinstance(flag, bool)) class DiskIsFlagAvailableTestCase(RequiresDisk): def runTest(self): # We don't know which flags should be available and which shouldn't, # but we can at least check that there aren't any tracebacks from # trying all of the valid ones. for flag in [_ped.DISK_CYLINDER_ALIGNMENT]: self.assertTrue(isinstance(self._disk.is_flag_available(flag), bool)) # However, an invalid flag should definitely not be available. self.assertFalse(self._disk.is_flag_available(1000)) @unittest.skip("Unimplemented test case.") class DiskAddPartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskRemovePartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDeletePartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskDeleteAllTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskSetPartitionGeomTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskMaxmimzePartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetMaxPartitionGeoemtryTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskMinimizeExtendedPartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskNextPartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetPartitionTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DiskGetPartitionBySectorTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") class DiskExtendedPartitionTestCase(RequiresDisk): def runTest(self): self.assertRaises(_ped.PartitionException, self._disk.extended_partition) class DiskStrTestCase(RequiresDisk): def runTest(self): expected = "_ped.Disk instance --\n dev: %s type: %s" % \ (repr(self._disk.dev), repr(self._disk.type),) self.assertEquals(expected, str(self._disk)) # And then a suite to hold all the test cases for this module. 
def suite(): suite = unittest.TestSuite() suite.addTest(DiskNewUnlabeledTestCase()) suite.addTest(DiskNewLabeledTestCase()) suite.addTest(DiskGetSetTestCase()) suite.addTest(DiskClobberTestCase()) suite.addTest(DiskClobberExcludeTestCase()) suite.addTest(DiskDuplicateTestCase()) suite.addTest(DiskDestroyTestCase()) suite.addTest(DiskCommitTestCase()) suite.addTest(DiskCommitToDevTestCase()) suite.addTest(DiskCommitToOsTestCase()) suite.addTest(DiskCheckTestCase()) suite.addTest(DiskPrintTestCase()) suite.addTest(DiskGetPrimaryPartitionCountTestCase()) suite.addTest(DiskGetLastPartitionNumTestCase()) suite.addTest(DiskGetMaxPrimaryPartitionCountTestCase()) suite.addTest(DiskGetMaxSupportedPartitionCountTestCase()) suite.addTest(DiskGetPartitionAlignmentTestCase()) suite.addTest(DiskMaxPartitionLengthTestCase()) suite.addTest(DiskMaxPartitionStartSectorTestCase()) suite.addTest(DiskSetFlagTestCase()) suite.addTest(DiskGetFlagTestCase()) suite.addTest(DiskIsFlagAvailableTestCase()) suite.addTest(DiskAddPartitionTestCase()) suite.addTest(DiskRemovePartitionTestCase()) suite.addTest(DiskDeletePartitionTestCase()) suite.addTest(DiskDeleteAllTestCase()) suite.addTest(DiskSetPartitionGeomTestCase()) suite.addTest(DiskMaxmimzePartitionTestCase()) suite.addTest(DiskGetMaxPartitionGeoemtryTestCase()) suite.addTest(DiskMinimizeExtendedPartitionTestCase()) suite.addTest(DiskNextPartitionTestCase()) suite.addTest(DiskGetPartitionTestCase()) suite.addTest(DiskGetPartitionBySectorTestCase()) suite.addTest(DiskExtendedPartitionTestCase()) suite.addTest(DiskStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/Makefile.in0000664000076400007640000003444411542323606014276 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted # # Copyright (C) 2008 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
# # Red Hat Author(s): Chris Lumens # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tests/_ped DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = # If stdout is a non-dumb tty, use colors. If test -t is not supported, # then this fails; a conservative approach. Of course do not redirect # stdout here, just stderr. am__tty_colors = \ red=; grn=; lgn=; blu=; std=; \ test "X$(AM_COLOR_TESTS)" != Xno \ && test "X$$TERM" != Xdumb \ && { test "X$(AM_COLOR_TESTS)" = Xalways || test -t 1 2>/dev/null; } \ && { \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ std=''; \ } DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = 
@build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = baseclass.py $(TESTS) MAINTAINERCLEANFILES = Makefile.in *.pyc TESTS_ENVIRONMENT = PYTHONPATH=$(top_builddir)/src/.libs $(PYTHON) TESTS = test_ped.py \ test_alignment.py \ test_chsgeometry.py \ test_constraint.py \ test_device.py \ test_disk.py \ test_disktype.py \ test_filesystem.py \ test_filesystemtype.py \ test_geometry.py \ test_partition.py all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tests/_ped/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tests/_ped/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags: TAGS TAGS: ctags: CTAGS CTAGS: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ echo "$$grn$$dashes"; \ else \ echo "$$red$$dashes"; \ fi; \ echo "$$banner"; \ test -z "$$skipped" || echo "$$skipped"; \ test -z "$$report" || echo "$$report"; \ echo "$$dashes$$std"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool distclean distclean-generic distclean-libtool \ distdir dvi dvi-am html html-am info info-am install \ install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/tests/_ped/baseclass.py0000664000076400007640000001414211540274340014532 00000000000000# # Copyright (C) 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell import _ped import os import tempfile import unittest # Base class for any test case that requires a _ped.Device object first. class RequiresDevice(unittest.TestCase): def setUp(self): (fd, self.path,) = tempfile.mkstemp(prefix="temp-device-") f = os.fdopen(fd) f.seek(140000) os.write(fd, "0") self._device = _ped.device_get(self.path) def tearDown(self): os.unlink(self.path) # Base class for any test case that requires a filesystem on a device. 
class RequiresFileSystem(unittest.TestCase): def setUp(self): self._fileSystemType = {} type = _ped.file_system_type_get_next() self._fileSystemType[type.name] = type while True: try: type = _ped.file_system_type_get_next(type) self._fileSystemType[type.name] = type except: break (fd, self.path,) = tempfile.mkstemp(prefix="temp-device-") f = os.fdopen(fd) f.seek(140000) os.write(fd, "0") f.close() os.system("/sbin/mke2fs -F -q %s" % (self.path,)) self._device = _ped.device_get(self.path) self._geometry = _ped.Geometry(self._device, 0, self._device.length - 1) def tearDown(self): os.unlink(self.path) # Base class for certain alignment tests that require a _ped.Device class RequiresDeviceAlignment(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) def roundDownTo(self, sector, grain_size): if sector < 0: shift = sector % grain_size + grain_size else: shift = sector % grain_size return sector - shift def roundUpTo(self, sector, grain_size): if sector % grain_size: return self.roundDownTo(sector, grain_size) + grain_size else: return sector def closestInsideGeometry(self, alignment, geometry, sector): if alignment.grain_size == 0: if alignment.is_aligned(geometry, sector) and \ ((geometry is None) or geometry.test_sector_inside(sector)): return sector else: return -1 if sector < geometry.start: sector += self.roundUpTo(geometry.start - sector, alignment.grain_size) if sector > geometry.end: sector -= self.roundUpTo(sector - geometry.end, alignment.grain_size) if not geometry.test_sector_inside(sector): return -1 return sector def closest(self, sector, a, b): if a == -1: return b if b == -1: return a if abs(sector - a) < abs(sector - b): return a else: return b # Base class for any test case that requires a labeled device class RequiresLabeledDevice(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) os.system("parted -s %s mklabel msdos" % (self.path,)) # Base class for any test case that requires a _ped.Disk. class RequiresDisk(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self._disk = _ped.disk_new_fresh(self._device, _ped.disk_type_get("msdos")) # Base class for any test case that requires a filesystem made and mounted. class RequiresMount(RequiresDevice): def mkfs(self): os.system("mkfs.ext2 -F -q %s" % self.path) def doMount(self): self.mountpoint = tempfile.mkdtemp() os.system("mount -o loop %s %s" % (self.path, self.mountpoint)) def tearDown(self): os.system("umount %s" % self.mountpoint) os.rmdir(self.mountpoint) RequiresDevice.tearDown(self) # Base class for any test case that requires a _ped.Partition. class RequiresPartition(RequiresDisk): def setUp(self): RequiresDisk.setUp(self) self._part = _ped.Partition(disk=self._disk, type=_ped.PARTITION_NORMAL, start=0, end=100, fs_type=_ped.file_system_type_get("ext2")) # Base class for any test case that requires a hash table of all # _ped.DiskType objects available class RequiresDiskTypes(unittest.TestCase): def setUp(self): self.disktype = {} type = _ped.disk_type_get_next() self.disktype[type.name] = type while True: try: type = _ped.disk_type_get_next(type) self.disktype[type.name] = type except: break # Base class for any test case that requires a list being built via successive # calls of some function. The function must raise IndexError when there's no # more output to add to the return list. This class is most useful for all # those _get_next methods. 
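# A minimal sketch of how the BuildList mixin below is meant to be used,
# assuming the usual pairing with one of the _ped *_get_next functions (the
# test case name here is hypothetical):
#
#   class DeviceGetNextTestCase(RequiresDevice, BuildList):
#       def runTest(self):
#           devices = self.getDeviceList(_ped.device_get_next)
#           self.assertTrue(len(devices) >= 1)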
class BuildList: def getDeviceList(self, func): lst = [] prev = None while True: try: if not prev: prev = func() else: prev = func(prev) lst.append(prev) except IndexError: break return lst pyparted-3.6/tests/_ped/test_partition.py0000775000076400007640000002244411542255627015662 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class PartitionNewTestCase(RequiresDisk): def runTest(self): # Check that not passing args to _ped.Partition.__init__ is caught. self.assertRaises(TypeError, _ped.Partition) # Or passing the arguments in the wrong order. self.assertRaises(TypeError, _ped.Partition, _ped.file_system_type_get("ext2"), _ped.PARTITION_NORMAL, self._disk, 0, 100) part = _ped.Partition(self._disk, _ped.PARTITION_NORMAL, 0, 100, _ped.file_system_type_get("ext2")) self.assertTrue(isinstance(part, _ped.Partition)) # You don't need to pass a filesystem type at all, since this partition # might be FREESPACE or METADATA. part = _ped.Partition(self._disk, _ped.PARTITION_NORMAL, 0, 100) self.assertTrue(isinstance(part, _ped.Partition)) class PartitionGetSetTestCase(RequiresPartition): def runTest(self): # Test that passing the kwargs to __init__ works. self.assertEquals(self._part.disk, self._disk) self.assertTrue(isinstance(self._part.geom, _ped.Geometry)) self.assertEquals(self._part.type, _ped.PARTITION_NORMAL) self.assertEquals(self._part.fs_type.name, "ext2") # Test that setting the RW attributes directly works. self._part.type = _ped.PARTITION_EXTENDED self.assertEquals(getattr(self._part, "type"), _ped.PARTITION_EXTENDED) # Test that setting the RO attributes directly doesn't work. self.assertRaises(AttributeError, setattr, self._part, "num", 1) self.assertRaises(TypeError, setattr, self._part, "fs_type", _ped.file_system_type_get("fat32")) self.assertRaises(TypeError, setattr, self._part, "geom", _ped.Geometry(self._device, 10, 20)) self.assertRaises(TypeError, setattr, self._part, "disk", self._disk) # Check that values have the right type. self.assertRaises(TypeError, setattr, self._part, "type", "blah") # Check that looking for invalid attributes fails properly. 
self.assertRaises(AttributeError, getattr, self._part, "blah") @unittest.skip("Unimplemented test case.") class PartitionDestroyTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class PartitionIsActiveTestCase(RequiresPartition): def runTest(self): # A partition is active as long as it's not METADATA or FREE. for ty in [_ped.PARTITION_NORMAL, _ped.PARTITION_LOGICAL, _ped.PARTITION_EXTENDED, _ped.PARTITION_PROTECTED]: self._part.type = ty self.assertTrue(self._part.is_active()) for ty in [_ped.PARTITION_FREESPACE, _ped.PARTITION_METADATA]: # Can't have a partition of these two types that also has a # filesystem type associated with it. libparted doesn't like # that combination. self._part = _ped.Partition(self._disk, ty, 0, 100) self.assertFalse(self._part.is_active()) @unittest.skip("Unimplemented test case.") class PartitionSetFlagTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class PartitionGetFlagTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class PartitionIsFlagAvailableTestCase(RequiresPartition): def runTest(self): # We don't know which flags should be available and which shouldn't, # but we can at least check that there aren't any tracebacks from # trying all of the valid ones. for flag in [_ped.PARTITION_BOOT, _ped.PARTITION_ROOT, _ped.PARTITION_SWAP, _ped.PARTITION_HIDDEN, _ped.PARTITION_RAID, _ped.PARTITION_LVM, _ped.PARTITION_HPSERVICE, _ped.PARTITION_PALO, _ped.PARTITION_PREP, _ped.PARTITION_MSFT_RESERVED, _ped.PARTITION_APPLE_TV_RECOVERY, _ped.PARTITION_BIOS_GRUB, _ped.PARTITION_DIAG]: self.assertTrue(isinstance(self._part.is_flag_available(flag), bool)) # However, an invalid flag should definitely not be available. self.assertFalse(self._part.is_flag_available(1000)) # Partitions that are inactive should not have any available flags. self._part = _ped.Partition(self._disk, _ped.PARTITION_FREESPACE, 0, 100) self.assertRaises(_ped.PartitionException, self._part.is_flag_available, _ped.PARTITION_BOOT) class PartitionSetSystemTestCase(RequiresPartition): def runTest(self): self.assertTrue(self._part.set_system(_ped.file_system_type_get("fat32"))) self.assertRaises(TypeError, self._part.set_system, 47) # Partitions that are inactive cannot have the system type set. self._part = _ped.Partition(self._disk, _ped.PARTITION_FREESPACE, 0, 100) self.assertRaises(_ped.PartitionException, self._part.set_system, _ped.file_system_type_get("ext2")) class PartitionSetNameTestCase(RequiresPartition): def runTest(self): # The DOS disklabel does not support naming. self.assertRaises(_ped.PartitionException, self._part.set_name, "blah") # These should work. self._disk = _ped.disk_new_fresh(self._device, _ped.disk_type_get("mac")) self._part = _ped.Partition(self._disk, _ped.PARTITION_NORMAL, 0, 100, _ped.file_system_type_get("fat32")) self.assertTrue(self._part.set_name("blah")) self.assertEqual(self._part.get_name(), "blah") # Partitions that are inactive won't work. self._part = _ped.Partition(self._disk, _ped.PARTITION_FREESPACE, 0, 100) self.assertRaises(_ped.PartitionException, self._part.get_name) class PartitionGetNameTestCase(RequiresPartition): def runTest(self): # The DOS disklabel does not support naming. self.assertRaises(_ped.PartitionException, self._part.get_name) # Partitions that are inactive won't work either. 
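# (FREESPACE and METADATA partitions are placeholders managed by libparted
# itself, so asking one for its name raises _ped.PartitionException.)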
self._part = _ped.Partition(self._disk, _ped.PARTITION_FREESPACE, 0, 100) self.assertRaises(_ped.PartitionException, self._part.get_name) # Mac disk labels do support naming, but there still has to be a name. self._disk = _ped.disk_new_fresh(self._device, _ped.disk_type_get("mac")) self._part = _ped.Partition(self._disk, _ped.PARTITION_NORMAL, 0, 100, _ped.file_system_type_get("fat32")) self.assertEquals(self._part.get_name(), "untitled") # Finally, Mac disk labels with a name will work. self._part.set_name("blah") self.assertEqual(self._part.get_name(), "blah") @unittest.skip("Unimplemented test case.") class PartitionIsBusyTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class PartitionGetPathTestCase(RequiresPartition): def runTest(self): self.assertNotEquals(self._part.get_path(), "") @unittest.skip("Unimplemented test case.") class PartitionStrTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(PartitionNewTestCase()) suite.addTest(PartitionGetSetTestCase()) suite.addTest(PartitionDestroyTestCase()) suite.addTest(PartitionIsActiveTestCase()) suite.addTest(PartitionSetFlagTestCase()) suite.addTest(PartitionGetFlagTestCase()) suite.addTest(PartitionIsFlagAvailableTestCase()) suite.addTest(PartitionSetSystemTestCase()) suite.addTest(PartitionSetNameTestCase()) suite.addTest(PartitionGetNameTestCase()) suite.addTest(PartitionIsBusyTestCase()) suite.addTest(PartitionGetPathTestCase()) suite.addTest(PartitionStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/Makefile.am0000664000076400007640000000265011536234551014262 00000000000000# # Makefile.am for pyparted # # Copyright (C) 2008 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # EXTRA_DIST = baseclass.py $(TESTS) MAINTAINERCLEANFILES = Makefile.in *.pyc TESTS_ENVIRONMENT = PYTHONPATH=$(top_builddir)/src/.libs $(PYTHON) TESTS = test_ped.py \ test_alignment.py \ test_chsgeometry.py \ test_constraint.py \ test_device.py \ test_disk.py \ test_disktype.py \ test_filesystem.py \ test_filesystemtype.py \ test_geometry.py \ test_partition.py pyparted-3.6/tests/_ped/test_geometry.py0000775000076400007640000003720711540274274015504 00000000000000# # Copyright (C) 2008 Red Hat, Inc. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class GeometryNewTestCase(RequiresDevice): def runTest(self): # Check that not passing args to _ped.Geometry.__init__ is caught. self.assertRaises(TypeError, _ped.Geometry) # Or passing in the parameters in the wrong order. self.assertRaises(TypeError, _ped.Geometry, 0, self._device, 100) # And then the correct ways of creating a _ped.Geometry. self.assertTrue(isinstance(_ped.Geometry(self._device, 0, 100), _ped.Geometry)) self.assertTrue(isinstance(_ped.Geometry(self._device, 0, 100, 101), _ped.Geometry)) class GeometryGetSetTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): # Test that passing the kwargs to __init__ works. self.assertTrue(isinstance(self.g, _ped.Geometry)) self.assertEquals(self.g.start, 0) self.assertEquals(self.g.length, 100) self.assertEquals(self.g.end, 99) # Test that setting directly and getting with getattr works. self.g.start = 10 self.g.length = 90 self.g.end = 99 self.assertEquals(getattr(self.g, "start"), 10) self.assertEquals(getattr(self.g, "length"), 90) self.assertEquals(getattr(self.g, "end"), 99) # Check that setting with setattr and getting directly works. setattr(self.g, "start", 20) setattr(self.g, "length", 80) setattr(self.g, "end", 99) self.assertEquals(self.g.start, 20) self.assertEquals(self.g.length, 80) self.assertEquals(self.g.end, 99) # Check that values have the right type. self.assertRaises(TypeError, setattr, self.g, "start", "string") # Check that looking for invalid attributes fails properly. 
self.assertRaises(AttributeError, getattr, self.g, "blah") class GeometryDuplicateTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): self.dup = self.g.duplicate() self.assertEquals(self.g.start, self.dup.start) self.assertEquals(self.g.length, self.dup.length) self.assertEquals(self.g.end, self.dup.end) class GeometryIntersectTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g1 = _ped.Geometry(self._device, start=0, length=100) self.g2 = _ped.Geometry(self._device, start=0, length=100) def runTest(self): # g1 and g2 are the same, so their intersection is the same self.i = self.g1.intersect(self.g2) self.assertEquals(self.i.start, self.g1.start) self.assertEquals(self.i.end, self.g1.end) self.assertEquals(self.i.length, self.g1.length) # g2 is the second half of g1, so their intersection is the same as g2. self.g2.set_start(50) self.i = self.g1.intersect(self.g2) self.assertEquals(self.i.start, self.g2.start) self.assertEquals(self.i.end, self.g2.end) self.assertEquals(self.i.length, self.g2.length) # g2 only partially overlaps the end of g1, so they have a more # interesting intersection. self.g1.set_end(75) self.i = self.g1.intersect(self.g2) self.assertEquals(self.i.start, self.g2.start) self.assertEquals(self.i.end, self.g1.end) self.assertEquals(self.i.length, 26) # g1 and g2 do not overlap at all, so they have no intersection. self.g1.set(0, 25) self.g2.set(50, 100) self.assertRaises(ArithmeticError, self.g1.intersect, self.g2) class GeometrySetTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): self.assertEquals(self.g.start, 0) self.assertEquals(self.g.length, 100) # Setting a negative for either value, or a length past the end of # the device should fail. self.assertRaises(_ped.CreateException, self.g.set, 100, -1000) self.assertRaises(_ped.CreateException, self.g.set, -1, 1000) self.assertRaises(_ped.CreateException, self.g.set, 0, 1000000000) class GeometrySetStartTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): self.g.set_start(10) self.assertEquals(self.g.start, 10) self.assertEquals(self.g.length, 90) self.assertEquals(self.g.end, 99) # Setting a negative start or the start past the end of the device # should fail. self.assertRaises(_ped.CreateException, self.g.set_start, -1) self.assertRaises(_ped.CreateException, self.g.set_start, 1000000000) class GeometrySetEndTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): self.g.set_end(50) self.assertEquals(self.g.start, 0) self.assertEquals(self.g.length, 51) self.assertEquals(self.g.end, 50) # Setting a negative end or the end past the end of the device or # before the start should fail. self.assertRaises(_ped.CreateException, self.g.set_end, -1) self.assertRaises(_ped.CreateException, self.g.set_end, 1000000000) self.g.set_start(10) self.assertRaises(_ped.CreateException, self.g.set_end, 5) class GeometryTestOverlapTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g1 = _ped.Geometry(self._device, start=0, length=100) self.g2 = _ped.Geometry(self._device, start=50, length=100) def runTest(self): # g2 occupies the second half of g1, so they overlap. 
self.assertTrue(self.g1.test_overlap(self.g2)) # g2 is entirely contained within g1, so they overlap. self.g2.set_end(75) self.assertTrue(self.g1.test_overlap(self.g2)) # g1 goes from inside g2 to the end, so they overlap. self.g1.set_start(60) self.assertTrue(self.g1.test_overlap(self.g2)) # g2 exists entirely before g1, so they do not overlap. self.g2.set(10, 10) self.assertFalse(self.g1.test_overlap(self.g2)) class GeometryTestInsideTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g1 = _ped.Geometry(self._device, start=0, length=100) self.g2 = _ped.Geometry(self._device, start=0, length=100) def runTest(self): # g1 and g2 are the same, so they exist inside each other. self.assertTrue(self.g1.test_inside(self.g2)) self.assertTrue(self.g2.test_inside(self.g1)) # g2 is entirely contained within g1, so it's inside. self.g2.set_end(75) self.assertTrue(self.g1.test_inside(self.g2)) self.assertFalse(self.g2.test_inside(self.g1)) # g1 goes from inside g2 to the end, so it's not inside. self.g1.set_start(60) self.assertFalse(self.g1.test_inside(self.g2)) self.assertFalse(self.g2.test_inside(self.g1)) # g2 exists entirely before g1, so it's not inside. self.g2.set(10, 10) self.assertFalse(self.g1.test_inside(self.g2)) self.assertFalse(self.g2.test_inside(self.g1)) class GeometryTestEqualTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g1 = _ped.Geometry(self._device, start=0, length=100) self.g2 = _ped.Geometry(self._device, start=0, length=100) def runTest(self): # g1 and g2 have the same start and end. self.assertTrue(self.g1.test_equal(self.g2)) # g1 and g2 have the same end, but different starts. self.g2.set_start(5) self.assertFalse(self.g1.test_equal(self.g2)) # g1 and g2 have the same start, but different ends. self.g2.set_start(5) self.g2.set_end(50) self.assertFalse(self.g1.test_equal(self.g2)) class GeometryTestSectorInsideTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=10, length=100) def runTest(self): # First check the boundary conditions. self.assertTrue(self.g.test_sector_inside(10)) self.assertTrue(self.g.test_sector_inside(109)) self.assertFalse(self.g.test_sector_inside(110)) # Then some sectors that are obviously out. self.assertFalse(self.g.test_sector_inside(0)) self.assertFalse(self.g.test_sector_inside(1000)) self.assertFalse(self.g.test_sector_inside(-1)) class GeometryReadTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=10, length=100) def runTest(self): # First try to read from a device that isn't open yet. self.assertRaises(_ped.IOException, self.g.read, 0, 10) # Our initial device is just full of zeros, so this should read a # whole lot of nothing. self._device.open() self.assertEquals(self.g.read(0, 10), "") # Test bad parameter passing. self.assertRaises(_ped.IOException, self.g.read, -10, 10) self.assertRaises(_ped.IOException, self.g.read, 0, -10) self.assertRaises(TypeError, self.g.read, None, None) # Can't read past the end of the geometry. self.assertRaises(_ped.IOException, self.g.read, 200, 1) self.assertRaises(_ped.IOException, self.g.read, 0, 200) # Now try writing something to the device, then reading to see if # we get the same thing back. self.g.write("1111111111", 0, 1) self.assertEquals(self.g.read(0, 10), "1111111111") # Write five bytes from the string to the geometry, so there's only # one byte present. So, only one "2" should be there when we read. 
self.g.write("2", 20, 5) self.assertEquals(self.g.read(20, 5), "2") self.assertEquals(self.g.read(20, 1), "2") self._device.close() class GeometrySyncTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): self._device.open() # XXX: I don't know of a better way to test this method. self.g.write("1111111111", 0, 1) self.assertEquals(self.g.sync(), 1) self._device.close() class GeometrySyncFastTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) def runTest(self): self._device.open() # XXX: I don't know of a better way to test this method. self.g.write("1111111111", 0, 1) self.assertEquals(self.g.sync_fast(), 1) self._device.close() class GeometryWriteTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=10, length=100) def runTest(self): # First try to write to a device that isn't open yet. self.assertRaises(_ped.IOException, self.g.write, "X", 0, 10) # Now try a real write and make sure we (1) don't get an error code # and (2) the data actually ends up on the device. self._device.open() self.assertNotEquals(self.g.write("X", 0, 10), 0) self.assertEquals(self.g.read(0, 10), "X") self.assertNotEquals(self.g.write("XXXXXXXXXX", 0, 10), 0) self.assertEquals(self.g.read(0, 10), "XXXXXXXXXX") # Test bad parameter passing. self.assertRaises(_ped.IOException, self.g.write, "X", -10, 10) self.assertRaises(_ped.IOException, self.g.write, "X", 0, -10) self.assertRaises(TypeError, self.g.write, None, None, None) # Can't write past the end of the geometry. self.assertRaises(_ped.IOException, self.g.write, "X", 200, 1) self.assertRaises(_ped.IOException, self.g.write, "X", 0, 200) self._device.close() class GeometryCheckTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=10, length=100) def runTest(self): # trivial test case first self.assertRaises(_ped.IOException, self.g.check, 0, 0, 0) self._device.open() self.assertEquals(self.g.check(0, 0, 10), 0) self.assertEquals(self.g.check(0, 0, 50), 0) self._device.close() class GeometryMapTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g1 = _ped.Geometry(self._device, start=10, length=100) self.g2 = _ped.Geometry(self._device, start=10, length=90) def runTest(self): # write a word to the device starting at sector 25 self._device.open() self.g1.write("UNITTEST", 25, 8) val1 = self.g2.read(self.g2.map(self.g1, 25), 8) val2 = self.g1.read(25, 8) self.assertEquals(val1, val2) self._device.close() class GeometryStrTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=10, length=100) def runTest(self): lines = str(self.g).split('\n') self.assertEquals(lines[0], '_ped.Geometry instance --') self.assertEquals(lines[1], ' start: 10 end: 109 length: 100') self.assertTrue(lines[2].startswith(' device: <_ped.Device object at ')) # And then a suite to hold all the test cases for this module. 
def suite(): suite = unittest.TestSuite() suite.addTest(GeometryNewTestCase()) suite.addTest(GeometryGetSetTestCase()) suite.addTest(GeometryDuplicateTestCase()) suite.addTest(GeometryIntersectTestCase()) suite.addTest(GeometrySetTestCase()) suite.addTest(GeometrySetStartTestCase()) suite.addTest(GeometrySetEndTestCase()) suite.addTest(GeometryTestOverlapTestCase()) suite.addTest(GeometryTestInsideTestCase()) suite.addTest(GeometryTestEqualTestCase()) suite.addTest(GeometryTestSectorInsideTestCase()) suite.addTest(GeometryReadTestCase()) suite.addTest(GeometrySyncTestCase()) suite.addTest(GeometrySyncFastTestCase()) suite.addTest(GeometryWriteTestCase()) suite.addTest(GeometryCheckTestCase()) suite.addTest(GeometryMapTestCase()) suite.addTest(GeometryStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_filesystemtype.py0000775000076400007640000000461211540274274016731 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # import _ped import unittest # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class FileSystemTypeNewTestCase(unittest.TestCase): def runTest(self): # You can't create a FileSystemType by hand. self.assertRaises(TypeError, _ped.FileSystemType) class FileSystemTypeGetSetTestCase(unittest.TestCase): def runTest(self): fstype = _ped.file_system_type_get("ext3") self.assertTrue(isinstance(fstype, _ped.FileSystemType)) self.assertEqual(fstype.name, "ext3") self.assertEqual(getattr(fstype, "name"), "ext3") self.assertRaises(AttributeError, setattr, fstype, "name", "vfat") self.assertRaises(AttributeError, getattr, fstype, "junk") class FileSystemTypeStrTestCase(unittest.TestCase): def runTest(self): fstype = _ped.file_system_type_get("ext3") self.assertEqual(str(fstype), "_ped.FileSystemType instance --\n name: ext3") # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(FileSystemTypeNewTestCase()) suite.addTest(FileSystemTypeGetSetTestCase()) suite.addTest(FileSystemTypeStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_chsgeometry.py0000775000076400007640000000553311540274274016177 00000000000000# # Copyright (C) 2008, 2009 Red Hat, Inc. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class CHSGeometryNewTestCase(unittest.TestCase): def runTest(self): # You're not allowed to create a new CHSGeometry object by hand. self.assertRaises(TypeError, _ped.CHSGeometry) class CHSGeometryGetSetTestCase(RequiresDevice): def runTest(self): # A device has a CHSGeometry, so we can use that to attempt accessing # parameters. chs = self._device.hw_geom self.assertTrue(isinstance(chs, _ped.CHSGeometry)) # All attributes are read-only. self.assertRaises(AttributeError, setattr, chs, "cylinders", 47) self.assertRaises(AttributeError, setattr, chs, "heads", 47) self.assertRaises(AttributeError, setattr, chs, "sectors", 47) self.assertTrue(isinstance(chs.cylinders, int)) self.assertTrue(isinstance(chs.heads, int)) self.assertTrue(isinstance(chs.sectors, int)) class CHSGeometryStrTestCase(RequiresDevice): def runTest(self): expected = "_ped.CHSGeometry instance --\n cylinders: %d heads: %d sectors: %d" % (self._device.hw_geom.cylinders, self._device.hw_geom.heads, self._device.hw_geom.sectors,) result = str(self._device.hw_geom) self.assertEquals(result, expected) # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(CHSGeometryNewTestCase()) suite.addTest(CHSGeometryGetSetTestCase()) suite.addTest(CHSGeometryStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_alignment.py0000775000076400007640000002413411540274274015622 00000000000000# # Copyright (C) 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class AlignmentNewTestCase(unittest.TestCase): def runTest(self): # Check that not passing args to _ped.Alignment.__init__ is caught. self.assertRaises(TypeError, _ped.Alignment) # And then the correct ways of creating a _ped.Alignment. self.assertTrue(isinstance(_ped.Alignment(0, 100), _ped.Alignment)) self.assertTrue(isinstance(_ped.Alignment(offset=0, grain_size=100), _ped.Alignment)) class AlignmentGetSetTestCase(unittest.TestCase): def setUp(self): self.a = _ped.Alignment(27, 49) def runTest(self): # Test that passing the args to __init__ works. self.assertTrue(isinstance(self.a, _ped.Alignment)) self.assertEquals(self.a.offset, 27) self.assertEquals(self.a.grain_size, 49) # Test that setting directly and getting with getattr works. self.a.offset = 10 self.a.grain_size = 90 self.assertEquals(getattr(self.a, "offset"), 10) self.assertEquals(getattr(self.a, "grain_size"), 90) # Check that setting with setattr and getting directly works. setattr(self.a, "offset", 20) setattr(self.a, "grain_size", 80) self.assertEquals(self.a.offset, 20) self.assertEquals(self.a.grain_size, 80) # Check that values have the right type. self.assertRaises(TypeError, setattr, self.a, "offset", "string") # Check that looking for invalid attributes fails properly. 
self.assertRaises(AttributeError, getattr, self.a, "blah") class AlignmentDuplicateTestCase(unittest.TestCase): def setUp(self): self.a = _ped.Alignment(27, 49) def runTest(self): self.dup = self.a.duplicate() self.assertEquals(self.a.offset, self.dup.offset) self.assertEquals(self.a.grain_size, self.dup.grain_size) class AlignmentIntersectTestCase(unittest.TestCase): def setUp(self): self.trivialA = _ped.Alignment(47, 0) self.trivialB = _ped.Alignment(47, 0) self.complexA = _ped.Alignment(512, 3) self.complexB = _ped.Alignment(256, 4) def orderAlignments(self, a, b): if a.grain_size < b.grain_size: tmp = a a = b b = tmp return (a, b) # from libparted/cs/natmath.c def extendedEuclid(self, a, b): if b == 0: gcd = a x = 1 y = 0 return (gcd, x, y) (tmp_gcd, tmp_x, tmp_y) = self.extendedEuclid(b, a % b) gcd = tmp_gcd x = tmp_y y = tmp_x - (a / b) * tmp_y return (gcd, x, y) def runTest(self): # trivial test first, result should be a duplicate of trivialA trivial = self.trivialA.intersect(self.trivialB) self.assertEquals(trivial.offset, self.trivialA.offset) self.assertEquals(trivial.grain_size, self.trivialA.grain_size) # complex test second, see libparted/cs/natmath.c for an explanation # of the math behind computing the intersection of two alignments (verifyA, verifyB) = self.orderAlignments(self.complexA, self.complexB) (gcd, x, y) = self.extendedEuclid(verifyA.grain_size, verifyB.grain_size) delta_on_gcd = (verifyB.offset - verifyA.offset) / gcd new_offset = verifyA.offset + x * delta_on_gcd * verifyA.grain_size new_grain_size = verifyA.grain_size * verifyB.grain_size / gcd complex = self.complexA.intersect(self.complexB) self.assertEquals(new_offset, complex.offset) self.assertEquals(new_grain_size, complex.grain_size) class AlignmentAlignUpTestCase(RequiresDeviceAlignment): def setUp(self): RequiresDeviceAlignment.setUp(self) self.trivialA = _ped.Alignment(10, 0) self.complexA = _ped.Alignment(512, 34) self.geometry = _ped.Geometry(self._device, start=0, length=100) self.sector = 47 def runTest(self): # trivial test case first, grain_size is zero expected = self.closestInsideGeometry(self.trivialA, self.geometry, self.trivialA.offset) result = self.trivialA.align_up(self.geometry, self.sector) self.assertEquals(result, expected) # complex test case second, grain_size is not zero tmp = self.roundUpTo(self.sector - self.complexA.offset, self.complexA.grain_size) + self.complexA.offset expected = self.closestInsideGeometry(self.complexA, self.geometry, tmp) result = self.complexA.align_up(self.geometry, self.sector) self.assertEquals(result, expected) class AlignmentAlignDownTestCase(RequiresDeviceAlignment): def setUp(self): RequiresDeviceAlignment.setUp(self) self.trivialA = _ped.Alignment(10, 0) self.complexA = _ped.Alignment(512, 34) self.geometry = _ped.Geometry(self._device, start=0, length=100) self.sector = 47 def runTest(self): # trivial test case first, grain_size is zero expected = self.closestInsideGeometry(self.trivialA, self.geometry, self.trivialA.offset) result = self.trivialA.align_down(self.geometry, self.sector) self.assertEquals(result, expected) # complex test case second, grain_size is not zero tmp = self.roundDownTo(self.sector - self.complexA.offset, self.complexA.grain_size) + self.complexA.offset expected = self.closestInsideGeometry(self.complexA, self.geometry, tmp) result = self.complexA.align_down(self.geometry, self.sector) self.assertEquals(result, expected) class AlignmentAlignNearestTestCase(RequiresDeviceAlignment): def setUp(self): 
RequiresDeviceAlignment.setUp(self) self.trivialA = _ped.Alignment(10, 0) self.complexA = _ped.Alignment(512, 34) self.geometry = _ped.Geometry(self._device, start=0, length=100) self.sector = 47 def runTest(self): # trivial test case first, grain_size is zero tmp = self.closestInsideGeometry(self.trivialA, self.geometry, self.trivialA.offset) expected = self.closest(self.sector, tmp, tmp) result = self.trivialA.align_nearest(self.geometry, self.sector) self.assertEquals(result, expected) # complex test case second, grain_size is not zero tmpA = self.roundUpTo(self.sector - self.complexA.offset, self.complexA.grain_size) + self.complexA.offset tmpA = self.closestInsideGeometry(self.complexA, self.geometry, tmpA) tmpB = self.roundDownTo(self.sector - self.complexA.offset, self.complexA.grain_size) + self.complexA.offset tmpB = self.closestInsideGeometry(self.complexA, self.geometry, tmpB) expected = self.closest(self.sector, tmpA, tmpB) result = self.complexA.align_nearest(self.geometry, self.sector) self.assertEquals(result, expected) class AlignmentIsAlignedTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) self.g = _ped.Geometry(self._device, start=0, length=100) self.a = _ped.Alignment(10, 0) def runTest(self): # Test a couple ways of passing bad arguments. self.assertRaises(TypeError, self.a.is_aligned, None, 12) self.assertRaises(TypeError, self.a.is_aligned, self.g, None) # Sector must be inside the geometry. self.assertFalse(self.a.is_aligned(self.g, 400)) # If grain_size is 0, sector must be the same as offset. self.assertTrue(self.a.is_aligned(self.g, 10)) self.assertFalse(self.a.is_aligned(self.g, 0)) self.assertFalse(self.a.is_aligned(self.g, 47)) # If grain_size is anything else, there's real math involved. self.a.grain_size = 5 self.assertTrue(self.a.is_aligned(self.g, 20)) self.assertFalse(self.a.is_aligned(self.g, 23)) class AlignmentStrTestCase(unittest.TestCase): def setUp(self): self.alignment = _ped.Alignment(10, 0) def runTest(self): expected = "_ped.Alignment instance --\n offset: 10 grain_size: 0" self.assertEquals(str(self.alignment), expected) # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(AlignmentNewTestCase()) suite.addTest(AlignmentGetSetTestCase()) suite.addTest(AlignmentDuplicateTestCase()) suite.addTest(AlignmentIntersectTestCase()) suite.addTest(AlignmentAlignUpTestCase()) suite.addTest(AlignmentAlignDownTestCase()) suite.addTest(AlignmentAlignNearestTestCase()) suite.addTest(AlignmentIsAlignedTestCase()) suite.addTest(AlignmentStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_device.py0000775000076400007640000004237311542255501015102 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class DeviceNewTestCase(unittest.TestCase): def runTest(self): # You're not allowed to create a new Device object by hand. self.assertRaises(TypeError, _ped.Device) class DeviceGetSetTestCase(RequiresDevice): def runTest(self): # All attributes should be readable, but none should be writeable. for attr in ["model", "path", "type", "sector_size", "phys_sector_size", "length", "open_count", "read_only", "external_mode", "dirty", "boot_dirty", "host", "did"]: self.assertNotEquals(getattr(self._device, attr), None) self.assertRaises(AttributeError, setattr, self._device, attr, 47) class DeviceIsBusyTestCase(RequiresDevice): def runTest(self): # Devices aren't busy until they're mounted. self.assertFalse(self._device.is_busy()) # TODO: need to figure out how to make a loopback device look mounted to # libparted # self.mkfs() # self.doMount() # self.assertTrue(self._device.is_busy()) class DeviceOpenTestCase(RequiresDevice): def runTest(self): self.assertTrue(self._device.open()) self.assertEquals(self._device.open_count, 1) self._device.close() # Not allowed to open a device that's already been opened for external # access, so test that now. self._device.begin_external_access() self.assertRaises(_ped.IOException, self._device.open) self._device.end_external_access() self.assertTrue(self._device.open()) # You're allowed to open a device multiple times. It's already been # opened once above. Try to open it again and make sure the count is # is right. self.assertTrue(self._device.open()) self.assertEquals(self._device.open_count, 2) self._device.close() self._device.close() class DeviceCloseTestCase(RequiresDevice): def runTest(self): self._device.open() self.assertTrue(self._device.close()) self.assertEquals(self._device.open_count, 0) # Not allowed to close a device that's already been opened for external # access, so test that now. self._device.open() self._device.begin_external_access() self.assertRaises(_ped.IOException, self._device.close) self._device.end_external_access() self.assertTrue(self._device.close()) # Test opening a device multiple times and then closing it too many. self._device.open() self._device.open() self.assertEquals(self._device.open_count, 2) self._device.close() self.assertEquals(self._device.open_count, 1) self._device.close() self.assertEquals(self._device.open_count, 0) self.assertRaises(_ped.IOException, self._device.close) @unittest.skip("Unimplemented test case.") class DeviceDestroyTestCase(RequiresDevice): def runTest(self): # XXX: still broken, need to fix destroy function in pydevice.c #self.assertEquals(self._device.destroy(), None) self.fail("Unimplemented test case.") class DeviceCacheRemoveTestCase(RequiresDevice): def runTest(self): self.assertEquals(self._device.cache_remove(), None) class DeviceBeginExternalAccessTestCase(RequiresDevice): def runTest(self): # First test external access on a device that's not open. 
self.assertEquals(self._device.external_mode, 0) self.assertTrue(self._device.begin_external_access()) self.assertEquals(self._device.external_mode, 1) self.assertEquals(self._device.open_count, 0) # Now stop external access, open the device, and re-test. self._device.end_external_access() self._device.open() self.assertEquals(self._device.open_count, 1) self.assertTrue(self._device.begin_external_access()) self.assertEquals(self._device.open_count, 1) self._device.end_external_access() self._device.close() class DeviceEndExternalAccessTestCase(RequiresDevice): def runTest(self): # Attempt to end external access on a device that never had it begun. self.assertRaises(_ped.IOException, self._device.end_external_access) # Now test external access on a device that's not open. self._device.begin_external_access() self.assertEquals(self._device.external_mode, 1) self.assertEquals(self._device.open_count, 0) self.assertTrue(self._device.end_external_access()) self.assertEquals(self._device.external_mode, 0) self.assertEquals(self._device.open_count, 0) # Now on a device that's open. self._device.open() self._device.begin_external_access() self.assertEquals(self._device.external_mode, 1) self.assertEquals(self._device.open_count, 1) self.assertTrue(self._device.end_external_access()) self.assertEquals(self._device.external_mode, 0) self.assertEquals(self._device.open_count, 1) self._device.close() @unittest.skip("Unimplemented test case.") class DeviceReadTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class DeviceWriteTestCase(unittest.TestCase): def runTest(self): # TODO self.fail("Unimplemented test case.") class DeviceSyncTestCase(RequiresDevice): def runTest(self): # Can't sync a device that's not open or is in external mode. self.assertRaises(_ped.IOException, self._device.sync) self._device.open() self._device.begin_external_access() self.assertRaises(_ped.IOException, self._device.sync) # But this call should work. self._device.end_external_access() self.assertTrue(self._device.sync()) self._device.close() class DeviceSyncFastTestCase(RequiresDevice): def runTest(self): # Can't sync a device that's not open or is in external mode. self.assertRaises(_ped.IOException, self._device.sync_fast) self._device.open() self._device.begin_external_access() self.assertRaises(_ped.IOException, self._device.sync_fast) # But this call should work. self._device.end_external_access() self.assertTrue(self._device.sync_fast()) self._device.close() class DeviceCheckTestCase(RequiresDevice): def runTest(self): self._device.open() self.assertEquals(self._device.check(0, 20), 20) self._device.close() class DeviceGetConstraintTestCase(RequiresDevice): def runTest(self): # XXX: This test case would be a lot more useful testing on real # hardware with unusual sector sizes. self.assertTrue(isinstance(self._device.get_constraint(), _ped.Constraint)) class DeviceGetMinimalAlignedConstraintTestCase(RequiresDevice): def runTest(self): # XXX: This test case would be a lot more useful testing on real # hardware with unusual sector sizes. 
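# On the plain temp-file device these tests run against, the minimal I/O
# alignment collapses to a single sector (offset 0, grain_size 1), which is
# what the assertions below expect.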
constraint = self._device.get_minimal_aligned_constraint() self.assertTrue(isinstance(constraint, _ped.Constraint)) self.assertEquals(constraint.start_align.offset, 0) self.assertEquals(constraint.start_align.grain_size, 1) self.assertEquals(constraint.end_align.offset, 0) self.assertEquals(constraint.end_align.grain_size, 1) class DeviceGetOptimalAlignedConstraintTestCase(RequiresDevice): def runTest(self): # XXX: This test case would be a lot more useful testing on real # hardware with unusual sector sizes. constraint = self._device.get_optimal_aligned_constraint() self.assertTrue(isinstance(constraint, _ped.Constraint)) # The optimal alignment on the test device has a grain of 2048 sectors # (see DeviceGetOptimumAlignmentTestCase below), which the constraint's # alignments inherit. self.assertEquals(constraint.start_align.offset, 0) self.assertEquals(constraint.start_align.grain_size, 2048) self.assertEquals(constraint.end_align.grain_size, 2048) class DeviceGetMinimumAlignmentTestCase(RequiresDevice): def runTest(self): # XXX: This test case would be a lot more useful testing on real # hardware with unusual sector sizes. alignment = self._device.get_minimum_alignment() self.assertTrue(isinstance(alignment, _ped.Alignment)) self.assertEquals(alignment.grain_size, 1) self.assertEquals(alignment.offset, 0) class DeviceGetOptimumAlignmentTestCase(RequiresDevice): def runTest(self): # XXX: This test case would be a lot more useful testing on real # hardware with unusual sector sizes. alignment = self._device.get_optimum_alignment() self.assertTrue(isinstance(alignment, _ped.Alignment)) self.assertEquals(alignment.grain_size, 2048) self.assertEquals(alignment.offset, 0) class UnitFormatCustomByteTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) pr = "%f" % (47.0 / self._device.unit_get_size(_ped.UNIT_PERCENT),) self.pairs = [(_ped.UNIT_SECTOR, '0s',), (_ped.UNIT_BYTE, '47B',), (_ped.UNIT_KILOBYTE, '0.05kB',), (_ped.UNIT_MEGABYTE, '0.00MB',), (_ped.UNIT_GIGABYTE, '0.00GB',), (_ped.UNIT_TERABYTE, '0.00TB',), (_ped.UNIT_COMPACT, '47.0B',), (_ped.UNIT_CYLINDER, '0cyl',), (_ped.UNIT_CHS, '0,0,0',), (_ped.UNIT_PERCENT, pr[:4] + "%",), (_ped.UNIT_KIBIBYTE, '0.05kiB',), (_ped.UNIT_MEBIBYTE, '0.00MiB',), (_ped.UNIT_GIBIBYTE, '0.00GiB',), (_ped.UNIT_TEBIBYTE, '0.00TiB',)] def runTest(self): for (unit, expected,) in self.pairs: self.assertEquals(self._device.unit_format_custom_byte(47, unit), expected) class UnitFormatByteTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) pr = "%f" % (47.0 / self._device.unit_get_size(_ped.UNIT_PERCENT),) self._initialDefault = _ped.unit_get_default() self.pairs = [(_ped.UNIT_SECTOR, '0s',), (_ped.UNIT_BYTE, '47B',), (_ped.UNIT_KILOBYTE, '0.05kB',), (_ped.UNIT_MEGABYTE, '0.00MB',), (_ped.UNIT_GIGABYTE, '0.00GB',), (_ped.UNIT_TERABYTE, '0.00TB',), (_ped.UNIT_COMPACT, '47.0B',), (_ped.UNIT_CYLINDER, '0cyl',), (_ped.UNIT_CHS, '0,0,0',), (_ped.UNIT_PERCENT, pr[:4] + "%",), (_ped.UNIT_KIBIBYTE, '0.05kiB',), (_ped.UNIT_MEBIBYTE, '0.00MiB',), (_ped.UNIT_GIBIBYTE, '0.00GiB',), (_ped.UNIT_TEBIBYTE, '0.00TiB',)] def runTest(self): for (unit, expected) in self.pairs: _ped.unit_set_default(unit) result = self._device.unit_format_byte(47) self.assertEquals(result, expected) def tearDown(self): _ped.unit_set_default(self._initialDefault) class UnitFormatCustomTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) sector_size = self._device.sector_size size = self._device.unit_get_size(_ped.UNIT_PERCENT) pr = "%f" % ((47.0 * sector_size) / size,) self.pairs = [(_ped.UNIT_SECTOR, '47s',), (_ped.UNIT_BYTE, '24064B',), (_ped.UNIT_KILOBYTE, '24.1kB',),
(_ped.UNIT_MEGABYTE, '0.02MB',), (_ped.UNIT_GIGABYTE, '0.00GB',), (_ped.UNIT_TERABYTE, '0.00TB',), (_ped.UNIT_COMPACT, '24.1kB',), (_ped.UNIT_CYLINDER, '0cyl',), (_ped.UNIT_CHS, '0,1,15',), (_ped.UNIT_PERCENT, pr[:4] + "%",), (_ped.UNIT_KIBIBYTE, '23.5kiB',), (_ped.UNIT_MEBIBYTE, '0.02MiB',), (_ped.UNIT_GIBIBYTE, '0.00GiB',), (_ped.UNIT_TEBIBYTE, '0.00TiB',)] def runTest(self): for (unit, expected) in self.pairs: result = self._device.unit_format_custom(47, unit) self.assertEquals(result, expected) class UnitFormatTestCase(RequiresDevice): def setUp(self): RequiresDevice.setUp(self) sector_size = self._device.sector_size size = self._device.unit_get_size(_ped.UNIT_PERCENT) pr = "%f" % ((47.0 * sector_size) / size,) self._initialDefault = _ped.unit_get_default() self.pairs = [(_ped.UNIT_SECTOR, '47s',), (_ped.UNIT_BYTE, '24064B',), (_ped.UNIT_KILOBYTE, '24.1kB',), (_ped.UNIT_MEGABYTE, '0.02MB',), (_ped.UNIT_GIGABYTE, '0.00GB',), (_ped.UNIT_TERABYTE, '0.00TB',), (_ped.UNIT_COMPACT, '24.1kB',), (_ped.UNIT_CYLINDER, '0cyl',), (_ped.UNIT_CHS, '0,1,15',), (_ped.UNIT_PERCENT, pr[:4] + "%",), (_ped.UNIT_KIBIBYTE, '23.5kiB',), (_ped.UNIT_MEBIBYTE, '0.02MiB',), (_ped.UNIT_GIBIBYTE, '0.00GiB',), (_ped.UNIT_TEBIBYTE, '0.00TiB',)] def runTest(self): for (unit, expected) in self.pairs: _ped.unit_set_default(unit) result = self._device.unit_format(47) self.assertEquals(result, expected) def tearDown(self): _ped.unit_set_default(self._initialDefault) @unittest.skip("Unimplemented test case.") class UnitParseTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") @unittest.skip("Unimplemented test case.") class UnitParseCustomTestCase(unittest.TestCase): # TODO def runTest(self): self.fail("Unimplemented test case.") class DeviceStrTestCase(RequiresDevice): def runTest(self): expected = "_ped.Device instance --\n model: %s path: %s type: %d\n sector_size: %d phys_sector_size: %d\n length: %d open_count: %d read_only: %d\n external_mode: %d dirty: %d boot_dirty: %d\n host: %d did: %d\n hw_geom: %s bios_geom: %s" % (self._device.model, self._device.path, self._device.type, self._device.sector_size, self._device.phys_sector_size, self._device.length, self._device.open_count, self._device.read_only, self._device.external_mode, self._device.dirty, self._device.boot_dirty, self._device.host, self._device.did, repr(self._device.hw_geom), repr(self._device.bios_geom),) self.assertEquals(str(self._device), expected) # And then a suite to hold all the test cases for this module. 
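# Before the suite, a quick worked example of the unit-formatting
# expectations above (an illustrative sketch against the 512-byte-sector
# temporary device the RequiresDevice fixture provides, not an extra test):
# 47 sectors is 47 * 512 = 24064 bytes, so _ped.UNIT_KILOBYTE renders it as
# '24.1kB' and _ped.UNIT_KIBIBYTE as '23.5kiB', while the *_byte variants
# treat the 47 as a byte count instead, hence '47B' and '0.05kB'. The
# UNIT_PERCENT entries keep only the first four characters of the formatted
# percentage, matching the pr[:4] slices in the setUp() methods.
#
#   >>> _ped.unit_set_default(_ped.UNIT_COMPACT)
#   >>> device.unit_format(47)          # device is the fixture's _device
#   '24.1kB'
#   >>> device.unit_format_byte(47)     # same value interpreted as bytes
#   '47.0B'
#
# The suite itself follows.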
def suite(): suite = unittest.TestSuite() suite.addTest(DeviceNewTestCase()) suite.addTest(DeviceGetSetTestCase()) suite.addTest(DeviceIsBusyTestCase()) suite.addTest(DeviceOpenTestCase()) suite.addTest(DeviceCloseTestCase()) suite.addTest(DeviceDestroyTestCase()) suite.addTest(DeviceCacheRemoveTestCase()) suite.addTest(DeviceBeginExternalAccessTestCase()) suite.addTest(DeviceEndExternalAccessTestCase()) suite.addTest(DeviceReadTestCase()) suite.addTest(DeviceWriteTestCase()) suite.addTest(DeviceSyncTestCase()) suite.addTest(DeviceSyncFastTestCase()) suite.addTest(DeviceCheckTestCase()) suite.addTest(DeviceGetConstraintTestCase()) suite.addTest(DeviceGetMinimalAlignedConstraintTestCase()) suite.addTest(DeviceGetOptimalAlignedConstraintTestCase()) suite.addTest(DeviceGetMinimumAlignmentTestCase()) suite.addTest(DeviceGetOptimumAlignmentTestCase()) suite.addTest(UnitFormatCustomByteTestCase()) suite.addTest(UnitFormatByteTestCase()) suite.addTest(UnitFormatCustomTestCase()) suite.addTest(UnitFormatTestCase()) suite.addTest(UnitParseTestCase()) suite.addTest(UnitParseCustomTestCase()) suite.addTest(DeviceStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/_ped/test_disktype.py0000775000076400007640000001126611540274274015502 00000000000000# # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import _ped import unittest from baseclass import * # One class per method, multiple tests per class. For these simple methods, # that seems like good organization. More complicated methods may require # multiple classes and their own test suite. class DiskTypeNewTestCase(unittest.TestCase): def runTest(self): # You're not allowed to create a new DiskType object by hand. self.assertRaises(TypeError, _ped.DiskType) class DiskTypeGetSetTestCase(RequiresDiskTypes): def runTest(self): # All attributes are read-only. 
for name in self.disktype.keys(): t = self.disktype[name] self.assertRaises(AttributeError, setattr, t, "name", "fakename") self.assertRaises(AttributeError, setattr, t, "features", 47) self.assertTrue(isinstance(t.name, str)) self.assertEquals(t.name, name) self.assertTrue(isinstance(t.features, long)) class DiskTypeCheckFeatureTestCase(RequiresDiskTypes): def runTest(self): # The following types have no features [that libparted supports] for name in ['aix', 'sun', 'bsd', 'loop']: self.assertFalse(self.disktype[name].check_feature(_ped.DISK_TYPE_EXTENDED)) self.assertFalse(self.disktype[name].check_feature(_ped.DISK_TYPE_PARTITION_NAME)) # The following types support DISK_TYPE_EXTENDED for name in ['msdos']: self.assertTrue(self.disktype[name].check_feature(_ped.DISK_TYPE_EXTENDED)) self.assertFalse(self.disktype[name].check_feature(_ped.DISK_TYPE_PARTITION_NAME)) # The following types support DISK_TYPE_PARTITION_NAME for name in ['amiga', 'gpt', 'mac', 'pc98']: self.assertFalse(self.disktype[name].check_feature(_ped.DISK_TYPE_EXTENDED)) self.assertTrue(self.disktype[name].check_feature(_ped.DISK_TYPE_PARTITION_NAME)) # The following types support all features for name in ['dvh']: self.assertTrue(self.disktype[name].check_feature(_ped.DISK_TYPE_EXTENDED)) self.assertTrue(self.disktype[name].check_feature(_ped.DISK_TYPE_PARTITION_NAME)) class DiskTypeStrTestCase(RequiresDiskTypes): def runTest(self): self.assertEquals(str(self.disktype['msdos']), '_ped.DiskType instance --\n name: msdos features: 1') self.assertEquals(str(self.disktype['aix']), '_ped.DiskType instance --\n name: aix features: 0') self.assertEquals(str(self.disktype['sun']), '_ped.DiskType instance --\n name: sun features: 0') self.assertEquals(str(self.disktype['amiga']), '_ped.DiskType instance --\n name: amiga features: 2') self.assertEquals(str(self.disktype['gpt']), '_ped.DiskType instance --\n name: gpt features: 2') self.assertEquals(str(self.disktype['mac']), '_ped.DiskType instance --\n name: mac features: 2') self.assertEquals(str(self.disktype['bsd']), '_ped.DiskType instance --\n name: bsd features: 0') self.assertEquals(str(self.disktype['pc98']), '_ped.DiskType instance --\n name: pc98 features: 2') self.assertEquals(str(self.disktype['loop']), '_ped.DiskType instance --\n name: loop features: 0') self.assertEquals(str(self.disktype['dvh']), '_ped.DiskType instance --\n name: dvh features: 3') # And then a suite to hold all the test cases for this module. def suite(): suite = unittest.TestSuite() suite.addTest(DiskTypeNewTestCase()) suite.addTest(DiskTypeGetSetTestCase()) suite.addTest(DiskTypeCheckFeatureTestCase()) suite.addTest(DiskTypeStrTestCase()) return suite s = suite() if __name__ == "__main__": unittest.main(defaultTest='s', verbosity=2) pyparted-3.6/tests/Makefile.am0000664000076400007640000000221711151317256013347 00000000000000# # Makefile.am for pyparted # # Copyright (C) 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. 
You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # SUBDIRS = _ped parted MAINTAINERCLEANFILES = Makefile.in pyparted-3.6/Makefile.am0000664000076400007640000000630211313251731012177 00000000000000# # Makefile.am for pyparted # # Copyright (C) 2007, 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # ACLOCAL_AMFLAGS = -I m4 SUBDIRS = include src tests EXTRA_DIST = AUTHORS BUGS COPYING NEWS README TODO ChangeLog MAINTAINERCLEANFILES = Makefile.in config.guess config.h.in config.sub \ depcomp install-sh ltmain.sh missing ABOUT-NLS \ INSTALL aclocal.m4 configure CLEANFILES = *~ ChangeLog MOSTLYCLEANDIRS = m4 dist-hook: rm -rf `find $(distdir) -type f -name .gitignore` ChangeLog: (GIT_DIR=.git git log > .changelog.tmp && mv .changelog.tmp ChangeLog; rm -f .changelog.tmp) || (touch ChangeLog; echo 'git directory not found: installing possibly empty ChangeLog.' >&2) pychecker: all $(MAKE) -C src/parted pychecker tag: dist-gzip @if [ -z "$(GPGKEY)" ]; then \ echo "GPGKEY environment variable missing, please set this to the key ID" ; \ echo "you want to use to tag the repository." ; \ exit 1 ; \ fi @git tag -u $(GPGKEY) -m "Tag as $(PACKAGE)-$(VERSION)" -f $(PACKAGE)-$(VERSION) @echo "Tagged as $(PACKAGE)-$(VERSION) (GPG signed)" bumpver: @NEWSUBVER=$$((`echo $(PACKAGE_VERSION) |cut -d . -f 2` + 1)) ; \ NEWVERSION=`echo $(PACKAGE_VERSION).$$NEWSUBVER |cut -d . -f 1,3` ; \ sed -i "s/AC_INIT(\[$(PACKAGE_NAME)\], \[$(PACKAGE_VERSION)\], \[$(PACKAGE_BUGREPORT\])/AC_INIT(\[$(PACKAGE_NAME)\], \[$$NEWVERSION\], \[$(PACKAGE_BUGREPORT\])/" configure.ac release: tag rm -rf $(PACKAGE)-$(VERSION) gzip -dc $(PACKAGE)-$(VERSION).tar.gz | tar -xvf - ( cd $(PACKAGE)-$(VERSION) && ./configure && make ) || exit 1 @echo @echo "$(PACKAGE)-$(VERSION).tar.gz is now ready to upload." @echo "Do not forget to push changes to the repository with:" @echo " git push" @echo " git push --tags" @echo @echo "Do not forget to add a new Version entry on the Trac site:" @echo " https://fedorahosted.org/pyparted/admin/ticket/versions" @echo rpmlog: @prevtag="$$(git tag -l | grep -v "^start$$" | tail -n 2 | head -n 1)" ; \ git log --pretty="format:- %s (%ae)" $${prevtag}.. 
| \ sed -e 's/@.*)/)/' | \ sed -e 's/%/%%/g' | \ grep -v "New version" | \ fold -s -w 77 | \ while read line ; do \ if [ ! "$$(echo $$line | cut -c-2)" = "- " ]; then \ echo " $$line" ; \ else \ echo "$$line" ; \ fi ; \ done pyparted-3.6/src/0000775000076400007640000000000011542323614011015 500000000000000pyparted-3.6/src/pynatmath.c0000664000076400007640000002273211170723402013110 00000000000000/* * pynatmath.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include "convert.h" #include "exceptions.h" #include "pydevice.h" #include "pynatmath.h" #include "docstrings/pynatmath.h" #include "typeobjects/pynatmath.h" /* _ped.Alignment functions */ void _ped_Alignment_dealloc(_ped_Alignment *self) { PyObject_GC_UnTrack(self); PyObject_GC_Del(self); } int _ped_Alignment_compare(_ped_Alignment *self, PyObject *obj) { _ped_Alignment *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Alignment_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Alignment"); return -1; } comp = (_ped_Alignment *) obj; if ((self->offset == comp->offset) && (self->grain_size == comp->grain_size)) { return 0; } else { return 1; } } PyObject *_ped_Alignment_richcompare(_ped_Alignment *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Alignment_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Alignment_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Alignment"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Alignment_str(_ped_Alignment *self) { char *ret = NULL; if (asprintf(&ret, "_ped.Alignment instance --\n" " offset: %lld grain_size: %lld", self->offset, self->grain_size) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Alignment_traverse(_ped_Alignment *self, visitproc visit, void *arg) { return 0; } int _ped_Alignment_clear(_ped_Alignment *self) { return 0; } int _ped_Alignment_init(_ped_Alignment *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"offset", "grain_size", NULL}; PedAlignment *alignment = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "LL", kwlist, &self->offset, &self->grain_size)) { return -1; } else { alignment = ped_alignment_new(self->offset, 
self->grain_size); if (!alignment) { PyErr_SetString(CreateException, "Could not create new alignment"); return -1; } self->offset = alignment->offset; self->grain_size = alignment->grain_size; ped_alignment_destroy(alignment); return 0; } } PyObject *_ped_Alignment_get(_ped_Alignment *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Alignment()"); return NULL; } if (!strcmp(member, "offset")) { return PyLong_FromLongLong(self->offset); } else if (!strcmp(member, "grain_size")) { return PyLong_FromLongLong(self->grain_size); } else { PyErr_Format(PyExc_AttributeError, "_ped.Alignment object has no attribute %s", member); return NULL; } } int _ped_Alignment_set(_ped_Alignment *self, PyObject *value, void *closure) { char *member = (char *) closure; if (member == NULL) { return -1; } if (!strcmp(member, "offset")) { self->offset = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } } else if (!strcmp(member, "grain_size")) { self->grain_size = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } } else { return -1; } return 0; } /* 1:1 function mappings for natmath.h in libparted */ PyObject *py_ped_alignment_duplicate(PyObject *s, PyObject *args) { PedAlignment *alignment = NULL, *align = NULL; _ped_Alignment *ret = NULL; alignment = _ped_Alignment2PedAlignment(s); if (alignment == NULL) { return NULL; } align = ped_alignment_duplicate(alignment); ped_alignment_destroy(alignment); if (align) { ret = PedAlignment2_ped_Alignment(align); } else { PyErr_SetString(CreateException, "Could not duplicate alignment"); return NULL; } ped_alignment_destroy(align); return (PyObject *) ret; } PyObject *py_ped_alignment_intersect(PyObject *s, PyObject *args) { PyObject *in_b = NULL; PedAlignment *out_a = NULL, *out_b = NULL, *align = NULL; _ped_Alignment *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Alignment_Type_obj, &in_b)) { return NULL; } out_a = _ped_Alignment2PedAlignment(s); if (out_a == NULL) { return NULL; } out_b = _ped_Alignment2PedAlignment(in_b); if (out_b == NULL) { return NULL; } align = ped_alignment_intersect(out_a, out_b); ped_alignment_destroy(out_a); ped_alignment_destroy(out_b); if (align) { ret = PedAlignment2_ped_Alignment(align); } else { PyErr_SetString(PyExc_ArithmeticError, "Could not find alignment intersection"); return NULL; } ped_alignment_destroy(align); return (PyObject *) ret; } PyObject *py_ped_alignment_align_up(PyObject *s, PyObject *args) { PyObject *in_geom = NULL; PedAlignment *align = NULL; PedGeometry *out_geom = NULL; PedSector sector, ret; if (!PyArg_ParseTuple(args, "O!L", &_ped_Geometry_Type_obj, &in_geom, §or)) { return NULL; } align = _ped_Alignment2PedAlignment(s); if (align == NULL) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (out_geom == NULL) { return NULL; } ret = ped_alignment_align_up(align, out_geom, sector); ped_alignment_destroy(align); if (ret == -1) { PyErr_SetString(PyExc_ArithmeticError, "Could not align up to sector"); return NULL; } return PyLong_FromLongLong(ret); } PyObject *py_ped_alignment_align_down(PyObject *s, PyObject *args) { PyObject *in_geom = NULL; PedAlignment *align = NULL; PedGeometry *out_geom = NULL; PedSector sector, ret; if (!PyArg_ParseTuple(args, "O!L", &_ped_Geometry_Type_obj, &in_geom, §or)) { return NULL; } align = _ped_Alignment2PedAlignment(s); if (align == NULL) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (out_geom == NULL) { return NULL; } ret = 
ped_alignment_align_down(align, out_geom, sector); ped_alignment_destroy(align); if (ret == -1) { PyErr_SetString(PyExc_ArithmeticError, "Could not align down to sector"); return NULL; } return PyLong_FromLongLong(ret); } PyObject *py_ped_alignment_align_nearest(PyObject *s, PyObject *args) { PyObject *in_geom = NULL; PedAlignment *align = NULL; PedGeometry *out_geom = NULL; PedSector sector, ret; if (!PyArg_ParseTuple(args, "O!L", &_ped_Geometry_Type_obj, &in_geom, §or)) { return NULL; } align = _ped_Alignment2PedAlignment(s); if (align == NULL) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (out_geom == NULL) { return NULL; } ret = ped_alignment_align_nearest(align, out_geom, sector); ped_alignment_destroy(align); if (ret == -1) { PyErr_SetString(PyExc_ArithmeticError, "Could not align to closest sector"); return NULL; } return PyLong_FromLongLong(ret); } PyObject *py_ped_alignment_is_aligned(PyObject *s, PyObject *args) { int ret = -1; PyObject *in_geom = NULL; PedAlignment *align = NULL; PedGeometry *out_geom = NULL; PedSector sector; if (!PyArg_ParseTuple(args, "O!L", &_ped_Geometry_Type_obj, &in_geom, §or)) { return NULL; } align = _ped_Alignment2PedAlignment(s); if (align == NULL) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (out_geom == NULL) { return NULL; } ret = ped_alignment_is_aligned(align, out_geom, sector); ped_alignment_destroy(align); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/pydevice.c0000664000076400007640000006274411313012376012722 00000000000000/* * pydevice.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include "convert.h" #include "exceptions.h" #include "pyconstraint.h" #include "pydevice.h" #include "docstrings/pydevice.h" #include "typeobjects/pydevice.h" /* _ped.CHSGeometry functions */ void _ped_CHSGeometry_dealloc(_ped_CHSGeometry *self) { PyObject_GC_UnTrack(self); PyObject_GC_Del(self); } int _ped_CHSGeometry_compare(_ped_CHSGeometry *self, PyObject *obj) { _ped_CHSGeometry *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_CHSGeometry_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.CHSGeometry"); return -1; } comp = (_ped_CHSGeometry *) obj; if ((self->cylinders == comp->cylinders) && (self->heads == comp->heads) && (self->sectors == comp->sectors)) { return 0; } else { return 1; } } PyObject *_ped_CHSGeometry_richcompare(_ped_CHSGeometry *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_CHSGeometry_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_CHSGeometry_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.CHSGeometry"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_CHSGeometry_str(_ped_CHSGeometry *self) { char *ret = NULL; if (asprintf(&ret, "_ped.CHSGeometry instance --\n" " cylinders: %d heads: %d sectors: %d", self->cylinders, self->heads, self->sectors) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_CHSGeometry_traverse(_ped_CHSGeometry *self, visitproc visit, void *arg) { return 0; } int _ped_CHSGeometry_clear(_ped_CHSGeometry *self) { return 0; } PyObject *_ped_CHSGeometry_get(_ped_CHSGeometry *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.CHSGeometry()"); return NULL; } if (!strcmp(member, "cylinders")) { return Py_BuildValue("i", self->cylinders); } else if (!strcmp(member, "heads")) { return Py_BuildValue("i", self->heads); } else if (!strcmp(member, "sectors")) { return Py_BuildValue("i", self->sectors); } else { PyErr_Format(PyExc_AttributeError, "_ped.CHSGeometry object has no attribute %s", member); return NULL; } } /* _ped.Device functions */ void _ped_Device_dealloc(_ped_Device *self) { PyObject_GC_UnTrack(self); free(self->model); free(self->path); Py_CLEAR(self->hw_geom); self->hw_geom = NULL; Py_CLEAR(self->bios_geom); self->bios_geom = NULL; PyObject_GC_Del(self); } int _ped_Device_compare(_ped_Device *self, PyObject *obj) { _ped_Device *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Device_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Device"); return -1; } comp = (_ped_Device *) obj; if ((!strcmp(self->model, comp->model)) && (!strcmp(self->path, comp->path)) && (self->type == comp->type) && (self->sector_size == comp->sector_size) && (self->phys_sector_size == comp->phys_sector_size) && (self->length == comp->length) && (self->open_count == comp->open_count) && (self->read_only == comp->read_only) && (self->external_mode == comp->external_mode) && (self->dirty == comp->dirty) && (self->boot_dirty == comp->dirty) && 
(_ped_CHSGeometry_Type_obj.tp_richcompare(self->hw_geom, comp->hw_geom, Py_EQ)) && (_ped_CHSGeometry_Type_obj.tp_richcompare(self->bios_geom, comp->bios_geom, Py_EQ)) && (self->host == comp->host) && (self->did == comp->did)) { return 0; } else { return 1; } } PyObject *_ped_Device_richcompare(_ped_Device *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Device_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Device_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Device"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Device_str(_ped_Device *self) { char *ret = NULL; char *hw_geom = NULL, *bios_geom = NULL; hw_geom = PyString_AsString(_ped_CHSGeometry_Type_obj.tp_repr(self->hw_geom)); if (hw_geom == NULL) { return NULL; } bios_geom = PyString_AsString(_ped_CHSGeometry_Type_obj.tp_repr(self->bios_geom)); if (bios_geom == NULL) { return NULL; } if (asprintf(&ret, "_ped.Device instance --\n" " model: %s path: %s type: %lld\n" " sector_size: %lld phys_sector_size: %lld\n" " length: %lld open_count: %d read_only: %d\n" " external_mode: %d dirty: %d boot_dirty: %d\n" " host: %hd did: %hd\n" " hw_geom: %s bios_geom: %s", self->model, self->path, self->type, self->sector_size, self->phys_sector_size, self->length, self->open_count, self->read_only, self->external_mode, self->dirty, self->boot_dirty, self->host, self->did, hw_geom, bios_geom) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Device_traverse(_ped_Device *self, visitproc visit, void *arg) { int err; if (self->hw_geom) { if ((err = visit(self->hw_geom, arg))) { return err; } } if (self->bios_geom) { if ((err = visit(self->bios_geom, arg))) { return err; } } return 0; } int _ped_Device_clear(_ped_Device *self) { Py_CLEAR(self->hw_geom); self->hw_geom = NULL; Py_CLEAR(self->bios_geom); self->bios_geom = NULL; return 0; } PyObject *_ped_Device_get(_ped_Device *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Device()"); return NULL; } if (!strcmp(member, "model")) { if (self->model != NULL) return PyString_FromString(self->model); else return PyString_FromString(""); } else if (!strcmp(member, "path")) { if (self->path != NULL) return PyString_FromString(self->path); else return PyString_FromString(""); } else if (!strcmp(member, "type")) { return PyLong_FromLongLong(self->type); } else if (!strcmp(member, "sector_size")) { return PyLong_FromLongLong(self->sector_size); } else if (!strcmp(member, "phys_sector_size")) { return PyLong_FromLongLong(self->phys_sector_size); } else if (!strcmp(member, "length")) { return PyLong_FromLongLong(self->length); } else if (!strcmp(member, "open_count")) { return Py_BuildValue("i", self->open_count); } else if (!strcmp(member, "read_only")) { return Py_BuildValue("i", self->read_only); } else if (!strcmp(member, "external_mode")) { return Py_BuildValue("i", self->external_mode); } else if (!strcmp(member, "dirty")) { return Py_BuildValue("i", self->dirty); } else if (!strcmp(member, "boot_dirty")) { return Py_BuildValue("i", self->boot_dirty); } else if (!strcmp(member, "host")) { return Py_BuildValue("h", self->host); } else if (!strcmp(member, "did")) { return 
Py_BuildValue("h", self->did); } else { PyErr_Format(PyExc_AttributeError, "_ped.Device object has no attribute %s", member); return NULL; } } /* * Returns the _ped.DiskType for the specified _ped.Device. * Even though this function is part of pydisk.c, it's a method * on _ped.Device since it operates on _ped.Device objects and * not on _ped.Disk objects. */ PyObject *py_ped_disk_probe(PyObject *s, PyObject *args) { PedDevice *device = NULL; PedDiskType *type = NULL; _ped_DiskType *ret = NULL; device = _ped_Device2PedDevice(s); if (device) { type = ped_disk_probe(device); if (type == NULL) { PyErr_Format(IOException, "Could not probe device %s", device->path); return NULL; } ret = PedDiskType2_ped_DiskType(type); if (ret == NULL) { return NULL; } } return (PyObject *) ret; } /* 1:1 function mappings for device.h in libparted */ PyObject *py_ped_device_probe_all(PyObject *s, PyObject *args) { ped_device_probe_all(); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_device_free_all(PyObject *s, PyObject *args) { ped_device_free_all(); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_device_get(PyObject *s, PyObject *args) { PedDevice *device = NULL; _ped_Device *ret = NULL; char *path = NULL; if (!PyArg_ParseTuple(args, "z", &path)) { return NULL; } if (path == NULL) { PyErr_Format(DeviceException, "Could not find device for empty path"); return NULL; } device = ped_device_get(path); if (device) { ret = PedDevice2_ped_Device(device); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DeviceException, "Could not find device for path %s", path); return NULL; } return (PyObject *) ret; } PyObject *py_ped_device_get_next(PyObject *s, PyObject *args) { PyObject *in_device = NULL; PedDevice *cur = NULL, *next = NULL; _ped_Device *ret = NULL; if (!PyArg_ParseTuple(args, "|O!", &_ped_Device_Type_obj, &in_device)) { return NULL; } if (in_device) { cur = _ped_Device2PedDevice(in_device); if (!cur) { return NULL; } } next = ped_device_get_next(cur); if (next) { ret = PedDevice2_ped_Device(next); return (PyObject *) ret; } else { PyErr_SetNone(PyExc_IndexError); return NULL; } } PyObject *py_ped_device_is_busy(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } ret = ped_device_is_busy(device); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_open(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } ret = ped_device_open(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not open device %s", device->path); return NULL; } ((_ped_Device *) s)->open_count = device->open_count; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_close(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (!device->open_count) { PyErr_Format(IOException, "Device %s is not open.", 
device->path); return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } ret = ped_device_close(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not close device %s", device->path); return NULL; } ((_ped_Device *) s)->open_count = device->open_count; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_destroy(PyObject *s, PyObject *args) { _ped_Device *dev = (_ped_Device *) s; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } ped_device_destroy(device); Py_CLEAR(dev->hw_geom); dev->hw_geom = NULL; Py_CLEAR(dev->bios_geom); dev->bios_geom = NULL; Py_CLEAR(dev); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_device_cache_remove(PyObject *s, PyObject *args) { PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } ped_device_cache_remove(device); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_device_begin_external_access(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } ret = ped_device_begin_external_access(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not begin external access mode on device %s", device->path); return NULL; } ((_ped_Device *) s)->external_mode = device->external_mode; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_end_external_access(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (!device->external_mode) { PyErr_Format(IOException, "Device %s is not open for external access.", device->path); return NULL; } ret = ped_device_end_external_access(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not end external access mode on device %s", device->path); return NULL; } ((_ped_Device *) s)->external_mode = device->external_mode; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_read(PyObject *s, PyObject *args) { PyObject *ret = NULL; PedSector start, count; PedDevice *device = NULL; char *out_buf = NULL; if (!PyArg_ParseTuple(args, "LL", &start, &count)) { return NULL; } device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (!device->open_count) { PyErr_Format(IOException, "Device %s is not open.", device->path); return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } if ((out_buf = malloc(device->sector_size * count)) == NULL) { return PyErr_NoMemory(); } if (ped_device_read(device, out_buf, start, count) == 0) { if (partedExnRaised) { 
partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not read from device %s", device->path); free(out_buf); return NULL; } ret = PyString_FromString(out_buf); free(out_buf); return ret; } PyObject *py_ped_device_write(PyObject *s, PyObject *args) { PyObject *in_buf = NULL; PedSector start, count, ret; PedDevice *device = NULL; void *out_buf = NULL; if (!PyArg_ParseTuple(args, "OLL", &in_buf, &start, &count)) { return NULL; } device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } out_buf = PyCObject_AsVoidPtr(in_buf); if (out_buf == NULL) { return NULL; } if (!device->open_count) { PyErr_Format(IOException, "Device %s is not open.", device->path); return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } ret = ped_device_write(device, out_buf, start, count); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not write to device %s", device->path); return NULL; } return PyLong_FromLongLong(ret); } PyObject *py_ped_device_sync(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (!device->open_count) { PyErr_Format(IOException, "Device %s is not open.", device->path); return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } ret = ped_device_sync(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not sync device %s", device->path); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_sync_fast(PyObject *s, PyObject *args) { int ret = -1; PedDevice *device = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (!device->open_count) { PyErr_Format(IOException, "Device %s is not open.", device->path); return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } ret = ped_device_sync_fast(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(IOException, "Could not sync device %s", device->path); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_device_check(PyObject *s, PyObject *args) { PedSector start, count, ret; PedDevice *device = NULL; char *out_buf = NULL; if (!PyArg_ParseTuple(args, "LL", &start, &count)) { return NULL; } device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } if (!device->open_count) { PyErr_Format(IOException, "Device %s is not open.", device->path); return NULL; } if (device->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", device->path); return NULL; } if ((out_buf = malloc(device->sector_size * 
32)) == NULL) { return PyErr_NoMemory(); } ret = ped_device_check(device, out_buf, start, count); free(out_buf); return PyLong_FromLongLong(ret); } PyObject *py_ped_device_get_constraint(PyObject *s, PyObject *args) { PedDevice *device = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } constraint = ped_device_get_constraint(device); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(CreateException, "Could not create constraint"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_device_get_minimal_aligned_constraint(PyObject *s, PyObject *args) { PedDevice *device = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } constraint = ped_device_get_minimal_aligned_constraint(device); if (!constraint) { PyErr_SetString(CreateException, "Could not create constraint"); return NULL; } ret = PedConstraint2_ped_Constraint(constraint); ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_device_get_optimal_aligned_constraint(PyObject *s, PyObject *args) { PedDevice *device = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } constraint = ped_device_get_optimal_aligned_constraint(device); if (!constraint) { PyErr_SetString(CreateException, "Could not create constraint"); return NULL; } ret = PedConstraint2_ped_Constraint(constraint); ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_device_get_minimum_alignment(PyObject *s, PyObject *args) { PedDevice *device = NULL; PedAlignment *alignment = NULL; _ped_Alignment *ret = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } alignment = ped_device_get_minimum_alignment(device); if (!alignment) { PyErr_SetString(CreateException, "Could not get alignment for device"); return NULL; } ret = PedAlignment2_ped_Alignment(alignment); ped_alignment_destroy(alignment); return (PyObject *) ret; } PyObject *py_ped_device_get_optimum_alignment(PyObject *s, PyObject *args) { PedDevice *device = NULL; PedAlignment *alignment = NULL; _ped_Alignment *ret = NULL; device = _ped_Device2PedDevice(s); if (device == NULL) { return NULL; } alignment = ped_device_get_optimum_alignment(device); if (!alignment) { PyErr_SetString(CreateException, "Could not get alignment for device"); return NULL; } ret = PedAlignment2_ped_Alignment(alignment); ped_alignment_destroy(alignment); return (PyObject *) ret; } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/Makefile.in0000664000076400007640000010502611542323606013007 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ # # Makefile.am for pyparted src subdirectory # # Copyright (C) 2007, 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(pkgpyexecdir)" LTLIBRARIES = $(pkgpyexec_LTLIBRARIES) am__DEPENDENCIES_1 = _pedmodule_la_DEPENDENCIES = $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) am__pedmodule_la_OBJECTS = _pedmodule_la-convert.lo \ _pedmodule_la-_pedmodule.lo _pedmodule_la-pyconstraint.lo \ _pedmodule_la-pydevice.lo _pedmodule_la-pydisk.lo \ _pedmodule_la-pyfilesys.lo _pedmodule_la-pygeom.lo \ _pedmodule_la-pynatmath.lo _pedmodule_la-pytimer.lo \ _pedmodule_la-pyunit.lo _pedmodule_la_OBJECTS = $(am__pedmodule_la_OBJECTS) _pedmodule_la_LINK = $(LIBTOOL) --tag=CC 
$(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(_pedmodule_la_CFLAGS) \ $(CFLAGS) $(_pedmodule_la_LDFLAGS) $(LDFLAGS) -o $@ DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(_pedmodule_la_SOURCES) DIST_SOURCES = $(_pedmodule_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ -Werror -Wmissing-prototypes -fno-strict-aliasing CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ 
PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ # install Python module code directly in to site-packages pkgpyexecdir = $(pyexecdir) pkgpythondir = $(pythondir) prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = parted INCLUDES = -I$(top_srcdir)/include $(PYTHON_INCLUDES) $(LIBPARTED_INCLUDES) # libparted binding pkgpyexec_LTLIBRARIES = _pedmodule.la _pedmodule_la_CFLAGS = $(LIBPARTED_CFLAGS) _pedmodule_la_LDFLAGS = -module -avoid-version $(PYTHON_LDFLAGS) \ $(LIBPARTED_LDFLAGS) _pedmodule_la_LIBADD = $(PYTHON_LIBS) $(LIBPARTED_LIBS) _pedmodule_la_SOURCES = convert.c _pedmodule.c pyconstraint.c pydevice.c \ pydisk.c pyfilesys.c pygeom.c pynatmath.c pytimer.c \ pyunit.c MAINTAINERCLEANFILES = Makefile.in all: all-recursive .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkgpyexecLTLIBRARIES: $(pkgpyexec_LTLIBRARIES) @$(NORMAL_INSTALL) test -z "$(pkgpyexecdir)" || $(MKDIR_P) "$(DESTDIR)$(pkgpyexecdir)" @list='$(pkgpyexec_LTLIBRARIES)'; test -n "$(pkgpyexecdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkgpyexecdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkgpyexecdir)"; \ } uninstall-pkgpyexecLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkgpyexec_LTLIBRARIES)'; test -n "$(pkgpyexecdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkgpyexecdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkgpyexecdir)/$$f"; \ done clean-pkgpyexecLTLIBRARIES: -test -z "$(pkgpyexec_LTLIBRARIES)" || rm -f $(pkgpyexec_LTLIBRARIES) @list='$(pkgpyexec_LTLIBRARIES)'; for p in $$list; do \ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ test "$$dir" != "$$p" || dir=.; \ echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done _pedmodule.la: $(_pedmodule_la_OBJECTS) $(_pedmodule_la_DEPENDENCIES) $(_pedmodule_la_LINK) -rpath $(pkgpyexecdir) $(_pedmodule_la_OBJECTS) $(_pedmodule_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-_pedmodule.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-convert.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pyconstraint.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pydevice.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pydisk.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pyfilesys.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pygeom.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pynatmath.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pytimer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/_pedmodule_la-pyunit.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c $< .c.obj: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` 
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< _pedmodule_la-convert.lo: convert.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-convert.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-convert.Tpo -c -o _pedmodule_la-convert.lo `test -f 'convert.c' || echo '$(srcdir)/'`convert.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-convert.Tpo $(DEPDIR)/_pedmodule_la-convert.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='convert.c' object='_pedmodule_la-convert.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-convert.lo `test -f 'convert.c' || echo '$(srcdir)/'`convert.c _pedmodule_la-_pedmodule.lo: _pedmodule.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-_pedmodule.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-_pedmodule.Tpo -c -o _pedmodule_la-_pedmodule.lo `test -f '_pedmodule.c' || echo '$(srcdir)/'`_pedmodule.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-_pedmodule.Tpo $(DEPDIR)/_pedmodule_la-_pedmodule.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='_pedmodule.c' object='_pedmodule_la-_pedmodule.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-_pedmodule.lo `test -f '_pedmodule.c' || echo '$(srcdir)/'`_pedmodule.c _pedmodule_la-pyconstraint.lo: pyconstraint.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pyconstraint.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pyconstraint.Tpo -c -o _pedmodule_la-pyconstraint.lo `test -f 'pyconstraint.c' || echo '$(srcdir)/'`pyconstraint.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pyconstraint.Tpo $(DEPDIR)/_pedmodule_la-pyconstraint.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pyconstraint.c' object='_pedmodule_la-pyconstraint.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pyconstraint.lo `test -f 'pyconstraint.c' || echo '$(srcdir)/'`pyconstraint.c _pedmodule_la-pydevice.lo: pydevice.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pydevice.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pydevice.Tpo -c -o _pedmodule_la-pydevice.lo `test -f 'pydevice.c' || echo '$(srcdir)/'`pydevice.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pydevice.Tpo $(DEPDIR)/_pedmodule_la-pydevice.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pydevice.c' object='_pedmodule_la-pydevice.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pydevice.lo `test -f 'pydevice.c' || echo '$(srcdir)/'`pydevice.c _pedmodule_la-pydisk.lo: pydisk.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pydisk.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pydisk.Tpo -c -o _pedmodule_la-pydisk.lo `test -f 'pydisk.c' || echo '$(srcdir)/'`pydisk.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pydisk.Tpo $(DEPDIR)/_pedmodule_la-pydisk.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pydisk.c' object='_pedmodule_la-pydisk.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pydisk.lo `test -f 'pydisk.c' || echo '$(srcdir)/'`pydisk.c _pedmodule_la-pyfilesys.lo: pyfilesys.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pyfilesys.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pyfilesys.Tpo -c -o _pedmodule_la-pyfilesys.lo `test -f 'pyfilesys.c' || echo '$(srcdir)/'`pyfilesys.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pyfilesys.Tpo $(DEPDIR)/_pedmodule_la-pyfilesys.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pyfilesys.c' object='_pedmodule_la-pyfilesys.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pyfilesys.lo `test -f 'pyfilesys.c' || echo '$(srcdir)/'`pyfilesys.c _pedmodule_la-pygeom.lo: pygeom.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pygeom.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pygeom.Tpo -c -o _pedmodule_la-pygeom.lo `test -f 'pygeom.c' || echo 
'$(srcdir)/'`pygeom.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pygeom.Tpo $(DEPDIR)/_pedmodule_la-pygeom.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pygeom.c' object='_pedmodule_la-pygeom.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pygeom.lo `test -f 'pygeom.c' || echo '$(srcdir)/'`pygeom.c _pedmodule_la-pynatmath.lo: pynatmath.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pynatmath.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pynatmath.Tpo -c -o _pedmodule_la-pynatmath.lo `test -f 'pynatmath.c' || echo '$(srcdir)/'`pynatmath.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pynatmath.Tpo $(DEPDIR)/_pedmodule_la-pynatmath.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pynatmath.c' object='_pedmodule_la-pynatmath.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pynatmath.lo `test -f 'pynatmath.c' || echo '$(srcdir)/'`pynatmath.c _pedmodule_la-pytimer.lo: pytimer.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pytimer.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pytimer.Tpo -c -o _pedmodule_la-pytimer.lo `test -f 'pytimer.c' || echo '$(srcdir)/'`pytimer.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pytimer.Tpo $(DEPDIR)/_pedmodule_la-pytimer.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pytimer.c' object='_pedmodule_la-pytimer.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pytimer.lo `test -f 'pytimer.c' || echo '$(srcdir)/'`pytimer.c _pedmodule_la-pyunit.lo: pyunit.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -MT _pedmodule_la-pyunit.lo -MD -MP -MF $(DEPDIR)/_pedmodule_la-pyunit.Tpo -c -o _pedmodule_la-pyunit.lo `test -f 'pyunit.c' || echo '$(srcdir)/'`pyunit.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/_pedmodule_la-pyunit.Tpo $(DEPDIR)/_pedmodule_la-pyunit.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pyunit.c' object='_pedmodule_la-pyunit.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(_pedmodule_la_CFLAGS) $(CFLAGS) -c -o _pedmodule_la-pyunit.lo `test -f 'pyunit.c' 
|| echo '$(srcdir)/'`pyunit.c mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgpyexecdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkgpyexecLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkgpyexecLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkgpyexecLTLIBRARIES .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ clean-pkgpyexecLTLIBRARIES ctags ctags-recursive distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkgpyexecLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-pkgpyexecLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/src/parted/0000775000076400007640000000000011542323614012274 500000000000000pyparted-3.6/src/parted/decorators.py0000664000076400007640000000266111535516731014746 00000000000000# # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
# # Red Hat Author(s): Peter Jones # import locale import functools def localeC(fn): @functools.wraps(fn) def new(*args, **kwds): oldlocale = locale.getlocale(locale.LC_MESSAGES) locale.setlocale(locale.LC_MESSAGES, 'C') try: ret = fn(*args, **kwds) finally: locale.setlocale(locale.LC_MESSAGES, oldlocale) return ret return new pyparted-3.6/src/parted/Makefile.in0000664000076400007640000003335511542323606014273 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted src subdirectory # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
# # Red Hat Author(s): Chris Lumens # David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/parted DIST_COMMON = $(parted_PYTHON) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__installdirs = "$(DESTDIR)$(parteddir)" py_compile = $(top_srcdir)/py-compile DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = 
@PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ parteddir = $(pyexecdir)/parted parted_PYTHON = *.py MAINTAINERCLEANFILES = Makefile.in *.pyc all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/parted/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/parted/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-partedPYTHON: $(parted_PYTHON) @$(NORMAL_INSTALL) test -z "$(parteddir)" || $(MKDIR_P) "$(DESTDIR)$(parteddir)" @list='$(parted_PYTHON)'; dlist=; list2=; test -n "$(parteddir)" || list=; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(parteddir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(parteddir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ if test -z "$(DESTDIR)"; then \ PYTHON=$(PYTHON) $(py_compile) --basedir "$(parteddir)" $$dlist; \ else \ PYTHON=$(PYTHON) $(py_compile) --destdir "$(DESTDIR)" --basedir "$(parteddir)" $$dlist; \ fi; \ else :; fi uninstall-partedPYTHON: @$(NORMAL_UNINSTALL) @list='$(parted_PYTHON)'; test -n "$(parteddir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$files" || exit 0; \ filesc=`echo "$$files" | sed 's|$$|c|'`; \ fileso=`echo "$$files" | sed 's|$$|o|'`; \ echo " ( cd '$(DESTDIR)$(parteddir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(parteddir)" && rm -f $$files || exit $$?; \ echo " ( cd '$(DESTDIR)$(parteddir)' && rm -f" $$filesc ")"; \ cd "$(DESTDIR)$(parteddir)" && rm -f $$filesc || exit $$?; \ echo " ( cd '$(DESTDIR)$(parteddir)' && rm -f" $$fileso ")"; \ cd "$(DESTDIR)$(parteddir)" && rm -f $$fileso tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: for dir in "$(DESTDIR)$(parteddir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-partedPYTHON install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-partedPYTHON .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ distclean distclean-generic distclean-libtool distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-partedPYTHON \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ uninstall uninstall-am uninstall-partedPYTHON pychecker: PYTHONPATH=$(top_builddir)/src/.libs:$(top_builddir)/src pychecker $(PYCHECKEROPTS) *.py # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/src/parted/partition.py0000664000076400007640000002314611540272306014604 00000000000000# # partition.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. 
# This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import math import string import warnings import _ped import parted from decorators import localeC # XXX: add docstrings class Partition(object): @localeC def __init__(self, disk=None, type=None, fs=None, geometry=None, PedPartition=None): if PedPartition is None: if disk is None: raise parted.PartitionException, "no disk specified" elif type is None: raise parted.PartitionException, "no type specified" elif geometry is None: raise parted.PartitionException, "no geometry specified" self._fileSystem = fs self._geometry = geometry self._disk = disk if fs is None: self.__partition = _ped.Partition(disk.getPedDisk(), type, geometry.start, geometry.end) else: self.__partition = _ped.Partition(disk.getPedDisk(), type, geometry.start, geometry.end, parted.fileSystemType[fs.type]) else: self.__partition = PedPartition self._geometry = parted.Geometry(PedGeometry=self.__partition.geom) if disk is None: self._disk = parted.Disk(PedDisk=self.__partition.disk) else: self._disk = disk if self.__partition.fs_type is None: self._fileSystem = None else: self._fileSystem = parted.FileSystem(type=self.__partition.fs_type.name, geometry=self._geometry) def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True return self.path != other.path or self.type != other.type or self.geometry != other.geometry or self.fileSystem != other.fileSystem def __str__(self): try: name = self.name except parted.PartitionException: name = None s = ("parted.Partition instance --\n" " disk: %(disk)r fileSystem: %(fileSystem)r\n" " number: %(number)s path: %(path)s type: %(type)s\n" " name: %(name)s active: %(active)s busy: %(busy)s\n" " geometry: %(geometry)r PedPartition: %(ped)r" % {"disk": self.disk, "fileSystem": self.fileSystem, "geometry": self.geometry, "number": self.number, "path": self.path, "type": self.type, "name": name, "active": self.active, "busy": self.busy, "ped": self.__partition}) return s def __writeOnly(self, property): raise parted.WriteOnlyProperty, property @property @localeC def active(self): """True if the partition is active, False otherwise.""" return bool(self.__partition.is_active()) @property @localeC def busy(self): """True if the partition is busy, False otherwise.""" return bool(self.__partition.is_busy()) @property def disk(self): """The Disk this partition belongs to.""" return self._disk @property @localeC def path(self): """The filesystem path to this partition's device node.""" return self.__partition.get_path() @property @localeC def name(self): """The name of this partition.""" try: return self.__partition.get_name() except parted.PartitionException as msg: return None @property def number(self): """The partition number.""" return self.__partition.num
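    # A minimal usage sketch for the properties above (number, path, active,
    # busy), assuming a Disk built from some parted.Device; the "/dev/sda"
    # path below is only a hypothetical example:
    #
    #   import parted
    #   device = parted.getDevice("/dev/sda")
    #   disk = parted.Disk(device=device)
    #   part = disk.partitions[0]
    #   print part.number, part.path, part.active, part.busy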
fileSystem = property(lambda s: s._fileSystem, lambda s, v: setattr(s, "_fileSystem", v)) geometry = property(lambda s: s._geometry, lambda s, v: setattr(s, "_geometry", v)) system = property(lambda s: s.__writeOnly("system"), lambda s, v: s.__partition.set_system(v)) type = property(lambda s: s.__partition.type, lambda s, v: setattr(s.__partition, "type", v)) @localeC def getFlag(self, flag): """Get the value of a particular flag on the partition. Valid flags are the _ped.PARTITION_* constants. See _ped.partition_flag_get_name() and _ped.partition_flag_get_by_name() for more help working with partition flags. """ return self.__partition.get_flag(flag) @localeC def setFlag(self, flag): """Set the given flag on this partition. On error, a PartitionException will be raised. See getFlag() for more help on working with partition flags.""" return self.__partition.set_flag(flag, 1) @localeC def unsetFlag(self, flag): """Unset the flag on this Partition. On error, a PartitionException will be raised. See getFlag() for more help on working with partition flags.""" return self.__partition.set_flag(flag, 0) @localeC def getMaxGeometry(self, constraint): """Given a constraint, return the maximum Geometry that self can be grown to. Raises PartitionException on error.""" return parted.Geometry(PedGeometry=self.disk.getPedDisk().get_max_partition_geometry(self.__partition, constraint)) @localeC def isFlagAvailable(self, flag): """Return True if flag is available on this Partition, False otherwise.""" return self.__partition.is_flag_available(flag) @localeC def nextPartition(self): """Return the Partition following this one on the Disk.""" partition = self.disk.getPedDisk().next_partition(self.__partition) if partition is None: return None else: return parted.Partition(disk=self.disk, PedPartition=partition) @localeC def getSize(self, unit="MB"): """Return the size of the partition in the unit specified. The unit is given as a string corresponding to one of the following abbreviations: b (bytes), KB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes). An invalid unit string will raise a SyntaxError exception. The default unit is MB.""" warnings.warn("use the getLength method", DeprecationWarning) return self.geometry.getSize(unit) @localeC def getLength(self, unit='sectors'): """Return the length of the partition in sectors. Optionally, an SI or IEC prefix followed by a 'B' may be given in order to convert the length into bytes. The allowed values include B, kB, MB, GB, TB, KiB, MiB, GiB, and TiB.""" return self.geometry.getLength(unit) def getFlagsAsString(self): """Return a comma-separated string representing the flags on this partition.""" flags = [] for flag in partitionFlag.keys(): if self.getFlag(flag): flags.append(partitionFlag[flag]) return string.join(flags, ', ') def getMaxAvailableSize(self, unit="MB"): """Return the maximum size this Partition can grow to by looking at contiguous freespace partitions. The size is returned in the unit specified (default is megabytes). The unit is a string corresponding to one of the following abbreviations: b (bytes), KB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes).
An invalid unit string will raise a SyntaxError exception.""" lunit = unit.lower() if lunit not in parted._exponent.keys(): raise SyntaxError, "invalid unit %s given" % (unit,) maxLength = self.geometry.length sectorSize = self.geometry.device.sectorSize for partition in self.disk.partitions: if partition.type & parted.PARTITION_FREESPACE: maxLength += partition.geometry.length else: break return math.floor(maxLength * math.pow(sectorSize, parted._exponent[lunit])) def getDeviceNodeName(self): """Return the device name for this Partition.""" return self.path[5:] def getPedPartition(self): """Return the _ped.Partition object contained in this Partition. For internal module use only.""" return self.__partition # collect all partition flags and store them in a hash partitionFlag = {} __flag = _ped.partition_flag_next(0) partitionFlag[__flag] = _ped.partition_flag_get_name(__flag) __readFlags = True while __readFlags: __flag = _ped.partition_flag_next(__flag) if not __flag: __readFlags = False else: partitionFlag[__flag] = _ped.partition_flag_get_name(__flag) pyparted-3.6/src/parted/alignment.py0000664000076400007640000001223311223300062014533 00000000000000# # alignment.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import parted import _ped from decorators import localeC class Alignment(object): """Alignment() An Alignment object describes constraints on how sectors and Geometry objects are aligned. Being aligned means that a sector must be located at a specific sector multiple on a device, or that a geometry must start and end at sectors at those specific multiples.
Most methods on this object raise ArithmeticError if calculating alignments fails.""" @localeC def __init__(self, *args, **kwargs): """Create a new Alignment object from the sectors offset and grainSize.""" if kwargs.has_key("PedAlignment"): self.__alignment = kwargs.get("PedAlignment") elif kwargs.has_key("offset") and kwargs.has_key("grainSize"): self.__alignment = _ped.Alignment(kwargs.get("offset"), kwargs.get("grainSize")) else: raise parted.AlignmentException, "no offset+grainSize or PedAlignment specified" offset = property(lambda s: s.__alignment.offset, lambda s, v: setattr(s.__alignment, "offset", v)) grainSize = property(lambda s: s.__alignment.grain_size, lambda s, v: setattr(s.__alignment, "grain_size", v)) def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True return self.offset != other.offset or self.grainSize != other.grainSize def __str__(self): s = ("parted.Alignment instance --\n" " offset: %(offset)s grainSize: %(grainSize)s\n" " PedAlignment: %(ped)r" % {"offset": self.offset, "grainSize": self.grainSize, "ped": self.__alignment}) return s @localeC def intersect(self, b): """Create and return a new Alignment that describes the intersection of self and alignment b. A sector will satisfy the new alignment iff it satisfies both of the original alignments. Whether a sector satisfies a given alignment is determined by is_aligned().""" return parted.Alignment(PedAlignment=self.__alignment.intersect(b.getPedAlignment())) @localeC def alignUp(self, geom, sector): """Return the closest sector to the provided sector that lies inside geom and satisfies the alignment constraint self. This method prefers, but does not guarantee, that the result is beyond sector. If no such sector can be found, an ArithmeticError is raised.""" return self.__alignment.align_up(geom.getPedGeometry(), sector) @localeC def alignDown(self, geom, sector): """Return the closest sector to the provided sector that lies inside geom and satisfies the alignment constraint self. This method prefers, but does not guarantee, that the result is below sector. If no such sector can be found, an ArithmeticError is raised.""" return self.__alignment.align_down(geom.getPedGeometry(), sector) @localeC def alignNearest(self, geom, sector): """Return the closest sector to the input sector that lies inside geom and satisfies the alignment constraint self. If no such sector can be found, an ArithmeticError is raised.""" return self.__alignment.align_nearest(geom.getPedGeometry(), sector) @localeC def isAligned(self, geom, sector): """Determine whether sector lies inside geom and satisfies the alignment constraint self.""" if not geom: raise TypeError, "missing parted.Geometry parameter" if sector is None: raise TypeError, "missing sector parameter" return self.__alignment.is_aligned(geom.getPedGeometry(), sector) def getPedAlignment(self): """Return the _ped.Alignment object contained in this Alignment. For internal module use only.""" return self.__alignment pyparted-3.6/src/parted/constraint.py0000664000076400007640000001733411542264060014761 00000000000000# # constraint.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. 
# This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import parted import _ped from decorators import localeC class Constraint(object): """Constraint() A Constraint object describes a set of restrictions on other pyparted operations. Constraints can restrict the location and alignment of the start and end of a partition, and its minimum and maximum size. Most constraint operations can raise CreateException if creating temporary objects fails, or ArithmeticError if an error occurs during calculations.""" @localeC def __init__(self, *args, **kwargs): """Create a new Constraint object. There are many different ways to create a Constraint, all depending on the parameters passed to __init__. If minGeom and maxGeom are supplied, the constraint will be created to satisfy both. If only one of minGeom or maxGeom is supplied, the constraint is only guaranteed to solve the given parameter. If exactGeom is given, the constraint will only be satisfied by the given geometry. If device is given, any region on that device will satisfy the constraint. If none of the previously mentioned parameters are supplied, all of startAlign, endAlign, startRange, endRange, minSize, and maxSize must be given.""" if kwargs.has_key("PedConstraint"): self.__constraint = kwargs.get("PedConstraint") elif kwargs.has_key("minGeom") and kwargs.has_key("maxGeom"): ming = kwargs.get("minGeom").getPedGeometry() maxg = kwargs.get("maxGeom").getPedGeometry() self.__constraint = _ped.constraint_new_from_min_max(ming, maxg) elif kwargs.has_key("minGeom"): ming = kwargs.get("minGeom").getPedGeometry() self.__constraint = _ped.constraint_new_from_min(ming) elif kwargs.has_key("maxGeom"): maxg = kwargs.get("maxGeom").getPedGeometry() self.__constraint = _ped.constraint_new_from_max(maxg) elif kwargs.has_key("exactGeom"): exact = kwargs.get("exactGeom").getPedGeometry() self.__constraint = _ped.constraint_exact(exact) elif kwargs.has_key("device"): dev = kwargs.get("device").getPedDevice() self.__constraint = _ped.constraint_any(dev) elif kwargs.has_key("startAlign") and kwargs.has_key("endAlign") and \ kwargs.has_key("startRange") and kwargs.has_key("endRange") and \ kwargs.has_key("minSize") and kwargs.has_key("maxSize"): starta = kwargs.get("startAlign").getPedAlignment() enda = kwargs.get("endAlign").getPedAlignment() startr = kwargs.get("startRange").getPedGeometry() endr = kwargs.get("endRange").getPedGeometry() mins = kwargs.get("minSize") maxs = kwargs.get("maxSize") self.__constraint = _ped.Constraint(starta, enda, startr, endr, mins, maxs) else: raise parted.ConstraintException, "missing initialization parameters" def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True c1 = self.getPedConstraint() c2 = other.getPedConstraint() return
self.minSize != other.minSize \ or self.maxSize != other.maxSize \ or c1.start_align != c2.start_align \ or c1.end_align != c2.end_align \ or c1.start_range != c2.start_range \ or c1.end_range != c2.end_range startAlign = property( lambda s: parted.Alignment(PedAlignment=s.__constraint.start_align), lambda s, v: setattr(s.__constraint, "start_align", v.getPedAlignment())) endAlign = property( lambda s: parted.Alignment(PedAlignment=s.__constraint.end_align), lambda s, v: setattr(s.__constraint, "end_align", v.getPedAlignment())) startRange = property( lambda s: parted.Geometry(PedGeometry=s.__constraint.start_range), lambda s, v: setattr(s.__constraint, "start_range", v.getPedGeometry())) endRange = property( lambda s: parted.Geometry(PedGeometry=s.__constraint.end_range), lambda s, v: setattr(s.__constraint, "end_range", v.getPedGeometry())) minSize = property( lambda s: s.__constraint.min_size, lambda s, v: setattr(s.__constraint, "min_size", v)) maxSize = property( lambda s: s.__constraint.max_size, lambda s, v: setattr(s.__constraint, "max_size", v)) def __str__(self): s = ("parted.Constraint instance --\n" " startAlign: %(startAlign)r endAlign: %(endAlign)r\n" " startRange: %(startRange)r endRange: %(endRange)r\n" " minSize: %(minSize)s maxSize: %(maxSize)s\n" " PedConstraint: %(ped)r" % {"startAlign": self.startAlign, "endAlign": self.endAlign, "startRange": self.startRange, "endRange": self.endRange, "minSize": self.minSize, "maxSize": self.maxSize, "ped": self.__constraint}) return s @localeC def intersect(self, b): """Return a new constraint that is the intersection of self and the provided constraint b. The returned constraint will therefore be more restrictive than either input as it will have to satisfy both.""" return parted.Constraint(PedConstraint=self.__constraint.intersect(b.getPedConstraint())) @localeC def solveMax(self): """Return a new geometry that is the largest region satisfying self. There may be more than one solution, and there are no guarantees as to which solution will be returned.""" return parted.Geometry(PedGeometry=self.__constraint.solve_max()) @localeC def solveNearest(self, geom): """Return a new geometry that is the nearest region to geom that satisfies self. This function does not guarantee any specific meaning of 'nearest'.""" return parted.Geometry(PedGeometry=self.__constraint.solve_nearest(geom.getPedGeometry())) @localeC def isSolution(self, geom): """Does geom satisfy this constraint?""" return self.__constraint.is_solution(geom.getPedGeometry()) def getPedConstraint(self): """Return the _ped.Constraint object contained in this Constraint. For internal module use only.""" return self.__constraint pyparted-3.6/src/parted/disk.py0000664000076400007640000004221611313012377013522 00000000000000# # disk.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. 
You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import _ped import parted from cachedlist import CachedList from decorators import localeC class Disk(object): """Disk() A Disk object describes a type of device in the system. Disks can hold partitions. A Disk is a basic operating system-specific object.""" @localeC def __init__(self, device=None, PedDisk=None): """Create a new Disk object from the device and type specified. The device is a Device object and type is a string matching a key in the diskType hash.""" if PedDisk: self.__disk = PedDisk if device is None: self._device = parted.Device(PedDevice=self.__disk.dev) else: self._device = device elif device is None: raise parted.DiskException, "no device specified" else: self.__disk = _ped.Disk(device.getPedDevice()) self._device = device self._partitions = CachedList(lambda : self.__getPartitions()) def _hasSameParts(self, other): import itertools if len(self.partitions) != len(other.partitions): return False partIter = itertools.izip(self.partitions, other.partitions) while True: try: (left, right) = partIter.next() if left != right: return False except StopIteration: return True def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True return self.device != other.device or not self._hasSameParts(other) def __str__(self): s = ("parted.Disk instance --\n" " type: %(type)s primaryPartitionCount: %(primaryCount)s\n" " lastPartitionNumber: %(last)s maxPrimaryPartitionCount: %(max)s\n" " partitions: %(partitions)s\n" " device: %(device)r\n" " PedDisk: %(ped)r" % {"type": self.type, "primaryCount": self.primaryPartitionCount, "last": self.lastPartitionNumber, "max": self.maxPrimaryPartitionCount, "partitions": self.partitions, "device": self.device, "ped": self.__disk}) return s def __getPartitions(self): """Construct a list of partitions on the disk. 
This is called only as needed from the self.partitions property, which just happens to be a CachedList.""" partitions = [] partition = self.getFirstPartition() while partition: if partition.type & parted.PARTITION_FREESPACE or \ partition.type & parted.PARTITION_METADATA or \ partition.type & parted.PARTITION_PROTECTED: partition = partition.nextPartition() continue partitions.append(partition) partition = partition.nextPartition() return partitions @property @localeC def primaryPartitionCount(self): """The number of primary partitions on this disk.""" return self.__disk.get_primary_partition_count() @property @localeC def lastPartitionNumber(self): """The last assigned partition number currently on this disk.""" return self.__disk.get_last_partition_num() @property @localeC def maxPrimaryPartitionCount(self): """The maximum number of primary partitions allowed on this disk.""" return self.__disk.get_max_primary_partition_count() @property @localeC def maxSupportedPartitionCount(self): """The maximum number of partitions allowed on this disk.""" return self.__disk.get_max_supported_partition_count() @property @localeC def partitionAlignment(self): """Partition start address Alignment.""" alignment = self.__disk.get_partition_alignment() return parted.Alignment(PedAlignment=alignment) @property @localeC def maxPartitionLength(self): """Max Partition Length the disk's label can represent.""" return self.__disk.max_partition_length() @property @localeC def maxPartitionStartSector(self): """Max Partition Start Sector the disk's label can represent.""" return self.__disk.max_partition_start_sector() @localeC def getFlag(self, flag): """Get the value of a particular flag on the disk. Valid flags are the _ped.DISK_* constants. See _ped.disk_flag_get_name() and _ped.disk_flag_get_by_name() for more help working with disk flags. """ return self.__disk.get_flag(flag) @localeC def setFlag(self, flag): """Set the flag on this disk. On error, an Exception will be raised. See getFlag() for more help on working with disk flags.""" return self.__disk.set_flag(flag, 1) @localeC def unsetFlag(self, flag): """Unset the flag on this disk. On error, an Exception will be raised. See getFlag() for more help on working with disk flags.""" return self.__disk.set_flag(flag, 0) @localeC def isFlagAvailable(self, flag): """Return True if flag is available on this Disk, False otherwise.""" return self.__disk.is_flag_available(flag) @property def partitions(self): """The list of partitions currently on this disk.""" return self._partitions @property def device(self): """The underlying Device holding this disk and partitions.""" return self._device type = property(lambda s: s.__disk.type.name, lambda s, v: setattr(s.__disk, "type", parted.diskType[v])) @localeC def duplicate(self): """Make a deep copy of this Disk.""" return Disk(PedDisk=self.__disk.duplicate()) @localeC def destroy(self): """Closes the Disk ensuring all outstanding writes are flushed.""" return self.__disk.destroy() @localeC def commit(self): """Writes in-memory changes to a partition table to disk and informs the operating system of the changes. 
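        A minimal sketch of the usual flow (illustrative only; partition and
        constraint are assumed to have been created beforehand):

            >>> disk.addPartition(partition=partition, constraint=constraint)
            >>> disk.commit()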
Equivalent to calling self.commitToDevice() then self.commitToOS().""" self.partitions.invalidate() return self.__disk.commit() @localeC def commitToDevice(self): """Write the changes made to the in-memory description of a partition table to the device.""" self.partitions.invalidate() return self.__disk.commit_to_dev() @localeC def commitToOS(self): """Tell the operating system kernel about the partition table layout of this Disk.""" self.partitions.invalidate() return self.__disk.commit_to_os() @localeC def check(self): """Perform a sanity check on the partition table of this Disk.""" return self.__disk.check() @localeC def supportsFeature(self, feature): """Check that the disk type supports the provided feature.""" return self.__disk.type.check_feature(feature) @localeC def addPartition(self, partition=None, constraint=None): """Add a new Partition to this Disk with the given Constraint.""" if constraint: result = self.__disk.add_partition(partition.getPedPartition(), constraint.getPedConstraint()) elif not partition: raise parted.DiskException, "no partition or constraint specified" else: result = self.__disk.add_partition(partition.getPedPartition()) if result: partition.geometry = parted.Geometry(PedGeometry=partition.getPedPartition().geom) self.partitions.invalidate() return True else: return False @localeC def removePartition(self, partition=None): """Removes specified Partition from this Disk. NOTE: If the Partition is an extended partition, it must not contain any logical partitions. Also note that the partition is not actually destroyed unless you use the deletePartition() method.""" if not partition: raise parted.DiskException, "no partition specified" if self.__disk.remove_partition(partition.getPedPartition()): self.partitions.invalidate() return True else: return False @localeC def deletePartition(self, partition): """Removes specified Partition from this Disk under the same conditions as removePartition(), but also destroy the removed Partition.""" if self.__disk.delete_partition(partition.getPedPartition()): self.partitions.invalidate() return True else: return False @localeC def deleteAllPartitions(self): """Removes and destroys all Partitions in this Disk.""" if self.__disk.delete_all(): self.partitions.invalidate() return True else: return False @localeC def setPartitionGeometry(self, partition=None, constraint=None, start=None, end=None): """Sets the Geometry of the specified Partition using the given Constraint and start and end sectors. 
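        For example (illustrative only; the sector values are assumptions):

            >>> disk.setPartitionGeometry(partition=partition,
            ...                           constraint=constraint,
            ...                           start=2048, end=1050623)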
Note that this method does not modify the partition contents, just the partition table.""" if not partition or not constraint: raise parted.DiskException, "no partition or constraint specified" if not start or not end: raise parted.DiskException, "no start or end geometry specified" return self.__disk.set_partition_geom(partition.getPedPartition(), constraint.getPedConstraint(), start, end) @localeC def maximizePartition(self, partition=None, constraint=None): """Grow the Partition's Geometry to the maximum possible subject to Constraint.""" if not partition: raise parted.DiskException, "no partition specified" if constraint: return self.__disk.maximize_partition(partition.getPedPartition(), constraint.getPedConstraint()) else: return self.__disk.maximize_partition(partition.getPedPartition()) @localeC def calculateMaxPartitionGeometry(self, partition=None, constraint=None): """Get the maximum Geometry the Partition can be grown to, subject to the given Constraint.""" if not partition: raise parted.DiskException, "no partition specified" if constraint: return parted.Geometry(PedGeometry=self.__disk.get_max_partition_geometry(partition.getPedPartition(), constraint.getPedConstraint())) else: return parted.Geometry(PedGeometry=self.__disk.get_max_partition_geometry(partition.getPedPartition())) @localeC def minimizeExtendedPartition(self): """Reduce the size of the extended partition to a minimum while still wrapping its logical partitions. If there are no logical partitions, remove the extended partition.""" ret = self.__disk.minimize_extended_partition() if ret: self.partitions.invalidate() return ret @localeC def getPartitionBySector(self, sector): """Returns the Partition that contains the sector. If the sector lies within a logical partition, then the logical partition is returned (not the extended partition).""" return parted.Partition(disk=self, PedPartition=self.__disk.get_partition_by_sector(sector)) def getMaxLogicalPartitions(self): """Return the maximum number of logical partitions this Disk will hold. Returns 0 if there is no extended partition on the disk, returns 11 when all else fails.""" if not self.supportsFeature(parted.DISK_TYPE_EXTENDED): return 0 # maximum number of logical partitions per device type maxLogicalPartitionCount = { "hd": 59, "sd": 11, "ataraid/": 11, "rd/": 3, "cciss/": 11, "i2o/": 11, "iseries/vd": 3, "ida/": 11, "sx8/": 11, "xvd": 11, "vd": 11, "mmcblk": 5 } dev = self.device.path[5:] for key in maxLogicalPartitionCount.keys(): if dev.startswith(key): return maxLogicalPartitionCount[key] # XXX: if we don't know about it, should we pretend it can't have # logicals? 
probably safer to just use something reasonable return 11 @localeC def getExtendedPartition(self): """Return the extended Partition, if any, on this Disk.""" try: return parted.Partition(disk=self, PedPartition=self.__disk.extended_partition()) except: return None def __filterPartitions(self, fn): return filter(fn, self.partitions) def getLogicalPartitions(self): """Return a list of logical Partitions on this Disk.""" return self.__filterPartitions(lambda p: p.active and p.type & parted.PARTITION_LOGICAL) def getPrimaryPartitions(self): """Return a list of primary (or normal) Partitions on this Disk.""" return self.__filterPartitions(lambda p: p.type == parted.PARTITION_NORMAL) def getRaidPartitions(self): """Return a list of RAID (or normal) Partitions on this Disk.""" return self.__filterPartitions(lambda p: p.active and p.getFlag(parted.PARTITION_RAID)) def getLVMPartitions(self): """Return a list of physical volume-type Partitions on this Disk.""" return self.__filterPartitions(lambda p: p.active and p.getFlag(parted.PARTITION_LVM)) @localeC def getFreeSpaceRegions(self): """Return a list of Geometry objects representing the available free space regions on this Disk.""" freespace = [] part = self.__disk.next_partition() while part: if part.type & parted.PARTITION_FREESPACE: freespace.append(parted.Geometry(PedGeometry=part.geom)) part = self.__disk.next_partition(part) return freespace @localeC def getFreeSpacePartitions(self): """Return a list of Partition objects representing the available free space regions on this Disk.""" freespace = [] part = self.__disk.next_partition() while part: if part.type & parted.PARTITION_FREESPACE: freespace.append(parted.Partition(disk=self, PedPartition=part)) part = self.__disk.next_partition(part) return freespace @localeC def getFirstPartition(self): """Return the first Partition object on the disk or None if there is not one.""" return parted.Partition(disk=self, PedPartition=self.__disk.next_partition()) @localeC def getPartitionByPath(self, path): """Return a Partition object associated with the partition device path, such as /dev/sda1. Returns None if no partition is found.""" for partition in self.partitions: if "/dev/%s" % partition.getDeviceNodeName() == path: return partition return None def getPedDisk(self): """Return the _ped.Disk object contained in this Disk. For internal module use only.""" return self.__disk # collect all disk types and store them in a hash diskType = {} __type = _ped.disk_type_get_next() diskType[__type.name] = __type while True: try: __type = _ped.disk_type_get_next(__type) diskType[__type.name] = __type except: break # collect all disk flags and store them in a hash diskFlag = {} __flag = _ped.disk_flag_next(0) diskFlag[__flag] = _ped.disk_flag_get_name(__flag) __readFlags = True while __readFlags: __flag = _ped.disk_flag_next(__flag) if not __flag: __readFlags = False else: diskFlag[__flag] = _ped.disk_flag_get_name(__flag) pyparted-3.6/src/parted/Makefile.am0000664000076400007640000000244711152036723014256 00000000000000# # Makefile.am for pyparted src subdirectory # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. 
# This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # parteddir = $(pyexecdir)/parted parted_PYTHON = *.py MAINTAINERCLEANFILES = Makefile.in *.pyc pychecker: PYTHONPATH=$(top_builddir)/src/.libs:$(top_builddir)/src pychecker $(PYCHECKEROPTS) *.py pyparted-3.6/src/parted/__init__.py0000664000076400007640000003377111542257653014351 00000000000000# # __init__.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2007, 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
# # Red Hat Author(s): David Cantrell # Chris Lumens # from __future__ import division import platform import re import sys import warnings __all__ = ['alignment', 'constraint', 'device', 'disk', 'filesystem', 'geometry', 'partition'] from _ped import AlignmentException from _ped import CreateException from _ped import ConstraintException from _ped import DeviceException from _ped import DiskException from _ped import DiskLabelException from _ped import FileSystemException from _ped import GeometryException from _ped import IOException from _ped import NotNeededException from _ped import PartedException from _ped import PartitionException from _ped import TimerException from _ped import UnknownDeviceException from _ped import UnknownTypeException from alignment import Alignment from constraint import Constraint from device import Device from disk import Disk from disk import diskType from disk import diskFlag from filesystem import FileSystem from filesystem import fileSystemType from geometry import Geometry from partition import Partition from partition import partitionFlag # the enumerated types in _ped need to be available from here too from _ped import UNIT_SECTOR from _ped import UNIT_BYTE from _ped import UNIT_KILOBYTE from _ped import UNIT_MEGABYTE from _ped import UNIT_GIGABYTE from _ped import UNIT_TERABYTE from _ped import UNIT_COMPACT from _ped import UNIT_CYLINDER from _ped import UNIT_CHS from _ped import UNIT_PERCENT from _ped import UNIT_KIBIBYTE from _ped import UNIT_MEBIBYTE from _ped import UNIT_GIBIBYTE from _ped import UNIT_TEBIBYTE from _ped import DEVICE_UNKNOWN from _ped import DEVICE_SCSI from _ped import DEVICE_IDE from _ped import DEVICE_DAC960 from _ped import DEVICE_CPQARRAY from _ped import DEVICE_FILE from _ped import DEVICE_ATARAID from _ped import DEVICE_I2O from _ped import DEVICE_UBD from _ped import DEVICE_DASD from _ped import DEVICE_VIODASD from _ped import DEVICE_SX8 from _ped import DEVICE_DM from _ped import DEVICE_XVD from _ped import DEVICE_SDMMC from _ped import DEVICE_VIRTBLK from _ped import PARTITION_NORMAL from _ped import PARTITION_LOGICAL from _ped import PARTITION_EXTENDED from _ped import PARTITION_FREESPACE from _ped import PARTITION_METADATA from _ped import PARTITION_PROTECTED from _ped import PARTITION_BOOT from _ped import PARTITION_ROOT from _ped import PARTITION_SWAP from _ped import PARTITION_HIDDEN from _ped import PARTITION_RAID from _ped import PARTITION_LVM from _ped import PARTITION_LBA from _ped import PARTITION_HPSERVICE from _ped import PARTITION_PALO from _ped import PARTITION_PREP from _ped import PARTITION_MSFT_RESERVED from _ped import PARTITION_APPLE_TV_RECOVERY from _ped import PARTITION_BIOS_GRUB from _ped import PARTITION_DIAG try: from _ped import PARTITION_LEGACY_BOOT except ImportError: pass from _ped import DISK_CYLINDER_ALIGNMENT from _ped import DISK_TYPE_EXTENDED from _ped import DISK_TYPE_PARTITION_NAME from decorators import localeC partitionTypesDict = { 0x00: "Empty", 0x01: "DOS 12-bit FAT", 0x02: "XENIX root", 0x03: "XENIX usr", 0x04: "DOS 16-bit <32M", 0x05: "Extended", 0x06: "DOS 16-bit >=32M", 0x07: "NTFS/HPFS", 0x08: "AIX", 0x09: "AIX bootable", 0x0a: "OS/2 Boot Manager", 0x0b: "Win95 FAT32", 0x0c: "Win95 FAT32", 0x0e: "Win95 FAT16", 0x0f: "Win95 Ext'd", 0x10: "OPUS", 0x11: "Hidden FAT12", 0x12: "Compaq Setup", 0x14: "Hidden FAT16 <32M", 0x16: "Hidden FAT16", 0x17: "Hidden HPFS/NTFS", 0x18: "AST SmartSleep", 0x1b: "Hidden Win95 FAT32", 0x1c: "Hidden Win95 FAT32 (LBA)", 0x1e: "Hidden Win95 FAT16 
(LBA)", 0x24: "NEC_DOS", 0x39: "Plan 9", 0x40: "Venix 80286", 0x41: "PPC_PReP Boot", 0x42: "SFS", 0x4d: "QNX4.x", 0x4e: "QNX4.x 2nd part", 0x4f: "QNX4.x 2nd part", 0x51: "Novell?", 0x52: "Microport", 0x63: "GNU HURD", 0x64: "Novell Netware 286", 0x65: "Novell Netware 386", 0x75: "PC/IX", 0x80: "Old MINIX", 0x81: "Linux/MINIX", 0x82: "Linux swap", 0x83: "Linux native", 0x84: "OS/2 hidden C:", 0x85: "Linux Extended", 0x86: "NTFS volume set", 0x87: "NTFS volume set", 0x8e: "Linux LVM", 0x93: "Amoeba", 0x94: "Amoeba BBT", 0x9f: "BSD/OS", 0xa0: "IBM Thinkpad hibernation", 0xa5: "BSD/386", 0xa6: "OpenBSD", 0xb7: "BSDI fs", 0xb8: "BSDI swap", 0xbf: "Solaris", 0xc7: "Syrinx", 0xdb: "CP/M", 0xde: "Dell Utility", 0xe1: "DOS access", 0xe3: "DOS R/O", 0xeb: "BEOS", 0xee: "EFI GPT", 0xef: "EFI (FAT-12/16/32)", 0xf2: "DOS secondary", 0xfd: "Linux RAID", 0xff: "BBT" } # Exponents for 1024 used when converting sizes to byte-sized # units for display. The keys are: # b bytes 1024^0 = 1 # kb kilobytes 1024^1 = 1024 # mb megabytes 1024^2 = 1048576 # gb gigabytes 1024^3 = 1073741824 # tb terabytes 1024^4 = 1099511627776 # pb petabytes 1024^5 = 1125899906842624 # eb exabytes 1024^6 = 1152921504606846976 # zb zettabytes 1024^7 = 1180591620717411303424 # yb yottabytes 1024^8 = 1208925819614629174706176 # The resulting value for 1024 raised to the power is used as # the divisor for conversion functions. _exponent = {'b': 0, 'kb': 1, 'mb': 2, 'gb': 3, 'tb': 4, 'pb': 5, 'eb': 6, 'zb': 7, 'yb': 8} # Refercences: # # 1. NIST Special Publication 330, 2008 Edition, Barry N. Taylor and Ambler # Thompson, Editors # The International System of Units (SI) # Available from: http://physics.nist.gov/cuu/pdf/sp811.pdf # # 2. International standard IEC 60027-2, third edition, # Letter symbols to be used in electrical technology -- # Part 2: Telecommunications and electronics. # # See the links below for quick online summaries: # # SI units: http://physics.nist.gov/cuu/Units/prefixes.html # IEC units: http://physics.nist.gov/cuu/Units/binary.html __exponents = { "B": 1, # byte "kB": 1000**1, # kilobyte "MB": 1000**2, # megabyte "GB": 1000**3, # gigabyte "TB": 1000**4, # terabyte "PB": 1000**5, # petabyte "EB": 1000**6, # exabyte "ZB": 1000**7, # zettabyte "YB": 1000**8, # yottabyte "KiB": 1024**1, # kibibyte "MiB": 1024**2, # mebibyte "GiB": 1024**3, # gibibyte "TiB": 1024**4, # tebibyte "PiB": 1024**5, # pebibyte "EiB": 1024**6, # exbibyte "ZiB": 1024**7, # zebibyte "YiB": 1024**8 # yobibyte } def formatBytes(bytes_, unit): """Convert bytes_ using an SI or IEC prefix. Note that unit is a case sensitive string that must exactly match one of the IEC or SI prefixes followed by 'B' (e.g. 'GB').""" if unit not in __exponents.keys(): raise SyntaxError("{:} is not a valid SI or IEC byte unit".format(unit)) else: return (bytes_ / __exponents[unit]) def sizeToSectors(bytes_, unit, sector_size): """Convert bytes_ of unit to a number of sectors. Note that unit is a case sensitive string that must exactly match one of the IEC or SI prefixes followed by 'B' (e.g. 'GB').""" if unit not in __exponents.keys(): raise SyntaxError("{:} is not a valid SI or IEC byte unit".format(unit)) else: return bytes_ * __exponents[unit] // sector_size # Valid disk labels per architecture type. The list of label # names map to keys in the parted.diskType hash table. 
archLabels = {'i386': ['msdos', 'gpt'], 's390': ['dasd', 'msdos'], 'alpha': ['bsd', 'msdos'], 'sparc': ['sun'], 'ia64': ['msdos', 'gpt'], 'ppc': ['msdos', 'mac', 'amiga', 'gpt'], 'x86_64': ['msdos', 'gpt']} # Adapted from: # http://stackoverflow.com/questions/922550/how-to-mark-a-global-as-deprecated-in-python # # Remember that DeprecationWarnings are ignored by default as they are not really # useful to users. Developers can turn on DeprecationWarning notices by passing # the -Wd option to python or by setting PYTHONWARNINGS=d in the environment. def Deprecated(mod, deprecated={}): """ Return a wrapped object that warns about deprecated accesses. """ class Wrapper(object): warnmsg = "%s is deprecated and will be removed in a future release." def __getattr__(self, attr): if attr in deprecated.keys(): msg = self.warnmsg + " " + deprecated[attr] warnings.warn(msg % attr, DeprecationWarning) return getattr(mod, attr) def __setattr__(self, attr, value): if attr in deprecated.keys(): msg = self.warnmsg + " " + deprecated[attr] warnings.warn(msg % attr, DeprecationWarning) return setattr(mod, attr, value) return Wrapper() # Valid disk labels and their applicable architectures. The label names map # to keys in the parted.diskType hash table. __archLabels = (('amiga', 'ppc(64)?$'), ('bsd', 'alpha$'), ('dasd', 's390x?$'), ('gpt', 'i[3-6]86$|x86_64$|ia64$|ppc(64)?$'), ('mac', 'ppc(64)?$'), ('msdos', 'i[3-6]86$|x86_64$|s390x?$|alpha$|ia64$|ppc(64)?$'), ('sun', 'sparc(64)?$')) def getLabels(arch=None): """Return a set containing the disk labels compatible with the architecture of the computer calling this function. If an architecture is passed, return the labels compatible with that architecture.""" labels = set() if arch is None: arch = platform.machine() for label, regex in __archLabels: if re.match(regex, arch): labels.add(label) return labels class ReadOnlyProperty(Exception): """Exception raised when a write operation occurs on a read-only property.""" def __init__(self, property=''): self.message = "%s is a read-only property" % (property,) class WriteOnlyProperty(Exception): """Exception raised when a read operation occurs on a write-only property.""" def __init__(self, property=''): self.message = "%s is a write-only property" % (property,) @localeC def getDevice(path): """Given the operating system level path to a device node, return a Device object for that disk. Raises DeviceException if an invalid path is given.""" return Device(path=path) @localeC def getAllDevices(): """Return a list of Device objects for all devices in the system.""" from _ped import device_probe_all from _ped import device_get_next lst = [] device = None device_probe_all() while True: try: if not device: device = device_get_next() else: device = device_get_next(device) lst.append(Device(PedDevice=device)) except IndexError: return lst @localeC def freeAllDevices(): """Free all Device objects. There is no reason to call this function.""" from _ped import device_free_all return device_free_all() @localeC def probeForSpecificFileSystem(fstype, geometry): """Call the _ped.file_system_probe_specific() function given the filesystem type and geometry. fstype must be a string representing a valid _ped.FileSystemType, geometry is a parted.Geometry.""" from _ped import file_system_probe_specific geom = file_system_probe_specific(fileSystemType[fstype], geometry.getPedGeometry()) return geometry.Geometry(PedGeometry=geom) @localeC def probeFileSystem(geometry): """Return the name of the filesystem detected on the given Geometry. 
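    For example (illustrative only; geometry is assumed to describe a region
    holding an ext3 filesystem):

        >>> parted.probeFileSystem(geometry)
        'ext3'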
Returns None is no filesystem found.""" from _ped import file_system_probe fstype = file_system_probe(geometry.getPedGeometry()) return fstype.name @localeC def freshDisk(device, ty): """Return a Disk object for this Device and using this DiskType. The type should be a member of the parted.diskType hash, either a key or a value. The new label is not written to disk until commitToDevice() is called on the Disk.""" from _ped import disk_new_fresh, DiskType if type(ty) == str: ty = diskType[ty] elif not isinstance(ty, DiskType): raise SyntaxError, "type must be a key or value in parted.diskType" peddisk = disk_new_fresh(device.getPedDevice(), ty) return Disk(PedDisk=peddisk) @localeC def version(): """Return a dict containing the pyparted and libparted versions.""" from _ped import libparted_version from _ped import pyparted_version ver = {} ver['libparted'] = libparted_version() ver['pyparted'] = pyparted_version() return ver # Mark deprecated items _deprecated = {"partitionTypesDict": "DOS disk label types are not provided " "by libparted, so the codes are not " "useful.", "_exponent": "Use __exponents instead.", "archLabels": "Use getLabels() instead.", } sys.modules[__name__] = Deprecated(sys.modules[__name__], _deprecated) pyparted-3.6/src/parted/geometry.py0000664000076400007640000002032411540272306014421 00000000000000# # geometry.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # David Cantrell # import math import warnings import _ped import parted from decorators import localeC class Geometry(object): """Geometry() Geometry represents a region on a device in the system - a disk or partition. It is expressed in terms of a starting sector and a length. Many methods (read and write methods in particular) throughout pyparted take in a Geometry object as an argument.""" @localeC def __init__(self, device=None, start=None, length=None, end=None, PedGeometry=None): """Create a new Geometry object for the given _ped.Device that extends for length sectors from the start sector. 
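        For example (illustrative only; device is an existing parted.Device
        and the sector numbers are assumptions):

            >>> geometry = parted.Geometry(device=device, start=2048,
            ...                            length=2097152)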
Optionally, an end sector can also be provided.""" if PedGeometry: self.__geometry = PedGeometry if device is None: self._device = parted.Device(PedDevice=self.__geometry.dev) else: self._device = device elif not end: self._device = device self.__geometry = _ped.Geometry(self.device.getPedDevice(), start, length) elif not length and (end > start): self._device = device self.__geometry = _ped.Geometry(self.device.getPedDevice(), start, (end - start + 1), end=end) elif start and length and end and (end > start): self._device = device self.__geometry = _ped.Geometry(self.device.getPedDevice(), start, length, end=end) else: raise parted.GeometryException, "must specify PedGeometry or (device, start, length) or (device, start, end) or (device, start, length, end)" def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True return self.device != other.device or self.start != other.start or self.length != other.length def __str__(self): s = ("parted.Geometry instance --\n" " start: %(start)s end: %(end)s length: %(length)s\n" " device: %(device)r PedGeometry: %(ped)r" % {"start": self.start, "end": self.end, "length": self.length, "device": self.device, "ped": self.__geometry}) return s @property def device(self): """The Device this geometry describes.""" return self._device start = property(lambda s: s.__geometry.start, lambda s, v: s.__geometry.set_start(v)) end = property(lambda s: s.__geometry.end, lambda s, v: s.__geometry.set_end(v)) length = property(lambda s: s.__geometry.length, lambda s, v: s.__geometry.set(s.__geometry.start, v)) @localeC def check(self, offset, granularity, count, timer=None): """Check the region described by self for errors on the disk. offset -- The beginning of the region to check, in sectors from the start of the geometry. granularity -- How sectors should be grouped together count -- How many sectors from the region to check.""" if not timer: return self.__geometry.check(offset, granularity, count) else: return self.__geometry.check(offset, granularity, count, timer) @localeC def contains(self, b): """Return whether Geometry b is contained entirely within self and on the same physical device.""" return self.__geometry.test_inside(b.getPedGeometry()) @localeC def containsSector(self, sector): """Return whether the sectory is contained entirely within self.""" return self.__geometry.test_sector_inside(sector) @localeC def getSize(self, unit="MB"): """Return the size of the geometry in the unit specified. The unit is given as a string corresponding to one of the following abbreviations: b (bytes), KB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes). An invalid unit string will raise a SyntaxError exception. The default unit is MB.""" warnings.warn("use the getLength method", DeprecationWarning) lunit = unit.lower() size = self.length * self.device.sectorSize if lunit not in parted._exponent.keys(): raise SyntaxError, "invalid unit %s given" % (unit,) return (size / math.pow(1024.0, parted._exponent[lunit])) @localeC def getLength(self, unit='sectors'): """Return the length of the geometry in sectors. Optionally, a SI or IEC prefix followed by a 'B' may be given in order to convert the length into bytes. 
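        For a geometry of 2097152 512-byte sectors (an assumed example):

            >>> geometry.getLength()
            2097152
            >>> geometry.getLength('GiB')
            1.0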
The allowed values include B, kB, MB, GB, TB, KiB, MiB, GiB, and TiB.""" sectors = self.length if unit == "sectors": return sectors return parted.formatBytes(sectors * self.device.sectorSize, unit) @localeC def intersect(self, b): """Return a new Geometry describing the region common to both self and Geometry b. Raises ArithmeticError if the regions do not intersect.""" return Geometry(PedGeometry=self.__geometry.intersect(b.getPedGeometry())) @localeC def map(self, src, sector): """Given a Geometry src that overlaps with self and a sector inside src, this method translates the address of the sector into an address inside self. If self does not contain sector, ArithmeticError will be raised.""" return parted.Geometry(PedGeometry=self.__geometry.map(src.getPedGeometry(), sector)) @localeC def overlapsWith(self, b): """Return whether self and b are on the same device and share at least some of the same region.""" try: self.__geometry.intersect(b.getPedGeometry()) return True except ArithmeticError: return False @localeC def read(self, offset, count): """Read data from the region described by self. offset -- The number of sectors from the beginning of the region (not the beginning of the disk) to read. count -- The number of sectors to read.""" return self.__geometry.read(offset, count) @localeC def sync(self, fast=False): """Flushes all caches on the device described by self. If fast is True, the flush will be quicked by cache coherency is not guaranteed.""" if fast: return self.__geometry.sync_fast() else: return self.__geometry.sync() @localeC def write(self, buf, offset, count): """Write data into the region described by self. buf -- The data to be written. offset -- Where to start writing to region, expressed as the number of sectors from the start of the region (not the disk). count -- How many sectors of buf to write out.""" return self.__geometry.write(buf, offset, count) def getPedGeometry(self): """Return the _ped.Geometry object contained in this Geometry. For internal module use only.""" return self.__geometry pyparted-3.6/src/parted/cachedlist.py0000664000076400007640000000676011515125776014713 00000000000000# # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Chris Lumens # from collections import Sequence class CachedList(Sequence): """CachedList() Provides an immutable list that is constructed from a function that could take a while to run. This is basically the same concept as memoization, except that the function does not take any parameters and therefore there is nothing to use as a memo. 
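    A rough sketch of the intended use (buildList is a hypothetical,
    parameterless function returning a list):

        >>> cached = CachedList(buildList)
        >>> len(cached)            # first access calls buildList()
        >>> cached.invalidate()
        >>> cached[0]              # buildList() is called again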
The constructor function is provided to __init__, must not take any parameters, and must return a list. The invalidate() method indicates that the list is no longer valid and should be reconstucted by calling the function again. It is up to client code to call invalidate. The rest of the procedure is handled by this class. In all ways, this should appear to be just like a list.""" def __init__(self, lstFn): """Construct a new CachedList. The lstFn is a function that takes no parameters and returns a list. It will be called lazily - the list is not constructed until the first access, which could be quite a while after this method is called.""" Sequence.__init__(self) self._invalid = True self._lst = [] self._lstFn = lstFn def __rebuildList(self): if self._invalid: self._lst = self._lstFn() self._invalid = False def __contains__(self, value): self.__rebuildList() return self._lst.__contains__(value) def __getitem__(self, index): self.__rebuildList() return self._lst.__getitem__(index) def __iter__(self): self.__rebuildList() return self._lst.__iter__() def __len__(self): self.__rebuildList() return len(self._lst) def __reversed__(self): self.__rebuildList() return self._lst.__reversed__() def __repr__(self): self.__rebuildList() return repr(self._lst) def __str__(self): self.__rebuildList() return str(self._lst) def count(self, value): self.__rebuildList() return self._lst.count(value) def index(self, value): self.__rebuildList() return self._lst.index(value) def invalidate(self): """Indicate that the list is no longer valid, due to some external changes. The next access to the list will result in the provided list construction function being called to build a new list.""" self._invalid = True pyparted-3.6/src/parted/filesystem.py0000664000076400007640000001120311223300062014733 00000000000000# # filesystem.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import _ped import parted from decorators import localeC # XXX: add docstrings! 
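# Illustrative usage of the FileSystem class below (not from the original
# sources; geometry is assumed to be an existing parted.Geometry and libparted
# is assumed to provide an "ext3" filesystem type):
#
#     >>> fs = parted.FileSystem(type="ext3", geometry=geometry)
#     >>> fs.type
#     'ext3'
#     >>> fs.checked
#     False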
class FileSystem(object): @localeC def __init__(self, type=None, geometry=None, checked=False, PedFileSystem=None): if checked: c = 1 else: c = 0 if PedFileSystem is None: if type is None: raise parted.FileSystemException, "no type specified" elif geometry is None: raise parted.FileSystemException, "no geometry specified" self._type = type self._geometry = geometry self._checked = checked self.__fileSystem = _ped.FileSystem(type=fileSystemType[type], geom=geometry.getPedGeometry(), checked=c) else: self.__fileSystem = PedFileSystem self._type = self.__fileSystem.type.name self._geometry = parted.Geometry(PedGeometry=self.__fileSystem.geom) if self.__fileSystem.checked: self._checked = True else: self._checked = False def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True return self.type != other.type or self.geometry != other.geometry def __str__(self): s = ("parted.FileSystem instance --\n" " type: %(type)s geometry: %(geometry)r checked: %(checked)s\n" " PedFileSystem: %(ped)r" % {"type": self.type, "geometry": self.geometry, "checked": self.checked, "ped": self.__fileSystem}) return s @property def type(self): """The type of this filesystem, e.g. ext3.""" return self._type @property def geometry(self): """The Geometry object describing this filesystem.""" return self._geometry @property def checked(self): """True if this filesystem has been checked, False otherwise.""" return bool(self._checked) @localeC def clobber(self): return self.__fileSystem.clobber() @localeC def open(self): return parted.FileSystem(PedFileSystem=self.__fileSystem.open()) # XXX: this can take in a Timer @localeC def create(self): return parted.FileSystem(PedFileSystem=self.__fileSystem.create()) @localeC def close(self): return self.__fileSystem.close() # XXX: this can take in a Timer @localeC def check(self): return self.__fileSystem.check() self._checked = self.__fileSystem.checked # XXX: this can take in a Timer @localeC def copy(self, geometry): return parted.FileSystem(PedFileSystem=self.__fileSystem.copy(geometry.getPedGeometry())) # XXX: this can take in a Timer @localeC def resize(self, geometry): return self.__fileSystem.resize(geometry.getPedGeometry()) @localeC def getResizeConstraint(self): return parted.Constraint(PedConstraint=self.__fileSystem.get_resize_constraint()) def getPedFileSystem(self): """Return the _ped.FileSystem object contained in this FileSystem. For internal module use only.""" return self.__fileSystem # collect all filesystem types and store them in a hash fileSystemType = {} __type = _ped.file_system_type_get_next() fileSystemType[__type.name] = __type while True: try: __type = _ped.file_system_type_get_next(__type) fileSystemType[__type.name] = __type except: break pyparted-3.6/src/parted/device.py0000664000076400007640000003054211540272306014030 00000000000000# # device.py # Python bindings for libparted (built on top of the _ped Python module). # # Copyright (C) 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. 
You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # import math import warnings import _ped import parted from disk import diskType from decorators import localeC class Device(object): """Device() Device represents a phyiscal piece of hardware in the system, e.g. a disk. A Device should be considered a low-level and operating system specific interface to the hardware. A number of read-only properties about the Device are available. For information on the individual methods, see help(Device.METHODNAME)""" @localeC def __init__(self, path=None, PedDevice=None): """Create a new Device object based on the specified path or the already existing _ped.Device object. You must provide either a path (e.g., "/dev/sda") or an existing _ped.Device object, but not both.""" if PedDevice: self.__device = PedDevice elif path is not None: self.__device = _ped.device_get(path) else: raise parted.DeviceException, "no path or PedDevice specified" def __eq__(self, other): return not self.__ne__(other) def __ne__(self, other): if hash(self) == hash(other): return False if type(self) != type(other): return True return self.model != other.model or self.path != other.path or self.type != other.type or self.length != other.length def __getCHS(self, geometry): return (geometry.cylinders, geometry.heads, geometry.sectors) @property def model(self): """Model name and vendor of this device.""" return self.__device.model @property def path(self): """Filesystem node path of this device (e.g., /dev/sda).""" return self.__device.path @property def type(self): """Type of this device. An integer constant corresponding to one of the parted.DEVICE_* values. """ return self.__device.type @property def sectorSize(self): """Sector size (in bytes) for this device.""" return self.__device.sector_size @property def physicalSectorSize(self): """Physical sector size (in bytes) for this device. Not always the same as sectorSize, but is a multiple of sectorSize. """ return self.__device.phys_sector_size @property def length(self): """The size of this device in sectors.""" return self.__device.length @property def openCount(self): """How many times the open() method has been called on this device.""" return self.__device.open_count @property def readOnly(self): """True if the device is currently in read-only mode, False otherwise. """ return bool(self.__device.read_only) @property def externalMode(self): """True if external access mode is currently activated on this device, False otherwise. External access mode has to be used if you want to use an external command on the device while you are currently using it in pyparted. 
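        A typical pattern (illustrative only):

            >>> device.beginExternalAccess()
            >>> # run the external tool against device.path here
            >>> device.endExternalAccess()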
""" return bool(self.__device.external_mode) @property def dirty(self): """True if the device is marked dirty, False otherwise.""" return bool(self.__device.dirty) @property def bootDirty(self): """True if the device is marked boot dirty, False otherwise.""" return bool(self.__device.boot_dirty) @property def host(self): """The host value of this device.""" return self.__device.host @property def did(self): """The did value of this device.""" return self.__device.did @property def busy(self): """True if this device is busy, False otherwise.""" return self.__device.is_busy() @property def hardwareGeometry(self): """A 3-tuple representing the hardware geometry of this device. The tuple is in order of cylinders, heads, and sectors. """ return self.__getCHS(self.__device.hw_geom) @property def biosGeometry(self): """A 3-tuple representing the BIOS geometry of this device. The tuple is in order of cylinders, heads, and sectors. """ return self.__getCHS(self.__device.bios_geom) def __str__(self): s = ("parted.Device instance --\n" " model: %(model)s path: %(path)s type: %(type)s\n" " sectorSize: %(sectorSize)s physicalSectorSize: %(physSectorSize)s\n" " length: %(length)s openCount: %(openCount)s readOnly: %(readOnly)s\n" " externalMode: %(external)s dirty: %(dirty)s bootDirty: %(bootDirty)s\n" " host: %(host)s did: %(did)s busy: %(busy)s\n" " hardwareGeometry: %(hardwareGeom)s biosGeometry: %(biosGeom)s\n" " PedDevice: %(ped)r" % {"model": self.model, "path": self.path, "type": self.type, "sectorSize": self.sectorSize, "physSectorSize": self.physicalSectorSize, "length": self.length, "openCount": self.openCount, "readOnly": self.readOnly, "external": self.externalMode, "dirty": self.dirty, "bootDirty": self.bootDirty, "host": self.host, "did": self.did, "busy": self.busy, "hardwareGeom": self.hardwareGeometry, "biosGeom": self.biosGeometry, "ped": self.__device}) return s @localeC def clobber(self): """Remove all identifying signatures of the partition table.""" return self.__device.clobber() @localeC def open(self): """Open this Device for read operations.""" return self.__device.open() @localeC def close(self): """Close this Device. Used after open() method calls.""" return self.__device.close() @localeC def destroy(self): """Destroy this Device. Operating system specific.""" return self.__device.destroy() @localeC def removeFromCache(self): """Remove this Device from the internal libparted device cache.""" return self.__device.cache_remove() @localeC def beginExternalAccess(self): """Set up the Device for use by an external program. Call this method before running an external program that uses the Device.""" return self.__device.begin_external_access() @localeC def endExternalAccess(self): """Turn off external access mode for the Device. Call this method once your external program has finished using the Device.""" return self.__device.end_external_access() @localeC def read(self, start, count): """From the sector indentified by start, read and return count sectors from the Device.""" return self.__device.read(start, count) @localeC def write(self, buffer, start, count): """From the sector identified by start, write count sectors from buffer to the Device.""" return self.__device.write(buffer, start, count) @localeC def sync(self, fast=False): """Perform a operating-system specific sync(2) operation on the Device. 
If fast is True, try to perform a fast sync(2).""" if fast: return self.__device.sync_fast() else: return self.__device.sync() @localeC def check(self, start, count): """From the sector identified by start, perform an operating system specific check on count sectors.""" return self.__device.check(start, count) @localeC def startSectorToCylinder(self, sector): """Return the closest cylinder (round down) to sector on this Device.""" (cylinders, heads, sectors) = self.biosGeometry return long(math.floor((float(sector) / (heads * sectors)) + 1)) @localeC def endSectorToCylinder(self, sector): """Return the closest cylinder (round up) to sector on this Device.""" (cylinders, heads, sectors) = self.biosGeometry return long(math.ceil(float((sector + 1)) / (heads * sectors))) @localeC def startCylinderToSector(self, cylinder): """Return the sector corresponding to cylinder as a starting cylinder on this Device.""" (cylinders, heads, sectors) = self.biosGeometry return long((cylinder - 1) * (heads * sectors)) @localeC def endCylinderToSector(self, cylinder): """Return the sector corresponding to cylinder as an ending cylinder on this Device.""" (cylinders, heads, sectors) = self.biosGeometry return long(((cylinder) * (heads * sectors)) - 1) def getSize(self, unit="MB"): """Return the size of the Device in the unit specified. The unit is given as a string corresponding to one of the following abbreviations: b (bytes), KB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes). An invalid unit string will raise a SyntaxError exception. The default unit is MB.""" warnings.warn("use the getLength method", DeprecationWarning) lunit = unit.lower() if lunit not in parted._exponent.keys(): raise SyntaxError, "invalid unit %s given" % (unit,) size = float(self.__device.length) size /= math.pow(1024.0, parted._exponent[lunit]) size *= self.sectorSize return size @localeC def getLength(self, unit='sectors'): """Return the length of the device in sectors. Optionally, a SI or IEC prefix followed by a 'B' may be given in order to convert the length into bytes. The allowed values include B, kB, MB, GB, TB, KiB, MiB, GiB, and TiB.""" sectors = self.__device.length if unit == "sectors": return sectors return parted.formatBytes(sectors * self.sectorSize, unit) @localeC def getConstraint(self): """Return a Constraint defining the limitations imposed by this Device.""" return parted.Constraint(PedConstraint=self.__device.get_constraint()) @property @localeC def minimalAlignedConstraint(self): """Return a Constraint defining the limitations and minimal advisable alignment imposed by this Device.""" constraint = self.__device.get_minimal_aligned_constraint() return parted.Constraint(PedConstraint=constraint) @property @localeC def optimalAlignedConstraint(self): """Return a Constraint defining the limitations and optimal alignment imposed by this Device.""" constraint = self.__device.get_optimal_aligned_constraint() return parted.Constraint(PedConstraint=constraint) @property @localeC def minimumAlignment(self): """Return an Alignment defining the minimum alignment for this Device.""" alignment = self.__device.get_minimum_alignment() return parted.Alignment(PedAlignment=alignment) @property @localeC def optimumAlignment(self): """Return an Alignment defining the optimum alignment for this Device.""" alignment = self.__device.get_optimum_alignment() return parted.Alignment(PedAlignment=alignment) def getPedDevice(self): """Return the _ped.Device object contained in this Device. 
For internal module use only.""" return self.__device pyparted-3.6/src/convert.c0000664000076400007640000004670111341564432012573 00000000000000/* * convert.c * Convert _ped Python types to libparted typedefs. Only typedef structs * need to be converted. When a typedef in libparted is a primitive type, * we can just use it directly. * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include "convert.h" #include "exceptions.h" #include "pyconstraint.h" #include "pydevice.h" #include "pygeom.h" #include "pynatmath.h" #include "pytimer.h" #include "pyunit.h" /* * These functions convert between pyparted Python types and libparted types. * Some are structures, some are simply C primitives. Important notes: * * 1) When using a _ped_X2Y() function, you are converting a pyparted Python * type to a libparted type. If the function returns a pointer, you need * to free it when you are done using it. * 2) When using a PedX2_ped_Y() function, you are converting a libparted * type to a pyparted Python type. You will get a pointer to a PyObject * back, but don't free this variable. Python will handle clean up of * these variables through reference counts. * 3) Some functions return C primitives, so no memory management needs to * be done. * * Error handling notes: * * 1) When converting from a PyObject, first check for NULL and raise a * PyExc_Type error. * 2) When converting from a Ped*, first check for NULL and raise a * PyExc_Type error. * 3) Check the return value of all calls to other convert.c functions and * return NULL on error, but do not raise an exception. * 4) Check the return value of PyObject_new, malloc, strdup, and any other * functions that allocate memory and call PyErr_NoMemory on error. * 5) When calling a libparted function, check for error conditions and * raise the appropriate exceptions. Create new exceptions if needed. * 6) At the end of a conversion function, make sure the return value is * not NULL. Raise the appropriate exception if it is. 
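 *
 * A sketch of the ownership rule in note 1 above (illustrative only):
 *
 *     PedAlignment *align = _ped_Alignment2PedAlignment(obj);
 *     if (align == NULL)
 *         return NULL;                 /* exception has already been set */
 *     ...
 *     ped_alignment_destroy(align);    /* caller frees _ped_X2Y() results */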
*/ /* _ped_Alignment -> PedAlignment functions */ PedAlignment *_ped_Alignment2PedAlignment(PyObject *s) { PedAlignment *ret = NULL; _ped_Alignment *alignment = (_ped_Alignment *) s; if (alignment == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Alignment()"); return NULL; } ret = ped_alignment_new(alignment->offset, alignment->grain_size); if (ret == NULL) return (PedAlignment *) PyErr_NoMemory(); return ret; } _ped_Alignment *PedAlignment2_ped_Alignment(PedAlignment *alignment) { _ped_Alignment *ret = NULL; PyObject *args = NULL; if (alignment == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedAlignment()"); return NULL; } ret = (_ped_Alignment *) _ped_Alignment_Type_obj.tp_new(&_ped_Alignment_Type_obj, NULL, NULL); if (!ret) return (_ped_Alignment *) PyErr_NoMemory(); args = Py_BuildValue("LL", alignment->offset, alignment->grain_size); if (args == NULL) { goto error; } if (_ped_Alignment_Type_obj.tp_init((PyObject *) ret, args, NULL)) { goto error; } Py_DECREF(args); return ret; error: Py_XDECREF(args); Py_DECREF(ret); return NULL; } /* _ped_Constraint -> PedConstraint functions */ PedConstraint *_ped_Constraint2PedConstraint(PyObject *s) { PedConstraint *ret = NULL; PedAlignment *start_align = NULL, *end_align = NULL; PedGeometry *start_range = NULL, *end_range = NULL; _ped_Constraint *constraint = (_ped_Constraint *) s; if (constraint == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Constraint()"); return NULL; } start_align = _ped_Alignment2PedAlignment(constraint->start_align); if (start_align == NULL) { return NULL; } end_align = _ped_Alignment2PedAlignment(constraint->end_align); if (end_align == NULL) { ped_alignment_destroy(start_align); return NULL; } start_range = _ped_Geometry2PedGeometry(constraint->start_range); if (start_range == NULL) { ped_alignment_destroy(start_align); ped_alignment_destroy(end_align); return NULL; } end_range = _ped_Geometry2PedGeometry(constraint->end_range); if (end_range == NULL) { ped_alignment_destroy(start_align); ped_alignment_destroy(end_align); return NULL; } ret = ped_constraint_new(start_align, end_align, start_range, end_range, constraint->min_size, constraint->max_size); if (ret == NULL) { /* Fall through to clean up memory, but set the error condition now. 
*/ PyErr_NoMemory(); } ped_alignment_destroy(start_align); ped_alignment_destroy(end_align); return ret; } _ped_Constraint *PedConstraint2_ped_Constraint(PedConstraint *constraint) { _ped_Constraint *ret = NULL; _ped_Alignment *start_align = NULL; _ped_Alignment *end_align = NULL; _ped_Geometry *start_range = NULL; _ped_Geometry *end_range = NULL; PyObject *args = NULL; if (constraint == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedConstraint()"); return NULL; } ret = (_ped_Constraint *) _ped_Constraint_Type_obj.tp_new(&_ped_Constraint_Type_obj, NULL, NULL); if (!ret) return (_ped_Constraint *) PyErr_NoMemory(); if ((start_align = PedAlignment2_ped_Alignment(constraint->start_align)) == NULL) goto error; if ((end_align = PedAlignment2_ped_Alignment(constraint->end_align)) == NULL) goto error; if ((start_range = PedGeometry2_ped_Geometry(constraint->start_range)) == NULL) goto error; if ((end_range = PedGeometry2_ped_Geometry(constraint->end_range)) == NULL) goto error; args = Py_BuildValue("OOOOLL", start_align, end_align, start_range, end_range, constraint->min_size, constraint->max_size); if (args == NULL) { goto error; } if (_ped_Constraint_Type_obj.tp_init((PyObject *) ret, args, NULL)) { goto error; } Py_DECREF(args); Py_DECREF(start_align); Py_DECREF(end_align); Py_DECREF(start_range); Py_DECREF(end_range); return ret; error: Py_XDECREF(args); Py_XDECREF(start_align); Py_XDECREF(end_align); Py_XDECREF(start_range); Py_XDECREF(end_range); Py_DECREF(ret); return NULL; } /* _ped_Device -> PedDevice functions */ PedDevice *_ped_Device2PedDevice(PyObject *s) { _ped_Device *dev = (_ped_Device *) s; PedDevice *ret; if (dev == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Device()"); return NULL; } ret = ped_device_get(dev->path); if (ret == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(DeviceException, partedExnMessage); } else PyErr_Format(DeviceException, "Could not find device for path %s", dev->path); } return ret; } /* PedDevice -> _ped_Device functions */ _ped_Device *PedDevice2_ped_Device(PedDevice *device) { _ped_Device *ret = NULL; if (device == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedDevice"); return NULL; } ret = (_ped_Device *) _ped_Device_Type_obj.tp_alloc(&_ped_Device_Type_obj, 1); if (!ret) return (_ped_Device *) PyErr_NoMemory(); ret->model = strdup(device->model); if (ret->model == NULL) { PyErr_NoMemory(); goto error; } ret->path = strdup(device->path); if (ret->path == NULL) { PyErr_NoMemory(); goto error; } ret->type = device->type; ret->sector_size = device->sector_size; ret->phys_sector_size = device->phys_sector_size; ret->open_count = device->open_count; ret->read_only = device->read_only; ret->external_mode = device->external_mode; ret->dirty = device->dirty; ret->boot_dirty = device->boot_dirty; ret->host = device->host; ret->did = device->did; ret->length = device->length; ret->hw_geom = (PyObject *) PedCHSGeometry2_ped_CHSGeometry(&device->hw_geom); if (ret->hw_geom == NULL) goto error; ret->bios_geom = (PyObject *) PedCHSGeometry2_ped_CHSGeometry(&device->bios_geom); if (ret->bios_geom == NULL) goto error; return ret; error: Py_DECREF(ret); return NULL; } PedDisk *_ped_Disk2PedDisk(PyObject *s) { _ped_Disk *disk = (_ped_Disk *) s; if (disk == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Disk()"); return NULL; } return disk->ped_disk; } _ped_Disk *PedDisk2_ped_Disk(PedDisk *disk) { _ped_Disk *ret = NULL; if 
(disk == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedDisk()"); return NULL; } ret = (_ped_Disk *) _ped_Disk_Type_obj.tp_new(&_ped_Disk_Type_obj, NULL, NULL); if (!ret) { ped_disk_destroy(disk); return (_ped_Disk *) PyErr_NoMemory(); } ret->ped_disk = disk; ret->dev = (PyObject *) PedDevice2_ped_Device(disk->dev); if (!ret->dev) goto error; ret->type = (PyObject *) PedDiskType2_ped_DiskType(disk->type); if (!ret->type) goto error; return ret; error: Py_DECREF(ret); return NULL; } PedDiskType *_ped_DiskType2PedDiskType(PyObject *s) { PedDiskType *ret = NULL; _ped_DiskType *type = (_ped_DiskType *) s; if (type == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.DiskType()"); return NULL; } ret = ped_disk_type_get(type->name); if (ret == NULL) { PyErr_SetString(UnknownTypeException, type->name); return NULL; } return ret; } _ped_DiskType *PedDiskType2_ped_DiskType(const PedDiskType *type) { _ped_DiskType *ret = NULL; if (type == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedDiskType()"); return NULL; } ret = (_ped_DiskType *) _ped_DiskType_Type_obj.tp_alloc(&_ped_DiskType_Type_obj, 1); if (!ret) return (_ped_DiskType *) PyErr_NoMemory(); ret->name = strdup(type->name); if (ret->name == NULL) { Py_DECREF(ret); return (_ped_DiskType *) PyErr_NoMemory(); } ret->features = type->features; return ret; } /* _ped_FileSystem -> PedFileSystem functions */ PedFileSystem *_ped_FileSystem2PedFileSystem(PyObject *s) { PedFileSystem *ret = NULL; PedGeometry *geom = NULL; _ped_FileSystem *fs = (_ped_FileSystem *) s; if (fs->ped_filesystem == NULL) { if (fs == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.FileSystem"); return NULL; } geom = _ped_Geometry2PedGeometry(fs->geom); if (geom == NULL) { return NULL; } ret = ped_file_system_open(geom); if (ret == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (PyErr_ExceptionMatches(PartedException) || PyErr_ExceptionMatches(PyExc_NotImplementedError)) return NULL; PyErr_SetString(FileSystemException, partedExnMessage); return NULL; } } return ret; } else { return fs->ped_filesystem; } } _ped_FileSystem *PedFileSystem2_ped_FileSystem(PedFileSystem *fs) { _ped_FileSystem *ret = NULL; _ped_FileSystemType *type = NULL; _ped_Geometry *geom = NULL; PyObject *args = NULL; if (fs == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedFileSystem()"); return NULL; } ret = (_ped_FileSystem *) _ped_FileSystem_Type_obj.tp_new(&_ped_FileSystem_Type_obj, NULL, NULL); if (!ret) return (_ped_FileSystem *) PyErr_NoMemory(); if ((type = PedFileSystemType2_ped_FileSystemType(fs->type)) == NULL) goto error; if ((geom = PedGeometry2_ped_Geometry(fs->geom)) == NULL) goto error; args = Py_BuildValue("OOi", type, geom, fs->checked); if (args == NULL) { goto error; } if (_ped_FileSystem_Type_obj.tp_init((PyObject *) ret, args, NULL)) { goto error; } Py_DECREF(args); Py_DECREF(type); Py_DECREF(geom); return ret; error: Py_XDECREF(args); Py_XDECREF(type); Py_XDECREF(geom); Py_DECREF(ret); return NULL; } /* _ped_FileSystemType -> PedFileSystemType functions */ PedFileSystemType *_ped_FileSystemType2PedFileSystemType(PyObject *s) { PedFileSystemType *ret = NULL; _ped_FileSystemType *type = (_ped_FileSystemType *) s; if (type == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.FileSystemType"); return NULL; } if ((ret = ped_file_system_type_get(type->name)) == NULL) { PyErr_SetString(UnknownTypeException, type->name); return NULL; } return ret; } _ped_FileSystemType *PedFileSystemType2_ped_FileSystemType(const PedFileSystemType *fstype) { _ped_FileSystemType *ret 
= NULL; if (fstype == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedFileSystemType()"); return NULL; } ret = (_ped_FileSystemType *) _ped_FileSystemType_Type_obj.tp_alloc(&_ped_FileSystemType_Type_obj, 1); if (!ret) return (_ped_FileSystemType *) PyErr_NoMemory(); ret->name = strdup(fstype->name); if (ret->name == NULL) { Py_DECREF(ret); return (_ped_FileSystemType *) PyErr_NoMemory(); } return ret; } /* _ped_Geometry -> PedGeometry functions */ PedGeometry *_ped_Geometry2PedGeometry(PyObject *s) { _ped_Geometry *geometry = (_ped_Geometry *) s; if (geometry == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Geometry()"); return NULL; } return geometry->ped_geometry; } _ped_Geometry *PedGeometry2_ped_Geometry(PedGeometry *geometry) { _ped_Geometry *ret = NULL; _ped_Device *dev = NULL; PyObject *args = NULL; if (geometry == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedGeometry()"); return NULL; } ret = (_ped_Geometry *) _ped_Geometry_Type_obj.tp_new(&_ped_Geometry_Type_obj, NULL, NULL); if (!ret) return (_ped_Geometry *) PyErr_NoMemory(); if ((dev = PedDevice2_ped_Device(geometry->dev)) == NULL) goto error; args = Py_BuildValue("OLLL", dev, geometry->start, geometry->length, geometry->end); if (args == NULL) { goto error; } if (_ped_Geometry_Type_obj.tp_init((PyObject *) ret, args, NULL)) { goto error; } Py_DECREF(args); Py_DECREF(dev); return ret; error: Py_XDECREF(args); Py_XDECREF(dev); Py_DECREF(ret); return NULL; } /* _ped_CHSGeometry -> PedCHSGeometry functions */ PedCHSGeometry *_ped_CHSGeometry2PedCHSGeometry(PyObject *s) { PedCHSGeometry *ret = NULL; _ped_CHSGeometry *srcgeom = (_ped_CHSGeometry *) s; if (srcgeom == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.CHSGeometry()"); return NULL; } if ((ret = malloc(sizeof(PedCHSGeometry))) == NULL) return (PedCHSGeometry *) PyErr_NoMemory(); ret->cylinders = srcgeom->cylinders; ret->heads = srcgeom->heads; ret->sectors = srcgeom->sectors; return ret; } /* PedCHSGeometry -> _ped_CHSGeometry functions */ _ped_CHSGeometry *PedCHSGeometry2_ped_CHSGeometry(PedCHSGeometry *geom) { _ped_CHSGeometry *ret = NULL; if (geom == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedCHSGeometry()"); return NULL; } ret = (_ped_CHSGeometry *) _ped_CHSGeometry_Type_obj.tp_alloc(&_ped_CHSGeometry_Type_obj, 1); if (!ret) return (_ped_CHSGeometry *) PyErr_NoMemory(); ret->cylinders = geom->cylinders; ret->heads = geom->heads; ret->sectors = geom->sectors; return ret; } PedPartition *_ped_Partition2PedPartition(_ped_Partition *s) { if (s == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Partition()"); return NULL; } return s->ped_partition; } _ped_Partition *PedPartition2_ped_Partition(PedPartition *part, _ped_Disk *pydisk) { _ped_Partition *ret = NULL; if (part == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedPartition()"); return NULL; } if (pydisk == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped_Disk()"); return NULL; } ret = (_ped_Partition *) _ped_Partition_Type_obj.tp_new(&_ped_Partition_Type_obj, NULL, NULL); if (!ret) return (_ped_Partition *) PyErr_NoMemory(); ret->disk = (PyObject *)pydisk; Py_INCREF(ret->disk); ret->geom = (PyObject *)PedGeometry2_ped_Geometry(&part->geom); if (!ret->geom) goto error; if (part->fs_type == NULL) { ret->fs_type = Py_None; Py_INCREF(ret->fs_type); } else { ret->fs_type = (PyObject *)PedFileSystemType2_ped_FileSystemType(part->fs_type); if (!ret->fs_type) goto error; } ret->type = part->type; ret->ped_partition = part; return ret; error: Py_DECREF(ret); return NULL; } /* 
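PedPartition2_ped_Partition() holds a reference to the owning _ped.Disk so the
   wrapped PedPartition stays valid for as long as the Python object does.  A rough
   usage sketch from the Python side (assuming the Disk method table exposes
   next_partition(); the device path is only an example):

       import _ped
       dev = _ped.device_get("/dev/sda")
       disk = _ped.Disk(dev)
       part = disk.next_partition()      # wraps ped_disk_next_partition()

   Most callers should go through the higher-level parted package instead. */

/*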
_ped_Timer -> PedTimer functions */ PedTimer *_ped_Timer2PedTimer(PyObject *s) { PedTimer *ret = NULL; _ped_Timer *timer = (_ped_Timer *) s; if (timer == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Timer()"); return NULL; } if ((ret = malloc(sizeof(PedTimer))) == NULL) return (PedTimer *) PyErr_NoMemory(); ret->frac = timer->frac; ret->start = timer->start; ret->now = timer->now; ret->predicted_end = timer->predicted_end; ret->handler = timer->handler; ret->context = timer->context; ret->state_name = strdup(timer->state_name); if (ret->state_name == NULL) { free(ret); return (PedTimer *) PyErr_NoMemory(); } return ret; } /* PedTimer -> _ped_Timer functions */ _ped_Timer *PedTimer2_ped_Timer(PedTimer *timer) { _ped_Timer *ret = NULL; if (timer == NULL) { PyErr_SetString(PyExc_TypeError, "Empty PedTimer()"); return NULL; } ret = (_ped_Timer *) _ped_Timer_Type_obj.tp_new(&_ped_Timer_Type_obj, NULL, NULL); if (!ret) return (_ped_Timer *) PyErr_NoMemory(); ret->frac = timer->frac; ret->start = timer->start; ret->now = timer->now; ret->predicted_end = timer->predicted_end; ret->state_name = strdup(timer->state_name); if (ret->state_name == NULL) { Py_DECREF(ret); return (_ped_Timer *) PyErr_NoMemory(); } /* XXX: don't know what to do with these */ ret->handler = timer->handler; ret->context = timer->context; return ret; } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/Makefile.am0000664000076400007640000000346211170723402012772 00000000000000# # Makefile.am for pyparted src subdirectory # # Copyright (C) 2007, 2008, 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # SUBDIRS = parted CFLAGS += -Werror -Wmissing-prototypes -fno-strict-aliasing INCLUDES = -I$(top_srcdir)/include $(PYTHON_INCLUDES) $(LIBPARTED_INCLUDES) # install Python module code directly in to site-packages pkgpyexecdir = $(pyexecdir) pkgpythondir = $(pythondir) # libparted binding pkgpyexec_LTLIBRARIES = _pedmodule.la _pedmodule_la_CFLAGS = $(LIBPARTED_CFLAGS) _pedmodule_la_LDFLAGS = -module -avoid-version $(PYTHON_LDFLAGS) \ $(LIBPARTED_LDFLAGS) _pedmodule_la_LIBADD = $(PYTHON_LIBS) $(LIBPARTED_LIBS) _pedmodule_la_SOURCES = convert.c _pedmodule.c pyconstraint.c pydevice.c \ pydisk.c pyfilesys.c pygeom.c pynatmath.c pytimer.c \ pyunit.c MAINTAINERCLEANFILES = Makefile.in pyparted-3.6/src/pytimer.c0000664000076400007640000002307311170723402012573 00000000000000/* * pytimer.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. 
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include "convert.h" #include "exceptions.h" #include "pytimer.h" #include "typeobjects/pytimer.h" /* _ped.Timer functions */ void _ped_Timer_dealloc(_ped_Timer *self) { PyObject_GC_UnTrack(self); free(self->state_name); PyObject_GC_Del(self); } int _ped_Timer_compare(_ped_Timer *self, PyObject *obj) { _ped_Timer *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Timer_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Timer"); return -1; } comp = (_ped_Timer *) obj; if ((self->frac == comp->frac) && (self->start == comp->start) && (self->now == comp->now) && (self->predicted_end == comp->predicted_end) && (!strcmp(self->state_name, comp->state_name)) && (self->handler == comp->handler) && (self->context == comp->context)) { return 0; } else { return 1; } } PyObject *_ped_Timer_richcompare(_ped_Timer *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Timer_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Timer_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Timer"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Timer_str(_ped_Timer *self) { char *ret = NULL; if (asprintf(&ret, "_ped.Timer instance --\n" " start: %s now: %s\n" " predicted_end: %s frac: %f\n" " state_name: %s", ctime(&(self->start)), ctime(&(self->now)), ctime(&(self->predicted_end)), self->frac, self->state_name) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Timer_traverse(_ped_Timer *self, visitproc visit, void *arg) { return 0; } int _ped_Timer_clear(_ped_Timer *self) { return 0; } int _ped_Timer_init(_ped_Timer *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"frac", "start", "now", "predicted_end", "state_name", NULL}; self->state_name = NULL; /* XXX: timers aren't really done yet in pyparted */ PyErr_SetString(PyExc_NotImplementedError, NULL); return -1; if (kwds == NULL) { if (!PyArg_ParseTuple(args, "|fdddz", &self->frac, &self->start, &self->now, &self->predicted_end, &self->state_name)) return -1; } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "|fdddz", kwlist, &self->frac, &self->start, &self->now, &self->predicted_end, &self->state_name)) return -2; } /* 
self->state_name now points to the internal buffer of a PyString object, * which may be freed when its refcount drops to zero, so strdup it. */ if (self->state_name) { self->state_name = strdup(self->state_name); if (!self->state_name) { PyErr_NoMemory(); return -3; } } return 0; } PyObject *_ped_Timer_get(_ped_Timer *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Timer()"); return NULL; } if (!strcmp(member, "frac")) { return Py_BuildValue("f", self->frac); } else if (!strcmp(member, "start")) { return Py_BuildValue("d", self->start); } else if (!strcmp(member, "now")) { return Py_BuildValue("d", self->now); } else if (!strcmp(member, "predicted_end")) { return Py_BuildValue("d", self->predicted_end); } else if (!strcmp(member, "state_name")) { if (self->state_name != NULL) return PyString_FromString(self->state_name); else return PyString_FromString(""); } else { PyErr_Format(PyExc_AttributeError, "_ped.Timer object has no attribute %s", member); return NULL; } } int _ped_Timer_set(_ped_Timer *self, PyObject *value, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Timer()"); return -1; } if (!strcmp(member, "frac")) { if (!PyArg_ParseTuple(value, "f", &self->frac)) { return -1; } } else if (!strcmp(member, "start")) { self->start = PyFloat_AsDouble(value); if (PyErr_Occurred()) { return -1; } } else if (!strcmp(member, "now")) { self->now = PyFloat_AsDouble(value); if (PyErr_Occurred()) { return -1; } } else if (!strcmp(member, "predicted_end")) { self->predicted_end = PyFloat_AsDouble(value); if (PyErr_Occurred()) { return -1; } } else if (!strcmp(member, "state_name")) { self->state_name = PyString_AsString(value); if (PyErr_Occurred()) { return -1; } /* self->state_name now points to the internal buffer of a PyString obj * which may be freed when its refcount drops to zero, so strdup it. 
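 * _ped_Timer_set() below makes the same copy when the state_name attribute is
 * assigned from Python, since PyString_AsString() likewise returns only a
 * borrowed buffer.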
*/ if (self->state_name) { self->state_name = strdup(self->state_name); if (!self->state_name) { PyErr_NoMemory(); return -2; } } } else { PyErr_Format(PyExc_AttributeError, "_ped.Timer object has no attribute %s", member); return -1; } return 0; } /* 1:1 function mappings for timer.h in libparted */ PyObject *py_ped_timer_destroy(PyObject *s, PyObject *args) { Py_CLEAR(s); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_timer_new_nested(PyObject *s, PyObject *args) { float nest_frac; PedTimer *parent = NULL, *timer = NULL; _ped_Timer *ret = NULL; if (!PyArg_ParseTuple(args, "f", &nest_frac)) return NULL; parent = _ped_Timer2PedTimer(s); if (parent == NULL) { return NULL; } timer = ped_timer_new_nested(parent, nest_frac); ped_timer_destroy(parent); if (timer) { ret = PedTimer2_ped_Timer(timer); } else { PyErr_SetString(CreateException, "Could not create new nested timer"); return NULL; } ped_timer_destroy(timer); return (PyObject *) ret; } PyObject *py_ped_timer_destroy_nested(PyObject *s, PyObject *args) { PedTimer *timer = NULL; timer = _ped_Timer2PedTimer(s); if (timer == NULL) { return NULL; } ped_timer_destroy_nested(timer); ped_timer_destroy(timer); Py_CLEAR(s); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_timer_touch(PyObject *s, PyObject *args) { PedTimer *timer = NULL; timer = _ped_Timer2PedTimer(s); if (timer == NULL) { return NULL; } ped_timer_touch(timer); ped_timer_destroy(timer); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_timer_reset(PyObject *s, PyObject *args) { PedTimer *timer = NULL; timer = _ped_Timer2PedTimer(s); if (timer == NULL) { return NULL; } ped_timer_reset(timer); ped_timer_destroy(timer); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_timer_update(PyObject *s, PyObject *args) { float frac; PedTimer *timer = NULL; if (!PyArg_ParseTuple(args, "f", &frac)) return NULL; timer = _ped_Timer2PedTimer(s); if (timer == NULL) { return NULL; } ped_timer_update(timer, frac); ped_timer_destroy(timer); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_timer_set_state_name(PyObject *s, PyObject *args) { char *str = NULL; PedTimer *timer = NULL; if (!PyArg_ParseTuple(args, "z", &str)) { return NULL; } timer = _ped_Timer2PedTimer(s); if (timer == NULL) { return NULL; } ped_timer_set_state_name(timer, str); ped_timer_destroy(timer); free(str); Py_INCREF(Py_None); return Py_None; } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/pydisk.c0000664000076400007640000014755011514547437012432 00000000000000/* * pydisk.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include #include "convert.h" #include "exceptions.h" #include "pydisk.h" #include "docstrings/pydisk.h" #include "typeobjects/pydisk.h" /* _ped.Partition functions */ void _ped_Partition_dealloc(_ped_Partition *self) { PyObject_GC_UnTrack(self); Py_CLEAR(self->disk); self->disk = NULL; Py_CLEAR(self->geom); self->geom = NULL; Py_CLEAR(self->fs_type); self->fs_type = NULL; PyObject_GC_Del(self); } int _ped_Partition_compare(_ped_Partition *self, PyObject *obj) { _ped_Partition *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Partition_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Partition"); return -1; } comp = (_ped_Partition *) obj; if ((_ped_Disk_Type_obj.tp_richcompare(self->disk, comp->disk, Py_EQ)) && (_ped_Geometry_Type_obj.tp_richcompare(self->geom, comp->geom, Py_EQ)) && (self->ped_partition->num == comp->ped_partition->num) && (self->type == comp->type) && (_ped_FileSystemType_Type_obj.tp_richcompare(self->fs_type, comp->fs_type, Py_EQ))) { return 0; } else { return 1; } } PyObject *_ped_Partition_richcompare(_ped_Partition *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Partition_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Partition_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Partition"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Partition_str(_ped_Partition *self) { char *ret = NULL; char *disk = NULL, *fs_type = NULL, *geom = NULL; disk = PyString_AsString(_ped_Disk_Type_obj.tp_repr(self->disk)); if (disk == NULL) { return NULL; } fs_type = PyString_AsString(_ped_FileSystemType_Type_obj.tp_repr(self->fs_type)); if (fs_type == NULL) { return NULL; } geom = PyString_AsString(_ped_Geometry_Type_obj.tp_repr(self->geom)); if (geom == NULL) { return NULL; } if (asprintf(&ret, "_ped.Partition instance --\n" " disk: %s fs_type: %s\n" " num: %d type: %d\n" " geom: %s", disk, fs_type, self->ped_partition->num, self->type, geom) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Partition_traverse(_ped_Partition *self, visitproc visit, void *arg) { int err; if (self->disk) { if ((err = visit(self->disk, arg))) { return err; } } if (self->geom) { if ((err = visit(self->geom, arg))) { return err; } } if (self->fs_type) { if ((err = visit(self->fs_type, arg))) { return err; } } return 0; } int _ped_Partition_clear(_ped_Partition *self) { Py_CLEAR(self->disk); self->disk = NULL; Py_CLEAR(self->geom); self->geom = NULL; Py_CLEAR(self->fs_type); self->fs_type = NULL; return 0; } int _ped_Partition_init(_ped_Partition *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"disk", "type", "start", "end", "fs_type", NULL}; PedSector start, end; PedDisk *disk = NULL; PedFileSystemType *fstype = NULL; PedPartition *part = NULL; self->fs_type = Py_None; if (kwds == NULL) { if (!PyArg_ParseTuple(args, "O!iLL|O!", &_ped_Disk_Type_obj, &self->disk, &self->type, &start, &end, &_ped_FileSystemType_Type_obj, &self->fs_type)) { self->disk = self->fs_type = NULL; return -1; } } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!iLL|O!", kwlist, 
&_ped_Disk_Type_obj, &self->disk, &self->type, &start, &end, &_ped_FileSystemType_Type_obj, &self->fs_type)) { self->disk = self->fs_type = NULL; return -1; } } /* * try to call libparted with provided information, * on failure, raise an exception */ disk = _ped_Disk2PedDisk(self->disk); if (self->fs_type != Py_None) fstype = _ped_FileSystemType2PedFileSystemType(self->fs_type); part = ped_partition_new(disk, self->type, fstype, start, end); if (part == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) { PyErr_SetString(PartitionException, partedExnMessage); } } else { PyErr_Format(PartitionException, "Could not create new partition on device %s", disk->dev->path); } self->disk = self->fs_type = NULL; return -3; } /* increment reference count for PyObjects read by PyArg_ParseTuple */ Py_INCREF(self->disk); Py_INCREF(self->fs_type); /* copy in non-PyObject object members generated by libparted */ self->type = part->type; /* * copy in PyObject object members generated by libparted * first, we drop the reference count to zero and set it to NULL * second, we convert the libparted type to a PyObject */ Py_CLEAR(self->geom); self->geom = (PyObject *) PedGeometry2_ped_Geometry(&(part->geom)); if (self->geom == NULL) { Py_CLEAR(self->disk); Py_CLEAR(self->fs_type); ped_partition_destroy(part); return -4; } self->ped_partition = part; /* On creation the object is not owned by any disk */ self->_owned = 0; return 0; } PyObject *_ped_Partition_get(_ped_Partition *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Partition()"); return NULL; } if (!strcmp(member, "num")) { return Py_BuildValue("i", self->ped_partition->num); } else if (!strcmp(member, "type")) { return PyLong_FromLongLong(self->type); } else { PyErr_Format(PyExc_AttributeError, "_ped.Partition object has no attribute %s", member); return NULL; } } int _ped_Partition_set(_ped_Partition *self, PyObject *value, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Partition()"); return -1; } if (!strcmp(member, "type")) { self->type = PyInt_AsLong(value); if (PyErr_Occurred()) { return -1; } } else { PyErr_Format(PyExc_AttributeError, "_ped.Partition object has no attribute %s", member); return -1; } return 0; } /* _ped.Disk functions */ void _ped_Disk_dealloc(_ped_Disk *self) { if (self->ped_disk) { ped_disk_destroy(self->ped_disk); } PyObject_GC_UnTrack(self); Py_CLEAR(self->dev); self->dev = NULL; Py_CLEAR(self->type); self->type = NULL; PyObject_GC_Del(self); } int _ped_Disk_compare(_ped_Disk *self, PyObject *obj) { _ped_Disk *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Disk_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Disk"); return -1; } comp = (_ped_Disk *) obj; if ((_ped_Device_Type_obj.tp_richcompare(self->dev, comp->dev, Py_EQ)) && (_ped_DiskType_Type_obj.tp_richcompare(self->type, comp->type, Py_EQ))) { return 0; } else { return 1; } } PyObject *_ped_Disk_richcompare(_ped_Disk *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Disk_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Disk_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == 
Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Disk"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Disk_str(_ped_Disk *self) { char *ret = NULL; char *dev = NULL, *type = NULL; dev = PyString_AsString(_ped_Device_Type_obj.tp_repr(self->dev)); if (dev == NULL) { return NULL; } type = PyString_AsString(_ped_Device_Type_obj.tp_repr(self->type)); if (type == NULL) { return NULL; } if (asprintf(&ret, "_ped.Disk instance --\n dev: %s type: %s", dev, type) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Disk_traverse(_ped_Disk *self, visitproc visit, void *arg) { int err; if (self->dev) { if ((err = visit(self->dev, arg))) { return err; } } if (self->type) { if ((err = visit(self->type, arg))) { return err; } } return 0; } int _ped_Disk_clear(_ped_Disk *self) { Py_CLEAR(self->dev); self->dev = NULL; Py_CLEAR(self->type); self->type = NULL; return 0; } int _ped_Disk_init(_ped_Disk *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"dev", NULL}; PedDevice *device = NULL; PedDisk *disk = NULL; if (kwds == NULL) { if (!PyArg_ParseTuple(args, "O!", &_ped_Device_Type_obj, &self->dev)) { self->dev = NULL; return -1; } } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist, &_ped_Device_Type_obj, &self->dev)) { self->dev = NULL; return -2; } } device = _ped_Device2PedDevice(self->dev); if (device == NULL) { self->dev = NULL; return -3; } disk = ped_disk_new(device); if (disk == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) { PyErr_SetString(DiskLabelException, partedExnMessage); } } else { PyErr_Format(IOException, "Failed to read partition table from device %s", device->path); } self->dev = NULL; return -4; } Py_INCREF(self->dev); self->type = (PyObject *) PedDiskType2_ped_DiskType((PedDiskType *) disk->type); self->ped_disk = disk; return 0; } /* _ped.DiskType functions */ void _ped_DiskType_dealloc(_ped_DiskType *self) { PyObject_GC_UnTrack(self); free(self->name); PyObject_GC_Del(self); } int _ped_DiskType_compare(_ped_DiskType *self, PyObject *obj) { _ped_DiskType *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_DiskType_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.DiskType"); return -1; } comp = (_ped_DiskType *) obj; if ((!strcmp(self->name, comp->name)) && (self->features == comp->features)) { return 0; } else { return 1; } } PyObject *_ped_DiskType_richcompare(_ped_DiskType *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_DiskType_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_DiskType_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.DiskType"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_DiskType_str(_ped_DiskType *self) { char *ret = NULL; if (asprintf(&ret, "_ped.DiskType instance --\n" " name: %s features: %lld", self->name, self->features) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int 
_ped_DiskType_traverse(_ped_DiskType *self, visitproc visit, void *arg) { return 0; } int _ped_DiskType_clear(_ped_DiskType *self) { return 0; } PyObject *_ped_DiskType_get(_ped_DiskType *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.DiskType()"); return NULL; } if (!strcmp(member, "name")) { if (self->name != NULL) return PyString_FromString(self->name); else return PyString_FromString(""); } else if (!strcmp(member, "features")) { return PyLong_FromLongLong(self->features); } else { PyErr_Format(PyExc_AttributeError, "_ped.DiskType object has no attribute %s", member); return NULL; } } /* 1:1 function mappings for disk.h in libparted */ PyObject *py_ped_disk_type_get_next(PyObject *s, PyObject *args) { PyObject *in_type = NULL; PedDiskType *cur = NULL, *next = NULL; _ped_DiskType *ret = NULL; if (!PyArg_ParseTuple(args, "|O!", &_ped_DiskType_Type_obj, &in_type)) { return NULL; } if (in_type) { cur = _ped_DiskType2PedDiskType(in_type); if (!cur) { return NULL; } } next = ped_disk_type_get_next(cur); if (next) { ret = PedDiskType2_ped_DiskType(next); return (PyObject *) ret; } else { PyErr_SetNone(PyExc_IndexError); return NULL; } } PyObject *py_ped_disk_type_get(PyObject *s, PyObject *args) { char *in_name = NULL; PedDiskType *out_type = NULL; _ped_DiskType *ret = NULL; if (!PyArg_ParseTuple(args, "s", &in_name)) { return NULL; } if (in_name) { out_type = ped_disk_type_get(in_name); if (out_type == NULL) { PyErr_SetString(UnknownTypeException, in_name); return NULL; } ret = PedDiskType2_ped_DiskType(out_type); if (ret == NULL) { return NULL; } } return (PyObject *) ret; } PyObject *py_ped_disk_type_check_feature(PyObject *s, PyObject *args) { PedDiskType *disktype = NULL; PedDiskTypeFeature feature = -1; int ret = 0; if (!PyArg_ParseTuple(args, "i", &feature)) { return NULL; } disktype = _ped_DiskType2PedDiskType(s); if (disktype) { ret = ped_disk_type_check_feature(disktype, feature); } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_clobber(PyObject *s, PyObject *args) { PedDevice *device = NULL; int ret = 0; device = _ped_Device2PedDevice(s); if (device == NULL) return NULL; ret = ped_disk_clobber(device); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DiskException, "Failed to clobber partition table on device %s", device->path); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } /* XXX: is this necessary? */ PyObject *py_ped_disk_duplicate(PyObject *s, PyObject *args) { PedDisk *disk = NULL, *pass_disk = NULL; _ped_Disk *ret = NULL; disk = _ped_Disk2PedDisk(s); if (disk) { pass_disk = ped_disk_duplicate(disk); if (pass_disk == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DiskException, "Could not duplicate device %s", disk->dev->path); return NULL; } ret = PedDisk2_ped_Disk(pass_disk); if (ret == NULL) { return NULL; } } else { return NULL; } return (PyObject *) ret; } /* * XXX: * We need to call ped_disk_destroy() to make sure the OS-specific * free() function is called on the disk. That flushes buffers, * closes handles, arms, and crosschecks and whatnot. 
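 *
 * Note that _ped_Disk_dealloc() already calls ped_disk_destroy() on
 * self->ped_disk when the Python object is collected, so most callers can
 * simply drop their _ped.Disk reference and let that cleanup run.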
* * Similar to py_ped_device_destroy(), we might move the call to * ped_disk_destroy() to the tp_clear() for _ped.Disk. */ PyObject *py_ped_disk_destroy(PyObject *s, PyObject *args) { PedDisk *disk = NULL; disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } ped_disk_destroy(disk); Py_CLEAR(s); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_disk_commit(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_commit(disk); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DiskException, "Could not commit to disk %s, (%s)", disk->dev->path, __func__); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_commit_to_dev(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_commit_to_dev(disk); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DiskException, "Could not commit to disk %s, (%s)", disk->dev->path, __func__); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_commit_to_os(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_commit_to_os(disk); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DiskException, "Could not commit to disk %s, (%s)", disk->dev->path, __func__); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_check(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_check(disk); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_Format(DiskException, "Could not check disk %s", disk->dev->path); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_print(PyObject *s, PyObject *args) { PedDisk *disk = NULL; disk = _ped_Disk2PedDisk(s); if (disk) { ped_disk_print(disk); } else { return NULL; } Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_disk_get_primary_partition_count(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_get_primary_partition_count(disk); } else { return NULL; } return PyInt_FromLong(ret); } PyObject *py_ped_disk_get_last_partition_num(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_get_last_partition_num(disk); } else { return NULL; } return PyInt_FromLong(ret); } PyObject *py_ped_disk_get_max_primary_partition_count(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = 
ped_disk_get_max_primary_partition_count(disk); } else { return NULL; } return PyInt_FromLong(ret); } PyObject *py_ped_disk_get_max_supported_partition_count(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int max = 0; disk = _ped_Disk2PedDisk(s); if (disk) { if (ped_disk_get_max_supported_partition_count(disk, &max) == true) { return Py_BuildValue("i", max); } } Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_disk_get_partition_alignment(PyObject *s, PyObject *args) { PedDisk *disk = NULL; PedAlignment *alignment = NULL; _ped_Alignment *ret = NULL; disk = _ped_Disk2PedDisk(s); if (!disk) return NULL; alignment = ped_disk_get_partition_alignment(disk); if (!alignment) { PyErr_SetString(CreateException, "Could not get alignment for device"); return NULL; } ret = PedAlignment2_ped_Alignment(alignment); ped_alignment_destroy(alignment); return (PyObject *) ret; } PyObject *py_ped_disk_max_partition_length(PyObject *s, PyObject *args) { PedDisk *disk = NULL; disk = _ped_Disk2PedDisk(s); if (!disk) return NULL; return PyLong_FromUnsignedLongLong(ped_disk_max_partition_length(disk)); } PyObject *py_ped_disk_max_partition_start_sector(PyObject *s, PyObject *args) { PedDisk *disk = NULL; disk = _ped_Disk2PedDisk(s); if (!disk) return NULL; return PyLong_FromUnsignedLongLong(ped_disk_max_partition_start_sector(disk)); } PyObject *py_ped_disk_set_flag(PyObject *s, PyObject *args) { int ret, flag, state; PedDisk *disk = NULL; if (!PyArg_ParseTuple(args, "ii", &flag, &state)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } ret = ped_disk_set_flag(disk, flag, state); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(DiskException, partedExnMessage); } else PyErr_Format(DiskException, "Could not set flag on disk %s", disk->dev->path); return NULL; } Py_RETURN_TRUE; } PyObject *py_ped_disk_get_flag(PyObject *s, PyObject *args) { int flag; PedDisk *disk = NULL; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } if (ped_disk_get_flag(disk, flag)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_is_flag_available(PyObject *s, PyObject *args) { int flag; PedDisk *disk = NULL; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } if (ped_disk_is_flag_available(disk, flag)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_flag_get_name(PyObject *s, PyObject *args) { int flag; char *ret = NULL; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } if ((flag < PED_DISK_FIRST_FLAG) || (flag > PED_DISK_LAST_FLAG)) { PyErr_SetString(PyExc_ValueError, "Invalid flag provided."); return NULL; } ret = (char *) ped_disk_flag_get_name(flag); if (ret == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(DiskException, partedExnMessage); } else PyErr_Format(DiskException, "Could not get disk flag name for %d", flag); return NULL; } return PyString_FromString(ret); } PyObject *py_ped_disk_flag_get_by_name(PyObject *s, PyObject *args) { char *name = NULL; if (!PyArg_ParseTuple(args, "s", &name)) { return NULL; } return PyLong_FromLongLong(ped_disk_flag_get_by_name(name)); } PyObject *py_ped_disk_flag_next(PyObject *s, PyObject *args) { int flag; if 
(!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } return Py_BuildValue("i", ped_disk_flag_next(flag)); } /* * XXX: * We need to call ped_disk_destroy() to make sure the OS-specific * free() function is called on the disk. That flushes buffers, * closes handles, arms, and crosschecks and whatnot. * * Similar to py_ped_device_destroy(), we might move the call to * ped_disk_destroy() to the tp_clear() for _ped.Disk. */ PyObject *py_ped_partition_destroy(_ped_Partition *s, PyObject *args) { PedPartition *partition = NULL; partition = _ped_Partition2PedPartition(s); if (partition == NULL) { return NULL; } ped_partition_destroy(partition); Py_CLEAR(s); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_partition_is_active(_ped_Partition *s, PyObject *args) { PedPartition *partition = NULL; int ret = 0; partition = _ped_Partition2PedPartition(s); if (partition) { ret = ped_partition_is_active(partition); } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_set_flag(_ped_Partition *s, PyObject *args) { int in_state = -1; PedPartition *part = NULL; int flag; int ret = 0; if (!PyArg_ParseTuple(args, "ii", &flag, &in_state)) { return NULL; } part = _ped_Partition2PedPartition(s); if (part == NULL) { return NULL; } if (part && flag && in_state > -1) { ret = ped_partition_set_flag(part, flag, in_state); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not set flag on partition %s%d", part->disk->dev->path, part->num); return NULL; } } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_get_flag(_ped_Partition *s, PyObject *args) { PedPartition *part = NULL; int flag; int ret = -1; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } part = _ped_Partition2PedPartition(s); if (part == NULL) { return NULL; } /* ped_partition_get_flag will assert on this. */ if (!ped_partition_is_active(part)) { PyErr_Format(PartitionException, "Could not get flag on inactive partition %s%d", part->disk->dev->path, part->num); return NULL; } ret = ped_partition_get_flag(part, flag); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_is_flag_available(_ped_Partition *s, PyObject *args) { PedPartition *part = NULL; int flag; int ret = 0; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } part = _ped_Partition2PedPartition(s); if (part == NULL) { return NULL; } if (!ped_partition_is_active(part)) { PyErr_Format(PartitionException, "Flag is not available on inactive partition %s%d", part->disk->dev->path, part->num); return NULL; } ret = ped_partition_is_flag_available(part, flag); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_set_system(_ped_Partition *s, PyObject *args) { PyObject *in_fstype = NULL; PedPartition *part = NULL; PedFileSystemType *out_fstype = NULL; int ret = 0; if (!PyArg_ParseTuple(args, "O!", &_ped_FileSystemType_Type_obj, &in_fstype)) { return NULL; } part = _ped_Partition2PedPartition(s); if (part == NULL) { return NULL; } out_fstype = _ped_FileSystemType2PedFileSystemType(in_fstype); if (out_fstype == NULL) { return NULL; } /* ped_partition_set_system will assert on this. 
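   Check for an inactive partition here and raise PartitionException instead,
   so a bad call from Python cannot trip that assertion inside libparted.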
*/ if (!ped_partition_is_active(part)) { PyErr_Format(PartitionException, "Could not set system flag on inactive partition %s%d", part->disk->dev->path, part->num); return NULL; } ret = ped_partition_set_system(part, out_fstype); if (ret == 0) { PyErr_Format(PartitionException, "Could not set system flag on partition %s%d", part->disk->dev->path, part->num); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_set_name(_ped_Partition *s, PyObject *args) { PedPartition *part = NULL; char *in_name = NULL; int ret = 0; if (!PyArg_ParseTuple(args, "s", &in_name)) { return NULL; } part = _ped_Partition2PedPartition(s); if (part == NULL) { return NULL; } /* ped_partition_set_name will assert on this. */ if (!ped_partition_is_active(part)) { PyErr_Format(PartitionException, "Could not set system flag on inactive partition %s%d", part->disk->dev->path, part->num); return NULL; } if (part) { ret = ped_partition_set_name(part, in_name); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not set name on partition %s%d", part->disk->dev->path, part->num); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_get_name(_ped_Partition *s, PyObject *args) { PedPartition *part = NULL; char *ret = NULL; part = _ped_Partition2PedPartition(s); if (part == NULL) { return NULL; } /* ped_partition_get_name will assert on this. */ if (!ped_partition_is_active(part)) { PyErr_Format(PartitionException, "Could not get name on inactive partition %s%d", part->disk->dev->path, part->num); return NULL; } if (part) { ret = (char *) ped_partition_get_name(part); if (ret == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not read name on partition %s%d", part->disk->dev->path, part->num); return NULL; } } else { return NULL; } return PyString_FromString(ret); } PyObject *py_ped_partition_is_busy(_ped_Partition *s, PyObject *args) { PedPartition *part = NULL; int ret = 0; part = _ped_Partition2PedPartition(s); if (part) { ret = ped_partition_is_busy(part); } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_partition_get_path(_ped_Partition *s, PyObject *args) { PedPartition *part = NULL; char *ret = NULL; part = _ped_Partition2PedPartition(s); if (part) { ret = ped_partition_get_path(part); if (ret == NULL) { PyErr_Format(PartitionException, "Could not get path for partition %s%d", part->disk->dev->path, part->num); return NULL; } } else { return NULL; } return PyString_FromString(ret); } PyObject *py_ped_partition_type_get_name(PyObject *s, PyObject *args) { long type; char *ret = NULL; if (!PyArg_ParseTuple(args, "i", &type)) { return NULL; } if (type) { ret = (char *) ped_partition_type_get_name(type); } if (ret != NULL) return PyString_FromString(ret); else return PyString_FromString(""); } PyObject *py_ped_partition_flag_get_name(PyObject *s, PyObject *args) { int flag; char *ret = NULL; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } if ((flag < PED_PARTITION_FIRST_FLAG) || (flag > PED_PARTITION_LAST_FLAG)) { 
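/* Reject values outside libparted's known partition flag range instead of
           handing them to ped_partition_flag_get_name(). */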
PyErr_SetString(PyExc_ValueError, "Invalid flag provided."); return NULL; } if (flag) { ret = (char *) ped_partition_flag_get_name(flag); if (!ret) { /* Re-raise the libparted exception. */ partedExnRaised = 0; return NULL; } } return PyString_FromString(ret); } PyObject *py_ped_partition_flag_get_by_name(PyObject *s, PyObject *args) { char *name = NULL; if (!PyArg_ParseTuple(args, "s", &name)) { return NULL; } return PyLong_FromLongLong(ped_partition_flag_get_by_name(name)); } PyObject *py_ped_partition_flag_next(PyObject *s, PyObject *args) { int flag; if (!PyArg_ParseTuple(args, "i", &flag)) { return NULL; } return Py_BuildValue("i", ped_partition_flag_next(flag)); } PyObject *py_ped_disk_add_partition(PyObject *s, PyObject *args) { _ped_Partition *in_part = NULL; PyObject *in_constraint = NULL; PedDisk *disk = NULL; PedPartition *out_part = NULL; PedConstraint *out_constraint = NULL; int ret = 0; if (!PyArg_ParseTuple(args, "O!|O!",&_ped_Partition_Type_obj, &in_part, &_ped_Constraint_Type_obj, &in_constraint)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } /* It makes no sense to add an already-owned partition */ if (in_part->_owned == 1) { PyErr_SetString(PartitionException, "Attempting to add a partition " "that is already owned by a disk."); return NULL; } out_part = _ped_Partition2PedPartition(in_part); if (out_part == NULL) { return NULL; } if (out_part->disk != disk) { PyErr_SetString(PartitionException, "Cannot add a partition to a different disk than the one used to create the partition"); return NULL; } if (in_constraint) { out_constraint = _ped_Constraint2PedConstraint(in_constraint); if (out_constraint == NULL) { return NULL; } } ret = ped_disk_add_partition(disk, out_part, out_constraint); if (out_constraint) ped_constraint_destroy(out_constraint); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not create partition %s%d", out_part->disk->dev->path, out_part->num); return NULL; } /* update our _ped.Partition object with out_part values */ in_part->type = out_part->type; in_part->_owned = 1; *((_ped_Geometry *)in_part->geom)->ped_geometry = out_part->geom; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_remove_partition(PyObject *s, PyObject *args) { _ped_Partition *in_part = NULL; PedDisk *disk = NULL; PedPartition *out_part = NULL; int ret = 0; if (!PyArg_ParseTuple(args, "O!", &_ped_Partition_Type_obj, &in_part)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } /* It makes no sense to remove an unowned partition */ if (in_part->_owned == 0) { PyErr_SetString(PartitionException, "Attempting to remove a partition " "that is not owned by any disk."); return NULL; } out_part = _ped_Partition2PedPartition(in_part); if (out_part == NULL) { return NULL; } if (out_part->disk != disk) { PyErr_SetString(PartitionException, "Partition is not part of the disk it is being removed from"); return NULL; } if (out_part->part_list != NULL) { PedPartition *part; for (part = out_part->part_list; part; part = part->next) { if (ped_partition_is_active(part)) break; } if (part) { PyErr_SetString(PartitionException, "Attempting to remove an extended partition that still contains logical partitions"); return NULL; } } ret = ped_disk_remove_partition(disk, out_part); if (ret == 0) { if (partedExnRaised) { 
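/* Translate libparted's pending error into a PartitionException unless the
               exception handler already raised a PartedException or NotImplementedError. */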
partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not remove partition %s%d", out_part->disk->dev->path, out_part->num); return NULL; } in_part->_owned = 0; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_delete_partition(PyObject *s, PyObject *args) { return py_ped_disk_remove_partition(s, args); } PyObject *py_ped_disk_delete_all(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_delete_all(disk); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not remove all partitions on %s", disk->dev->path); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_set_partition_geom(PyObject *s, PyObject *args) { _ped_Partition *in_part = NULL; PyObject *in_constraint = NULL; PedDisk *disk = NULL; PedPartition *out_part = NULL; PedConstraint *out_constraint = NULL; PedSector start, end; int ret = 0; if (!PyArg_ParseTuple(args, "O!OLL", &_ped_Partition_Type_obj, &in_part, &in_constraint, &start, &end)) { return NULL; } if (in_constraint != Py_None && !PyObject_IsInstance(in_constraint, (PyObject *)&_ped_Constraint_Type_obj)) { PyErr_SetString(PyExc_ValueError, "invalid constraint type"); return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } out_part = _ped_Partition2PedPartition(in_part); if (out_part == NULL) { return NULL; } if (out_part->disk != disk) { PyErr_SetString(PartitionException, "partition.disk does not match disk"); return NULL; } if (in_constraint != Py_None) { out_constraint = _ped_Constraint2PedConstraint(in_constraint); if (out_constraint == NULL) { return NULL; } } ret = ped_disk_set_partition_geom(disk, out_part, out_constraint, start, end); if (out_constraint) ped_constraint_destroy(out_constraint); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not set geometry on %s%d", disk->dev->path, out_part->num); return NULL; } *((_ped_Geometry *)in_part->geom)->ped_geometry = out_part->geom; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_maximize_partition(PyObject *s, PyObject *args) { _ped_Partition *in_part = NULL; PyObject *in_constraint = NULL; PedDisk *disk = NULL; PedPartition *out_part = NULL; PedConstraint *out_constraint = NULL; int ret = 0; if (!PyArg_ParseTuple(args, "O!|O!", &_ped_Partition_Type_obj, &in_part, &_ped_Constraint_Type_obj, &in_constraint)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } out_part = _ped_Partition2PedPartition(in_part); if (out_part == NULL) { return NULL; } if (out_part->disk != disk) { PyErr_SetString(PartitionException, "partition.disk does not match disk"); return NULL; } if (in_constraint) { out_constraint = _ped_Constraint2PedConstraint(in_constraint); if (out_constraint == NULL) { return NULL; } } ret = ped_disk_maximize_partition(disk, out_part, out_constraint); if (out_constraint) 
ped_constraint_destroy(out_constraint); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not maximize partition size for %s%d", disk->dev->path, out_part->num); return NULL; } *((_ped_Geometry *)in_part->geom)->ped_geometry = out_part->geom; if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_get_max_partition_geometry(PyObject *s, PyObject *args) { _ped_Partition *in_part = NULL; PyObject *in_constraint = NULL; PedDisk *disk = NULL; PedPartition *out_part = NULL; PedConstraint *out_constraint = NULL; PedGeometry *pass_geom = NULL; _ped_Geometry *ret = NULL; if (!PyArg_ParseTuple(args, "O!|O!", &_ped_Partition_Type_obj, &in_part, &_ped_Constraint_Type_obj, &in_constraint)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } out_part = _ped_Partition2PedPartition(in_part); if (out_part == NULL) { return NULL; } if (out_part->disk != disk) { PyErr_SetString(PartitionException, "partition.disk does not match disk"); return NULL; } if (in_constraint) { out_constraint = _ped_Constraint2PedConstraint(in_constraint); if (out_constraint == NULL) { return NULL; } } pass_geom = ped_disk_get_max_partition_geometry(disk, out_part, out_constraint); if (out_constraint) ped_constraint_destroy(out_constraint); if (pass_geom == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not get maximum partition size for %s%d", disk->dev->path, out_part->num); return NULL; } ret = PedGeometry2_ped_Geometry(pass_geom); if (ret == NULL) { return NULL; } return (PyObject *) ret; } PyObject *py_ped_disk_minimize_extended_partition(PyObject *s, PyObject *args) { PedDisk *disk = NULL; int ret = 0; disk = _ped_Disk2PedDisk(s); if (disk) { ret = ped_disk_minimize_extended_partition(disk); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(PartitionException, partedExnMessage); } else PyErr_Format(PartitionException, "Could not shrink extended partition on %s", disk->dev->path); return NULL; } } else { return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_disk_next_partition(PyObject *s, PyObject *args) { _ped_Partition *in_part = NULL; PedDisk *disk = NULL; PedPartition *out_part = NULL; PedPartition *pass_part = NULL; _ped_Partition *ret = NULL; if (!PyArg_ParseTuple(args, "|O!", &_ped_Partition_Type_obj, &in_part)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } if (in_part) { out_part = _ped_Partition2PedPartition(in_part); if (out_part == NULL) { return NULL; } if (out_part->disk != disk) { PyErr_SetString(PartitionException, "partition.disk does not match disk"); return NULL; } } pass_part = ped_disk_next_partition(disk, out_part); if (pass_part == NULL) { Py_INCREF(Py_None); return Py_None; } ret = PedPartition2_ped_Partition(pass_part, (_ped_Disk *)s); if (ret == NULL) { return NULL; } ret->_owned = 1; return (PyObject *) ret; } PyObject *py_ped_disk_get_partition(PyObject *s, PyObject *args) { int num; PedDisk *disk = NULL; PedPartition *pass_part = NULL; 
_ped_Partition *ret = NULL; if (!PyArg_ParseTuple(args, "i", &num)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk) { pass_part = ped_disk_get_partition(disk, num); if (pass_part == NULL) { PyErr_SetString(PartitionException, "Partition does not exist"); return NULL; } ret = PedPartition2_ped_Partition(pass_part, (_ped_Disk *)s); if (ret == NULL) { return NULL; } } else { return NULL; } ret->_owned = 1; return (PyObject *) ret; } PyObject *py_ped_disk_get_partition_by_sector(PyObject *s, PyObject *args) { PedDisk *disk = NULL; PedSector sector; PedPartition *pass_part = NULL; _ped_Partition *ret = NULL; if (!PyArg_ParseTuple(args, "L", &sector)) { return NULL; } disk = _ped_Disk2PedDisk(s); if (disk == NULL) { return NULL; } pass_part = ped_disk_get_partition_by_sector(disk, sector); if (pass_part == NULL) { PyErr_SetString(PartitionException, "Partition does not exist"); return NULL; } ret = PedPartition2_ped_Partition(pass_part, (_ped_Disk *)s); if (ret == NULL) { return NULL; } ret->_owned = 1; return (PyObject *) ret; } PyObject *py_ped_disk_extended_partition(PyObject *s, PyObject *args) { PedDisk *disk = NULL; PedPartition *pass_part = NULL; _ped_Partition *ret = NULL; disk = _ped_Disk2PedDisk(s); if (disk) { pass_part = ped_disk_extended_partition(disk); if (pass_part == NULL) { PyErr_SetString(PartitionException, "Extended partition does not exist"); return NULL; } ret = PedPartition2_ped_Partition(pass_part, (_ped_Disk *)s); if (ret == NULL) { return NULL; } } else { return NULL; } ret->_owned = 1; return (PyObject *) ret; } PyObject *py_ped_disk_new_fresh(PyObject *s, PyObject *args) { _ped_Device *in_device = NULL; _ped_DiskType *in_type = NULL; PedDevice *device = NULL; PedDiskType *type = NULL; PedDisk *disk = NULL; _ped_Disk *ret = NULL; if (!PyArg_ParseTuple(args, "O!O!", &_ped_Device_Type_obj, &in_device, &_ped_DiskType_Type_obj, &in_type)) { return NULL; } if ((device = _ped_Device2PedDevice((PyObject *) in_device)) == NULL) { return NULL; } if ((type = _ped_DiskType2PedDiskType((PyObject *) in_type)) == NULL) { return NULL; } if ((disk = ped_disk_new_fresh(device, type)) == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(DiskException, partedExnMessage); } else { /* ped_disk_new_fresh() failed, so disk is NULL here; report the device path instead */ PyErr_Format(DiskException, "Could not create new disk label on %s", device->path); } return NULL; } ret = PedDisk2_ped_Disk(disk); return (PyObject *) ret; } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/_pedmodule.c0000664000076400007640000006210111542263326013221 00000000000000/* * _pedmodule.c * libparted Python bindings. This module is low-level in that it directly * maps to the libparted API. It is intended to be used by a higher level * Python module that implements the libparted functionality via Python * classes and other high level language features. * * Copyright (C) 2007, 2008 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details.
You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include #include #include #include "_pedmodule.h" #include "exceptions.h" #include "pyconstraint.h" #include "pydevice.h" #include "pydisk.h" #include "pyfilesys.h" #include "pygeom.h" #include "pynatmath.h" #include "pytimer.h" #include "pyunit.h" #include "config.h" char *partedExnMessage = NULL; unsigned int partedExnRaised = 0; /* Docs strings are broken out of the module structure here to be at least a * little bit readable. */ PyDoc_STRVAR(libparted_version_doc, "libparted_version() -> string\n\n" "Return the version of libparted that pyparted was built against."); PyDoc_STRVAR(pyparted_version_doc, "pyparted_version() -> (major, minor, update)\n\n" "Return the version of the pyparted module."); PyDoc_STRVAR(constraint_new_from_min_max_doc, "constraint_new_from_min_max(min, max) -> Constraint\n\n" "min and max are Geometry objects. Return a Constraint that requires the region\n" "to be entirely contained inside max and to entirely contain min."); PyDoc_STRVAR(constraint_new_from_min_doc, "constraint_new_from_min(Geometry) -> Constraint\n\n" "Return a Constraint that requires a region to entirely contain Geometry."); PyDoc_STRVAR(constraint_new_from_max, "constraint_new_from_max(Geometry) -> Constraint\n\n" "Return a Constraint that requires a region to be entirely contained inside\n" "Geometry."); PyDoc_STRVAR(constraint_any_doc, "constraint_any(Device) -> Constraint\n\n" "Return a Constraint that any region on Device will satisfy."); PyDoc_STRVAR(constraint_exact_doc, "constraint_exact(Geometry) -> Constraint\n\n" "Return a Constraint that only the given Geometry will satisfy."); PyDoc_STRVAR(device_get_doc, "device_get(string) -> Device\n\n" "Return the Device corresponding to the given path. 
Typically, path will\n" "be a device name like /dev/sda."); PyDoc_STRVAR(device_get_next_doc, "device_get_next(Device) -> Device\n\n" "Return the next Device in the list detected by _ped.device_probe_all()."); PyDoc_STRVAR(device_probe_all_doc, "device_probe_all()\n\n" "Attempt to detect all devices."); PyDoc_STRVAR(device_free_all_doc, "device_free_all()\n\n" "Close and free all devices."); PyDoc_STRVAR(file_system_probe_doc, "file_system_probe(Geometry) -> FileSystem\n\n" "Attempt to detect a FileSystem in the region described by Geometry.\n" "This function tries to be clever at dealing with ambiguous\n" "situations, such as when one file system was not completely erased\n" "before a new file system was created on top of it."); PyDoc_STRVAR(file_system_probe_specific_doc, "file_system_probe_specific(FileSystemType, Geometry) -> FileSystem\n\n" "Look at Geometry for FileSystemType, return FileSystem for that type\n" "if found in the specified region."); PyDoc_STRVAR(file_system_type_get_doc, "file_system_type_get(self, string) -> _ped.FileSystemType\n\n" "Get a FileSystemType by its name, or raise _ped.UnknownTypeException if no\n" "type by that name exists."); PyDoc_STRVAR(file_system_type_get_next_doc, "file_system_type_get_next(self) -> _ped.FileSystemType\n\n" "Get the next FileSystemType in parted's list after self, or raise IndexError\n" "if there are no more types."); PyDoc_STRVAR(disk_type_get_next_doc, "disk_type_get_next(self) -> DiskType\n\n" "Return the next DiskType after self. If self is the last DiskType, raise\n" "IndexError."); PyDoc_STRVAR(disk_type_get_doc, "disk_type_get(string) -> DiskType\n\n" "Return a DiskType object with the given name. If no DiskType exists with\n" "that name, raise _ped.UnknownTypeException."); PyDoc_STRVAR(partition_type_get_name_doc, "partition_type_get_name(integer) -> string\n\n" "Return a name for a partition type constant. This mainly exists just to\n" "present something in user interfaces. It doesn't really provide the best\n" "names for partition types."); PyDoc_STRVAR(partition_flag_get_name_doc, "partition_flag_get_name(integer) -> string\n\n" "Return a name for a partition flag constant. If an invalid flag is provided,\n" "_ped.PartedExeption will be raised."); PyDoc_STRVAR(partition_flag_get_by_name_doc, "partition_flag_get_by_name(string) -> integer\n\n" "Return a partition flag given its name, or 0 if no flag matches the name."); PyDoc_STRVAR(partition_flag_next_doc, "partition_flag_next(integer) -> integer\n\n" "Given a partition flag, return the next flag. If there is no next flag, 0\n" "is returned."); PyDoc_STRVAR(disk_new_fresh_doc, "disk_new_fresh(Device, DiskType) -> Disk\n\n" "Given the Device and DiskType, create a new Disk object with using the\n" "DiskType specified. The new disk label is only in-memory, the caller\n" "will have to use the commit_to_dev() method to write the new label to\n" "the disk."); PyDoc_STRVAR(disk_flag_get_name_doc, "disk_flag_get_name(integer) -> string\n\n" "Return a name for a disk flag constant. If an invalid flag is provided,\n" "a ValueError will be raised."); PyDoc_STRVAR(disk_flag_get_by_name_doc, "disk_flag_get_by_name(string) -> integer\n\n" "Return a disk flag given its name, or 0 if no flag matches the name."); PyDoc_STRVAR(disk_flag_next_doc, "disk_flag_next(integer) -> integer\n\n" "Given a disk flag, return the next flag. 
If there is no next flag, 0\n" "is returned."); PyDoc_STRVAR(unit_set_default_doc, "unit_set_default(Unit)\n\n" "Sets the default Unit to be used by further unit_* calls. This\n" "primarily affects the formatting of error messages."); PyDoc_STRVAR(unit_get_default_doc, "unit_get_default() -> Unit\n\n" "Returns the default Unit."); PyDoc_STRVAR(unit_get_name_doc, "unit_get_name(Unit) -> string\n\n" "Returns a textual representation of a given Unit."); PyDoc_STRVAR(unit_get_by_name_doc, "unit_get_by_name(string) -> Unit\n\n" "Returns a Unit given its textual representation. Returns one of the\n" "UNIT_* constants."); PyDoc_STRVAR(_ped_doc, "This module implements an interface to libparted.\n\n" "pyparted provides two API layers: a lower level that exposes the complete\n" "libparted API, and a higher level built on top of that which provides a\n" "more Python-like view. The _ped module is the base of the lower level\n" "API. It provides:\n\n" "\t- Access to all the basic objects and submodules of pyparted\n" "\t- Basic unit handling and mathematical functions\n" "\t- A few basic device probing functions\n" "\t- The DEVICE_*, PARTITION_*, and UNIT_* constants from libparted\n" "\t- A variety of exceptions for handling error conditions\n\n" "For complete documentation, refer to the docs strings for each _ped\n" "method, exception class, and subclass."); /* all of the methods for the _ped module */ static struct PyMethodDef PyPedModuleMethods[] = { {"libparted_version", (PyCFunction) py_libparted_get_version, METH_VARARGS, libparted_version_doc}, {"pyparted_version", (PyCFunction) py_pyparted_version, METH_VARARGS, pyparted_version_doc}, /* pyconstraint.c */ {"constraint_new_from_min_max", (PyCFunction) py_ped_constraint_new_from_min_max, METH_VARARGS, constraint_new_from_min_max_doc}, {"constraint_new_from_min", (PyCFunction) py_ped_constraint_new_from_min, METH_VARARGS, constraint_new_from_min_doc}, {"constraint_new_from_max", (PyCFunction) py_ped_constraint_new_from_max, METH_VARARGS, constraint_new_from_max}, {"constraint_any", (PyCFunction) py_ped_constraint_any, METH_VARARGS, constraint_any_doc}, {"constraint_exact", (PyCFunction) py_ped_constraint_exact, METH_VARARGS, constraint_exact_doc}, /* pydevice.c */ {"device_get", (PyCFunction) py_ped_device_get, METH_VARARGS, device_get_doc}, {"device_get_next", (PyCFunction) py_ped_device_get_next, METH_VARARGS, device_get_next_doc}, {"device_probe_all", (PyCFunction) py_ped_device_probe_all, METH_VARARGS, device_probe_all_doc}, {"device_free_all", (PyCFunction) py_ped_device_free_all, METH_VARARGS, device_free_all_doc}, /* pydisk.c */ {"disk_type_get_next", (PyCFunction) py_ped_disk_type_get_next, METH_VARARGS, disk_type_get_next_doc}, {"disk_type_get", (PyCFunction) py_ped_disk_type_get, METH_VARARGS, disk_type_get_doc}, {"partition_type_get_name", (PyCFunction) py_ped_partition_type_get_name, METH_VARARGS, partition_type_get_name_doc}, {"partition_flag_get_name", (PyCFunction) py_ped_partition_flag_get_name, METH_VARARGS, partition_flag_get_name_doc}, {"partition_flag_get_by_name", (PyCFunction) py_ped_partition_flag_get_by_name, METH_VARARGS, partition_flag_get_by_name_doc}, {"partition_flag_next", (PyCFunction) py_ped_partition_flag_next, METH_VARARGS, partition_flag_next_doc}, {"disk_new_fresh", (PyCFunction) py_ped_disk_new_fresh, METH_VARARGS, disk_new_fresh_doc}, {"disk_flag_get_name", (PyCFunction) py_ped_disk_flag_get_name, METH_VARARGS, disk_flag_get_name_doc}, {"disk_flag_get_by_name", (PyCFunction) py_ped_disk_flag_get_by_name, 
METH_VARARGS, disk_flag_get_by_name_doc}, {"disk_flag_next", (PyCFunction) py_ped_disk_flag_next, METH_VARARGS, disk_flag_next_doc}, /* pyfilesys.c */ {"file_system_probe", (PyCFunction) py_ped_file_system_probe, METH_VARARGS, file_system_probe_doc}, {"file_system_probe_specific", (PyCFunction) py_ped_file_system_probe_specific, METH_VARARGS, file_system_probe_specific_doc}, {"file_system_type_get", (PyCFunction) py_ped_file_system_type_get, METH_VARARGS, file_system_type_get_doc}, {"file_system_type_get_next", (PyCFunction) py_ped_file_system_type_get_next, METH_VARARGS, file_system_type_get_next_doc}, /* pyunit.c */ {"unit_set_default", (PyCFunction) py_ped_unit_set_default, METH_VARARGS, unit_set_default_doc}, {"unit_get_default", (PyCFunction) py_ped_unit_get_default, METH_VARARGS, unit_get_default_doc}, {"unit_get_name", (PyCFunction) py_ped_unit_get_name, METH_VARARGS, unit_get_name_doc}, {"unit_get_by_name", (PyCFunction) py_ped_unit_get_by_name, METH_VARARGS, unit_get_by_name_doc}, { NULL, NULL, 0, NULL } }; PyObject *py_libparted_get_version(PyObject *s, PyObject *args) { char *ret = (char *) ped_get_version(); if (ret != NULL) return PyString_FromString(ret); else return PyString_FromString(""); } PyObject *py_pyparted_version(PyObject *s, PyObject *args) { int t = 0; int major = -1, minor = -1, update = -1; char suffix[11]; char *v = VERSION; /* Read pyparted version string. Support the following formats: * X * X.Y * X.Y.Z * X.Y.Z-string */ if (index(v, '-')) { memset(&suffix, '\0', sizeof(suffix)); t = sscanf(v, "%d.%d.%d-%10s", &major, &minor, &update, (char *) &suffix); } else { t = sscanf(v, "%d.%d.%d", &major, &minor, &update); } if (t == 0 || t == EOF) { return NULL; } if (t == 1) { return Py_BuildValue("(i)", major); } else if (t == 2) { if (minor == -1) { return Py_BuildValue("(is)", major, suffix); } else { return Py_BuildValue("(ii)", major, minor); } } else if (t == 3) { if (update == -1) { return Py_BuildValue("(iis)", major, minor, suffix); } else { return Py_BuildValue("(iii)", major, minor, update); } } else { return Py_BuildValue("(iiis)", major, minor, update, suffix); } } /* This function catches libparted exceptions and converts them into Python * exceptions that the various methods can catch and do something with. The * main motivation for this function is that methods in our parted module need * to be able to raise specific, helpful exceptions instead of something * generic. */ static PedExceptionOption partedExnHandler(PedException *e) { switch (e->type) { /* Raise yes/no exceptions so the caller can deal with them, * otherwise ignore */ case PED_EXCEPTION_INFORMATION: case PED_EXCEPTION_WARNING: if (e->options == PED_EXCEPTION_YES_NO) { partedExnRaised = 1; partedExnMessage = strdup(e->message); if (partedExnMessage == NULL) PyErr_NoMemory(); /* * return 'no' for yes/no question exceptions in libparted, * prevent any potential disk destruction and pass up an * exception to our caller */ return PED_EXCEPTION_NO; } else { partedExnRaised = 0; return PED_EXCEPTION_IGNORE; } /* Set global flags so parted module methods can raise specific * exceptions with the message. */ case PED_EXCEPTION_ERROR: case PED_EXCEPTION_FATAL: partedExnRaised = 1; partedExnMessage = strdup(e->message); if (partedExnMessage == NULL) PyErr_NoMemory(); return PED_EXCEPTION_CANCEL; /* Raise exceptions for internal parted bugs immediately. 
*/ case PED_EXCEPTION_BUG: partedExnRaised = 1; PyErr_SetString (PartedException, e->message); return PED_EXCEPTION_CANCEL; /* Raise NotImplemented exceptions immediately too. */ case PED_EXCEPTION_NO_FEATURE: partedExnRaised = 1; PyErr_SetString (PyExc_NotImplementedError, e->message); return PED_EXCEPTION_CANCEL; } return PED_EXCEPTION_IGNORE; } PyMODINIT_FUNC init_ped(void) { PyObject *m = NULL; /* init the main Python module and add methods */ m = Py_InitModule3("_ped", PyPedModuleMethods, _ped_doc); /* PedUnit possible values */ PyModule_AddIntConstant(m, "UNIT_SECTOR", PED_UNIT_SECTOR); PyModule_AddIntConstant(m, "UNIT_BYTE", PED_UNIT_BYTE); PyModule_AddIntConstant(m, "UNIT_KILOBYTE", PED_UNIT_KILOBYTE); PyModule_AddIntConstant(m, "UNIT_MEGABYTE", PED_UNIT_MEGABYTE); PyModule_AddIntConstant(m, "UNIT_GIGABYTE", PED_UNIT_GIGABYTE); PyModule_AddIntConstant(m, "UNIT_TERABYTE", PED_UNIT_TERABYTE); PyModule_AddIntConstant(m, "UNIT_COMPACT", PED_UNIT_COMPACT); PyModule_AddIntConstant(m, "UNIT_CYLINDER", PED_UNIT_CYLINDER); PyModule_AddIntConstant(m, "UNIT_CHS", PED_UNIT_CHS); PyModule_AddIntConstant(m, "UNIT_PERCENT", PED_UNIT_PERCENT); PyModule_AddIntConstant(m, "UNIT_KIBIBYTE", PED_UNIT_KIBIBYTE); PyModule_AddIntConstant(m, "UNIT_MEBIBYTE", PED_UNIT_MEBIBYTE); PyModule_AddIntConstant(m, "UNIT_GIBIBYTE", PED_UNIT_GIBIBYTE); PyModule_AddIntConstant(m, "UNIT_TEBIBYTE", PED_UNIT_TEBIBYTE); /* add PedCHSGeometry type as _ped.CHSGeometry */ if (PyType_Ready(&_ped_CHSGeometry_Type_obj) < 0) return; Py_INCREF(&_ped_CHSGeometry_Type_obj); PyModule_AddObject(m, "CHSGeometry", (PyObject *)&_ped_CHSGeometry_Type_obj); /* add PedDevice type as _ped.Device */ if (PyType_Ready(&_ped_Device_Type_obj) < 0) return; Py_INCREF(&_ped_Device_Type_obj); PyModule_AddObject(m, "Device", (PyObject *)&_ped_Device_Type_obj); PyModule_AddIntConstant(m, "DEVICE_UNKNOWN", PED_DEVICE_UNKNOWN); PyModule_AddIntConstant(m, "DEVICE_SCSI", PED_DEVICE_SCSI); PyModule_AddIntConstant(m, "DEVICE_IDE", PED_DEVICE_IDE); PyModule_AddIntConstant(m, "DEVICE_DAC960", PED_DEVICE_DAC960); PyModule_AddIntConstant(m, "DEVICE_CPQARRAY", PED_DEVICE_CPQARRAY); PyModule_AddIntConstant(m, "DEVICE_FILE", PED_DEVICE_FILE); PyModule_AddIntConstant(m, "DEVICE_ATARAID", PED_DEVICE_ATARAID); PyModule_AddIntConstant(m, "DEVICE_I2O", PED_DEVICE_I2O); PyModule_AddIntConstant(m, "DEVICE_UBD", PED_DEVICE_UBD); PyModule_AddIntConstant(m, "DEVICE_DASD", PED_DEVICE_DASD); PyModule_AddIntConstant(m, "DEVICE_VIODASD", PED_DEVICE_VIODASD); PyModule_AddIntConstant(m, "DEVICE_SX8", PED_DEVICE_SX8); PyModule_AddIntConstant(m, "DEVICE_DM", PED_DEVICE_DM); PyModule_AddIntConstant(m, "DEVICE_XVD", PED_DEVICE_XVD); PyModule_AddIntConstant(m, "DEVICE_SDMMC", PED_DEVICE_SDMMC); PyModule_AddIntConstant(m, "DEVICE_VIRTBLK", PED_DEVICE_VIRTBLK); /* add PedTimer type as _ped.Timer */ if (PyType_Ready(&_ped_Timer_Type_obj) < 0) return; Py_INCREF(&_ped_Timer_Type_obj); PyModule_AddObject(m, "Timer", (PyObject *)&_ped_Timer_Type_obj); /* add PedGeometry type as _ped.Geometry */ if (PyType_Ready(&_ped_Geometry_Type_obj) < 0) return; Py_INCREF(&_ped_Geometry_Type_obj); PyModule_AddObject(m, "Geometry", (PyObject *)&_ped_Geometry_Type_obj); /* add PedAlignment type as _ped.Alignment */ if (PyType_Ready(&_ped_Alignment_Type_obj) < 0) return; Py_INCREF(&_ped_Alignment_Type_obj); PyModule_AddObject(m, "Alignment", (PyObject *)&_ped_Alignment_Type_obj); /* add PedConstraint type as _ped.Constraint */ if (PyType_Ready(&_ped_Constraint_Type_obj) < 0) return; 
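/*
 * Illustrative sketch of how the constants and type objects registered in
 * this function appear from Python once the module is imported; the values
 * are plain integers and the type objects behave like ordinary classes
 * (geom below is just a placeholder name for an existing Geometry object):
 *
 *     import _ped
 *     _ped.UNIT_MEBIBYTE                 # integer constant
 *     _ped.DEVICE_SCSI                   # integer constant
 *     isinstance(geom, _ped.Geometry)
 */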
Py_INCREF(&_ped_Constraint_Type_obj); PyModule_AddObject(m, "Constraint", (PyObject *)&_ped_Constraint_Type_obj); /* add PedPartition type as _ped.Partition */ if (PyType_Ready(&_ped_Partition_Type_obj) < 0) return; Py_INCREF(&_ped_Partition_Type_obj); PyModule_AddObject(m, "Partition", (PyObject *)&_ped_Partition_Type_obj); /* add PedDisk as _ped.Disk */ if (PyType_Ready(&_ped_Disk_Type_obj) < 0) return; Py_INCREF(&_ped_Disk_Type_obj); PyModule_AddObject(m, "Disk", (PyObject *)&_ped_Disk_Type_obj); /* add PedDiskType as _ped.DiskType */ if (PyType_Ready(&_ped_DiskType_Type_obj) < 0) return; Py_INCREF(&_ped_DiskType_Type_obj); PyModule_AddObject(m, "DiskType", (PyObject *)&_ped_DiskType_Type_obj); /* possible PedDiskTypeFeature values */ PyModule_AddIntConstant(m, "PARTITION_NORMAL", PED_PARTITION_NORMAL); PyModule_AddIntConstant(m, "PARTITION_LOGICAL", PED_PARTITION_LOGICAL); PyModule_AddIntConstant(m, "PARTITION_EXTENDED", PED_PARTITION_EXTENDED); PyModule_AddIntConstant(m, "PARTITION_FREESPACE", PED_PARTITION_FREESPACE); PyModule_AddIntConstant(m, "PARTITION_METADATA", PED_PARTITION_METADATA); PyModule_AddIntConstant(m, "PARTITION_PROTECTED", PED_PARTITION_PROTECTED); PyModule_AddIntConstant(m, "PARTITION_BOOT", PED_PARTITION_BOOT); PyModule_AddIntConstant(m, "PARTITION_ROOT", PED_PARTITION_ROOT); PyModule_AddIntConstant(m, "PARTITION_SWAP", PED_PARTITION_SWAP); PyModule_AddIntConstant(m, "PARTITION_HIDDEN", PED_PARTITION_HIDDEN); PyModule_AddIntConstant(m, "PARTITION_RAID", PED_PARTITION_RAID); PyModule_AddIntConstant(m, "PARTITION_LVM", PED_PARTITION_LVM); PyModule_AddIntConstant(m, "PARTITION_LBA", PED_PARTITION_LBA); PyModule_AddIntConstant(m, "PARTITION_HPSERVICE", PED_PARTITION_HPSERVICE); PyModule_AddIntConstant(m, "PARTITION_PALO", PED_PARTITION_PALO); PyModule_AddIntConstant(m, "PARTITION_PREP", PED_PARTITION_PREP); PyModule_AddIntConstant(m, "PARTITION_MSFT_RESERVED", PED_PARTITION_MSFT_RESERVED); PyModule_AddIntConstant(m, "PARTITION_APPLE_TV_RECOVERY", PED_PARTITION_APPLE_TV_RECOVERY); PyModule_AddIntConstant(m, "PARTITION_BIOS_GRUB", PED_PARTITION_BIOS_GRUB); PyModule_AddIntConstant(m, "PARTITION_DIAG", PED_PARTITION_DIAG); #ifdef HAVE_PED_PARTITION_LEGACY_BOOT PyModule_AddIntConstant(m, "PARTITION_LEGACY_BOOT", PED_PARTITION_LEGACY_BOOT); #endif PyModule_AddIntConstant(m, "DISK_CYLINDER_ALIGNMENT", PED_DISK_CYLINDER_ALIGNMENT); PyModule_AddIntConstant(m, "DISK_TYPE_EXTENDED", PED_DISK_TYPE_EXTENDED); PyModule_AddIntConstant(m, "DISK_TYPE_PARTITION_NAME", PED_DISK_TYPE_PARTITION_NAME); /* add PedFileSystemType as _ped.FileSystemType */ if (PyType_Ready(&_ped_FileSystemType_Type_obj) < 0) return; Py_INCREF(&_ped_FileSystemType_Type_obj); PyModule_AddObject(m, "FileSystemType", (PyObject *)&_ped_FileSystemType_Type_obj); /* add PedFileSystem as _ped.FileSystem */ if (PyType_Ready(&_ped_FileSystem_Type_obj) < 0) return; Py_INCREF(&_ped_FileSystem_Type_obj); PyModule_AddObject(m, "FileSystem", (PyObject *)&_ped_FileSystem_Type_obj); /* add our custom exceptions */ AlignmentException = PyErr_NewException("_ped.AlignmentException", NULL, NULL); Py_INCREF(AlignmentException); PyModule_AddObject(m, "AlignmentException", AlignmentException); ConstraintException = PyErr_NewException("_ped.ConstraintException", NULL, NULL); Py_INCREF(ConstraintException); PyModule_AddObject(m, "ConstraintException", ConstraintException); CreateException = PyErr_NewException("_ped.CreateException", NULL, NULL); Py_INCREF(CreateException); PyModule_AddObject(m, "CreateException", CreateException); 
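/*
 * Hedged usage sketch: the exception objects registered here are ordinary
 * Python exception classes, so callers catch them directly.  For example,
 * per the disk_type_get docstring above, an unknown label name raises
 * _ped.UnknownTypeException:
 *
 *     import _ped
 *     try:
 *         _ped.disk_type_get("no-such-label")
 *     except _ped.UnknownTypeException:
 *         pass
 */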
DeviceException = PyErr_NewException("_ped.DeviceException", NULL, NULL); Py_INCREF(DeviceException); PyModule_AddObject(m, "DeviceException", DeviceException); DiskException = PyErr_NewException("_ped.DiskException", NULL, NULL); Py_INCREF(DiskException); PyModule_AddObject(m, "DiskException", DiskException); DiskLabelException = PyErr_NewException("_ped.DiskLabelException", NULL, NULL); Py_INCREF(DiskLabelException); PyModule_AddObject(m, "DiskLabelException", DiskLabelException); FileSystemException = PyErr_NewException("_ped.FileSystemException", NULL, NULL); Py_INCREF(FileSystemException); PyModule_AddObject(m, "FileSystemException", FileSystemException); GeometryException = PyErr_NewException("_ped.GeometryException", NULL, NULL); Py_INCREF(GeometryException); PyModule_AddObject(m, "GeometryException", GeometryException); IOException = PyErr_NewException("_ped.IOException", NULL, NULL); Py_INCREF(IOException); PyModule_AddObject(m, "IOException", IOException); NotNeededException = PyErr_NewException("_ped.NotNeededException", NULL, NULL); Py_INCREF(NotNeededException); PyModule_AddObject(m, "NotNeededException", NotNeededException); PartedException = PyErr_NewException("_ped.PartedException", NULL, NULL); Py_INCREF(PartedException); PyModule_AddObject(m, "PartedException", PartedException); PartitionException = PyErr_NewException("_ped.PartitionException", NULL, NULL); Py_INCREF(PartitionException); PyModule_AddObject(m, "PartitionException", PartitionException); TimerException = PyErr_NewException("_ped.TimerException", NULL, NULL); Py_INCREF(TimerException); PyModule_AddObject(m, "TimerException", TimerException); UnknownDeviceException = PyErr_NewException("_ped.UnknownDeviceException", NULL, NULL); Py_INCREF(UnknownDeviceException); PyModule_AddObject(m, "UnknownDeviceException", UnknownDeviceException); UnknownTypeException = PyErr_NewException("_ped.UnknownTypeException", NULL, NULL); Py_INCREF(UnknownTypeException); PyModule_AddObject(m, "UnknownTypeException", UnknownTypeException); /* Set up our libparted exception handler. */ ped_exception_set_handler(partedExnHandler); } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/pyfilesys.c0000664000076400007640000005374211312767136013151 00000000000000/* * pyfilesys.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include "convert.h" #include "exceptions.h" #include "pydevice.h" #include "pyfilesys.h" #include "pygeom.h" #include "docstrings/pyfilesys.h" #include "typeobjects/pyfilesys.h" /* _ped.FileSystemType functions */ void _ped_FileSystemType_dealloc(_ped_FileSystemType *self) { PyObject_GC_UnTrack(self); free(self->name); PyObject_GC_Del(self); } int _ped_FileSystemType_compare(_ped_FileSystemType *self, PyObject *obj) { _ped_FileSystemType *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_FileSystemType_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.FileSystemType"); return -1; } comp = (_ped_FileSystemType *) obj; if (!strcmp(self->name, comp->name)) { return 0; } else { return 1; } } PyObject *_ped_FileSystemType_richcompare(_ped_FileSystemType *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_FileSystemType_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_FileSystemType_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.FileSystemType"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_FileSystemType_str(_ped_FileSystemType *self) { char *ret = NULL; if (asprintf(&ret, "_ped.FileSystemType instance --\n" " name: %s", self->name) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_FileSystemType_traverse(_ped_FileSystemType *self, visitproc visit, void *arg) { return 0; } int _ped_FileSystemType_clear(_ped_FileSystemType *self) { return 0; } PyObject *_ped_FileSystemType_get(_ped_FileSystemType *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.FileSystemType()"); return NULL; } if (!strcmp(member, "name")) { if (self->name != NULL) return PyString_FromString(self->name); else return PyString_FromString(""); } else { PyErr_Format(PyExc_AttributeError, "_ped.FileSystemType object has no attribute %s", member); return NULL; } } /* _ped.FileSystem functions */ void _ped_FileSystem_dealloc(_ped_FileSystem *self) { _ped_FileSystemType *fstype = (_ped_FileSystemType *) self->type; if (self->ped_filesystem) { /* XXX: do we want to set an exception string here? 
*/ if (!ped_file_system_close(self->ped_filesystem)) { PyErr_Format(FileSystemException, "Failed to close filesystem type %s", fstype->name); } } PyObject_GC_UnTrack(self); Py_CLEAR(self->type); self->type = NULL; Py_CLEAR(self->geom); self->geom = NULL; PyObject_GC_Del(self); } int _ped_FileSystem_compare(_ped_FileSystem *self, PyObject *obj) { _ped_FileSystem *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_FileSystem_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.FileSystem"); return -1; } comp = (_ped_FileSystem *) obj; if ((_ped_FileSystemType_Type_obj.tp_richcompare(self->type, comp->type, Py_EQ)) && (_ped_Geometry_Type_obj.tp_richcompare(self->geom, comp->geom, Py_EQ)) && (self->checked == comp->checked)) { return 0; } else { return 1; } } PyObject *_ped_FileSystem_richcompare(_ped_FileSystem *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_FileSystem_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_FileSystem_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.FileSystem"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_FileSystem_str(_ped_FileSystem *self) { char *ret = NULL; char *type = NULL, *geom = NULL; type = PyString_AsString(_ped_FileSystem_Type_obj.tp_repr(self->type)); if (type == NULL) { return NULL; } geom = PyString_AsString(_ped_Geometry_Type_obj.tp_repr(self->geom)); if (geom == NULL) { return NULL; } if (asprintf(&ret, "_ped.FileSystem instance --\n" " type: %s geom: %s\n" " checked: %d", type, geom, self->checked) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_FileSystem_traverse(_ped_FileSystem *self, visitproc visit, void *arg) { int err; if (self->type) { if ((err = visit(self->type, arg))) { return err; } } if (self->geom) { if ((err = visit(self->geom, arg))) { return err; } } return 0; } int _ped_FileSystem_clear(_ped_FileSystem *self) { Py_CLEAR(self->type); self->type = NULL; Py_CLEAR(self->geom); self->geom = NULL; return 0; } int _ped_FileSystem_init(_ped_FileSystem *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"type", "geom", "checked", NULL}; self->checked = 0; if (kwds == NULL) { if (!PyArg_ParseTuple(args, "O!O!|i", &_ped_FileSystemType_Type_obj, &self->type, &_ped_Geometry_Type_obj, &self->geom, &self->checked)) { self->type = self->geom = NULL; return -1; } } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!|i", kwlist, &_ped_FileSystemType_Type_obj, &self->type, &_ped_Geometry_Type_obj, &self->geom, &self->checked)) { self->type = self->geom = NULL; return -2; } } Py_INCREF(self->type); Py_INCREF(self->geom); self->ped_filesystem = NULL; return 0; } PyObject *_ped_FileSystem_get(_ped_FileSystem *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.FileSystem()"); return NULL; } if (!strcmp(member, "checked")) { return PyInt_FromLong(self->checked); } else { PyErr_Format(PyExc_AttributeError, "_ped.FileSystem object has no attribute %s", member); return NULL; } } /* 1:1 function mappings for filesys.h in libparted */ PyObject *py_ped_file_system_type_get(PyObject *s, PyObject *args) { 
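/*
 * A small sketch of the lookup implemented below, seen from the Python
 * side; "ext3" is only an example and must be a type libparted knows about:
 *
 *     import _ped
 *     fstype = _ped.file_system_type_get("ext3")
 *     fstype.name                        # -> "ext3"
 *
 * Unknown names raise _ped.UnknownTypeException, and the companion
 * file_system_type_get_next() raises IndexError once the list is
 * exhausted, as the code below shows.
 */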
PedFileSystemType *fstype = NULL; _ped_FileSystemType *ret = NULL; char *name = NULL; if (!PyArg_ParseTuple(args, "s", &name)) { return NULL; } fstype = ped_file_system_type_get(name); if (fstype) { ret = PedFileSystemType2_ped_FileSystemType(fstype); return (PyObject *) ret; } else { PyErr_SetString(UnknownTypeException, name); return NULL; } } PyObject *py_ped_file_system_type_get_next(PyObject *s, PyObject *args) { PyObject *in_fstype = NULL; PedFileSystemType *cur = NULL, *next = NULL; _ped_FileSystemType *ret = NULL; if (!PyArg_ParseTuple(args, "|O!", &_ped_FileSystemType_Type_obj, &in_fstype)) { return NULL; } if (in_fstype) { cur = _ped_FileSystemType2PedFileSystemType(in_fstype); if (!cur) { return NULL; } } next = ped_file_system_type_get_next(cur); if (next) { ret = PedFileSystemType2_ped_FileSystemType(next); return (PyObject *) ret; } else { PyErr_SetNone(PyExc_IndexError); return NULL; } } PyObject *py_ped_file_system_probe_specific(PyObject *s, PyObject *args) { PyObject *in_geom = NULL, *in_fstype = NULL; PedFileSystemType *fstype = NULL; PedGeometry *out_geom = NULL, *geom = NULL; _ped_Geometry *ret = NULL; if (!PyArg_ParseTuple(args, "O!O!", &_ped_FileSystemType_Type_obj, &in_fstype, &_ped_Geometry_Type_obj, &in_geom)) { return NULL; } fstype = _ped_FileSystemType2PedFileSystemType(in_fstype); if (!fstype) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (!out_geom) { return NULL; } geom = ped_file_system_probe_specific(fstype, out_geom); if (geom) { ret = PedGeometry2_ped_Geometry(geom); } else { /* libparted swallows exceptions here (I think) and just returns * NULL if the match is not made. Reset exception flag and return * None. */ if (partedExnRaised) { partedExnRaised = 0; } Py_INCREF(Py_None); return Py_None; } return (PyObject *) ret; } PyObject *py_ped_file_system_probe(PyObject *s, PyObject *args) { PyObject *in_geom = NULL; PedGeometry *out_geom = NULL; PedFileSystemType *fstype = NULL; _ped_FileSystemType *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_geom)) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (!out_geom) { return NULL; } fstype = ped_file_system_probe(out_geom); if (fstype) { ret = PedFileSystemType2_ped_FileSystemType(fstype); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_SetString(FileSystemException, "Failed to find any filesystem in given geometry"); return NULL; } return (PyObject *) ret; } PyObject *py_ped_file_system_clobber(PyObject *s, PyObject *args) { int ret = -1; _ped_FileSystem *self = (_ped_FileSystem *) s; PedGeometry *geom = NULL; geom = _ped_Geometry2PedGeometry(self->geom); if (!geom) { return NULL; } ret = ped_file_system_clobber(geom); if (!ret) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_SetString(FileSystemException, "Failed to clobber any filesystem in given geometry"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_file_system_open(PyObject *s, PyObject *args) { PedFileSystem *fs = NULL; fs = _ped_FileSystem2PedFileSystem(s); if (fs) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_file_system_create(PyObject *s, PyObject *args) { _ped_FileSystem *self = (_ped_FileSystem *) s; 
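/*
 * Minimal probe sketch (Python side) for file_system_probe() wrapped
 * above, assuming dev refers to a real, readable device and the sector
 * range actually holds a filesystem; the numbers are placeholders.  It
 * returns the detected filesystem type, or raises if nothing is
 * recognized in the region:
 *
 *     import _ped
 *     dev = _ped.device_get("/dev/sda")
 *     geom = _ped.Geometry(dev, 2048, 2097152)   # start, length in sectors
 *     fstype = _ped.file_system_probe(geom)
 */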
PyObject *in_timer = NULL; PedGeometry *geom = NULL; PedFileSystemType *fstype = NULL; PedTimer *timer = NULL; PedFileSystem *fs = NULL; _ped_FileSystem *ret = NULL; if (!PyArg_ParseTuple(args, "|O!", &_ped_Timer_Type_obj, &in_timer)) { return NULL; } geom = _ped_Geometry2PedGeometry(self->geom); if (!geom) { return NULL; } fstype = _ped_FileSystemType2PedFileSystemType(self->type); if (!fstype) { return NULL; } if (in_timer) { timer = _ped_Timer2PedTimer(in_timer); if (!timer) { return NULL; } } else timer = NULL; fs = ped_file_system_create(geom, fstype, timer); if (fs) { ret = PedFileSystem2_ped_FileSystem(fs); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PyExc_NotImplementedError) && !PyErr_ExceptionMatches(PartedException)) PyErr_SetString(FileSystemException, partedExnMessage); } else PyErr_Format(FileSystemException, "Failed to create filesystem type %s", fstype->name); ped_timer_destroy(timer); return NULL; } ped_file_system_destroy(fs); ped_timer_destroy(timer); return (PyObject *) ret; } PyObject *py_ped_file_system_close(PyObject *s, PyObject *args) { int ret = -1; PedFileSystem *fs = NULL; fs = _ped_FileSystem2PedFileSystem(s); if (!fs) { return NULL; } ret = ped_file_system_close(fs); ped_file_system_destroy(fs); if (!ret) { PyErr_Format(FileSystemException, "Failed to close filesystem type %s", fs->type->name); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_file_system_check(PyObject *s, PyObject *args) { int ret = -1; PyObject *in_timer = NULL; PedFileSystem *fs = NULL; PedTimer *out_timer = NULL; if (!PyArg_ParseTuple(args, "|O!", &_ped_Timer_Type_obj, &in_timer)) { return NULL; } fs = _ped_FileSystem2PedFileSystem(s); if (!fs) { return NULL; } if (in_timer) { out_timer = _ped_Timer2PedTimer(in_timer); if (!out_timer) { ped_file_system_destroy(fs); return NULL; } } else out_timer = NULL; ret = ped_file_system_check(fs, out_timer); ped_file_system_destroy(fs); ped_timer_destroy(out_timer); /* NotImplementedError may have been raised if it's an unsupported * operation for this filesystem. Otherwise, we shouldn't get any * exceptions here. 
*/ if (!ret && partedExnRaised) { partedExnRaised = 0; return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_file_system_copy(PyObject *s, PyObject *args) { PyObject *in_geom = NULL, *in_timer = NULL; PedFileSystem *fs = NULL; PedGeometry *out_geom = NULL; PedTimer *out_timer = NULL; PedFileSystem *finalfs = NULL; _ped_FileSystem *ret = NULL; if (!PyArg_ParseTuple(args, "O!|O!", &_ped_Geometry_Type_obj, &in_geom, &_ped_Timer_Type_obj, &in_timer)) { return NULL; } fs = _ped_FileSystem2PedFileSystem(s); if (!fs) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (!out_geom) { ped_file_system_destroy(fs); return NULL; } if (in_timer) { out_timer = _ped_Timer2PedTimer(in_timer); if (!out_timer) { ped_file_system_destroy(fs); return NULL; } } else out_timer = NULL; finalfs = ped_file_system_copy(fs, out_geom, out_timer); ped_file_system_destroy(fs); ped_timer_destroy(out_timer); if (finalfs) { ret = PedFileSystem2_ped_FileSystem(finalfs); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PyExc_NotImplementedError) && !PyErr_ExceptionMatches(PartedException)) PyErr_SetString(FileSystemException, partedExnMessage); } else PyErr_Format(FileSystemException, "Failed to copy filesystem type %s", fs->type->name); return NULL; } ped_file_system_destroy(finalfs); return (PyObject *) ret; } PyObject *py_ped_file_system_resize(PyObject *s, PyObject *args) { _ped_FileSystem *self = (_ped_FileSystem *) s; PyObject *in_geom = NULL, *in_timer = NULL; PedFileSystem *fs = NULL; PedGeometry *out_geom = NULL; PedTimer *out_timer = NULL; int ret = -1; if (!PyArg_ParseTuple(args, "O!|O!", &_ped_Geometry_Type_obj, &in_geom, &_ped_Timer_Type_obj, &in_timer)) { return NULL; } fs = _ped_FileSystem2PedFileSystem(s); if (!fs) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (!out_geom) { ped_file_system_destroy(fs); return NULL; } if (in_timer) { out_timer = _ped_Timer2PedTimer(in_timer); if (!out_timer) { ped_file_system_destroy(fs); return NULL; } } else out_timer = NULL; ret = ped_file_system_resize(fs, out_geom, out_timer); if (ret) *((_ped_Geometry *)self->geom)->ped_geometry = *(fs->geom); ped_file_system_destroy(fs); ped_timer_destroy(out_timer); if (!ret) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PyExc_NotImplementedError) && !PyErr_ExceptionMatches(PartedException)) PyErr_SetString(FileSystemException, partedExnMessage); } else PyErr_Format(FileSystemException, "Failed to resize filesystem type %s", fs->type->name); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } /* * This is a function that exists in filesys.c in libparted, but the * way it works it is more appropriate to make it a method on a Device * object in the _ped module. * * The self object for this method is a Device, so the input parameter * will be a FileSystemType and the return value will be a Constraint. 
*/ PyObject *py_ped_file_system_get_create_constraint(PyObject *s, PyObject *args) { PyObject *in_fstype = NULL; PedFileSystemType *fstype = NULL; PedDevice *device = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_FileSystemType_Type_obj, &in_fstype)) { return NULL; } device = _ped_Device2PedDevice(s); if (!device) { return NULL; } fstype = _ped_FileSystemType2PedFileSystemType(in_fstype); if (!fstype) { return NULL; } constraint = ped_file_system_get_create_constraint(fstype, device); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_Format(CreateException, "Failed to create constraint for filesystem type %s", fstype->name); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_file_system_get_resize_constraint(PyObject *s, PyObject *args) { PedFileSystem *fs = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; fs = _ped_FileSystem2PedFileSystem(s); if (!fs) { return NULL; } constraint = ped_file_system_get_resize_constraint(fs); ped_file_system_destroy(fs); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_Format(CreateException, "Failed to create resize constraint for filesystem type %s", fs->type->name); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } /* * This is a function that exists in filesys.c in libparted, but the * way it works it is more appropriate to make it a method on a Device * object in the _ped module. * * The self object for this method is a Device, so the input parameter * will be a FileSystem and the return value will be a Constraint. */ PyObject *py_ped_file_system_get_copy_constraint(PyObject *s, PyObject *args) { PedDevice *device = NULL; PyObject *in_fs = NULL; PedFileSystem *fs = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_FileSystem_Type_obj, &in_fs)) { return NULL; } device = _ped_Device2PedDevice(s); if (!device) { return NULL; } fs = _ped_FileSystem2PedFileSystem(s); if (!fs) { return NULL; } constraint = ped_file_system_get_copy_constraint(fs, device); ped_file_system_destroy(fs); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_Format(CreateException, "Failed to create copy constraint for filesystem type %s", fs->type->name); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } pyparted-3.6/src/pygeom.c0000664000076400007640000004673611262444054012422 00000000000000/* * pygeom.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #include #include "convert.h" #include "exceptions.h" #include "pygeom.h" #include "pynatmath.h" #include "docstrings/pygeom.h" #include "typeobjects/pygeom.h" /* _ped.Geometry functions */ void _ped_Geometry_dealloc(_ped_Geometry *self) { if (self->ped_geometry) ped_geometry_destroy(self->ped_geometry); PyObject_GC_UnTrack(self); Py_CLEAR(self->dev); self->dev = NULL; PyObject_GC_Del(self); } int _ped_Geometry_compare(_ped_Geometry *self, PyObject *obj) { _ped_Geometry *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Geometry_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Geometry"); return -1; } comp = (_ped_Geometry *) obj; if ((_ped_Geometry_Type_obj.tp_richcompare(self->dev, comp->dev, Py_EQ)) && (self->ped_geometry->start == comp->ped_geometry->start) && (self->ped_geometry->length == comp->ped_geometry->length) && (self->ped_geometry->end == comp->ped_geometry->end)) { return 0; } else { return 1; } } PyObject *_ped_Geometry_richcompare(_ped_Geometry *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Geometry_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Geometry_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Geometry"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Geometry_str(_ped_Geometry *self) { char *ret = NULL; char *dev = NULL; dev = PyString_AsString(_ped_Device_Type_obj.tp_repr(self->dev)); if (dev == NULL) { return NULL; } if (asprintf(&ret, "_ped.Geometry instance --\n" " start: %lld end: %lld length: %lld\n" " device: %s", self->ped_geometry->start, self->ped_geometry->end, self->ped_geometry->length, dev) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Geometry_traverse(_ped_Geometry *self, visitproc visit, void *arg) { int err; if (self->dev) { if ((err = visit(self->dev, arg))) { return err; } } return 0; } int _ped_Geometry_clear(_ped_Geometry *self) { Py_CLEAR(self->dev); self->dev = NULL; return 0; } int _ped_Geometry_init(_ped_Geometry *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"dev", "start", "length", "end", NULL}; PedDevice *device = NULL; long long start, length, end; self->dev = NULL; self->ped_geometry = NULL; if (kwds == NULL) { if (!PyArg_ParseTuple(args, "O!LL|L", &_ped_Device_Type_obj, &self->dev, &start, &length, &end)) { self->dev = NULL; return -1; } } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!LL|L", kwlist, &_ped_Device_Type_obj, &self->dev, &start, &length, &end)) { self->dev = NULL; return -2; } } device = _ped_Device2PedDevice(self->dev); if (device == NULL) { self->dev = NULL; return -3; } self->ped_geometry = ped_geometry_new(device, start, length); if (self->ped_geometry == NULL) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) { PyErr_SetString(CreateException, partedExnMessage); } } else { PyErr_SetString(CreateException, "Could not create new geometry"); } self->dev = NULL; return -3; } Py_INCREF(self->dev); return 0; } PyObject *_ped_Geometry_get(_ped_Geometry *self, void 
*closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Geometry()"); return NULL; } if (!strcmp(member, "start")) { return PyLong_FromLongLong(self->ped_geometry->start); } else if (!strcmp(member, "length")) { return PyLong_FromLongLong(self->ped_geometry->length); } else if (!strcmp(member, "end")) { return PyLong_FromLongLong(self->ped_geometry->end); } else { PyErr_Format(PyExc_AttributeError, "_ped.Geometry object has no attribute %s", member); return NULL; } } int _ped_Geometry_set(_ped_Geometry *self, PyObject *value, void *closure) { char *member = (char *) closure; long long val; int ret; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Geometry()"); return -1; } if (!strcmp(member, "start")) { val = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } ret = ped_geometry_set_start(self->ped_geometry, val); } else if (!strcmp(member, "length")) { val = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } ret = ped_geometry_set(self->ped_geometry, self->ped_geometry->start, val); } else if (!strcmp(member, "end")) { val = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } ret = ped_geometry_set_end(self->ped_geometry, val); } else { PyErr_Format(PyExc_AttributeError, "_ped.Geometry object has no attribute %s", member); return -1; } if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, partedExnMessage); } } else { PyErr_SetString(PyExc_ValueError, "Could not set geometry"); } return -1; } return 0; } /* 1:1 function mappings for geom.h in libparted */ PyObject *py_ped_geometry_duplicate(PyObject *s, PyObject *args) { PedGeometry *geometry = NULL, *geom = NULL; _ped_Geometry *ret = NULL; geometry = _ped_Geometry2PedGeometry(s); if (geometry == NULL) { return NULL; } geom = ped_geometry_duplicate(geometry); if (geom) { ret = PedGeometry2_ped_Geometry(geom); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(CreateException, partedExnMessage); } else PyErr_SetString(CreateException, "Could not duplicate geometry"); return NULL; } return (PyObject *) ret; } PyObject *py_ped_geometry_intersect(PyObject *s, PyObject *args) { PyObject *in_b = NULL; PedGeometry *out_a = NULL, *out_b = NULL, *geom = NULL; _ped_Geometry *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_b)) { return NULL; } out_a = _ped_Geometry2PedGeometry(s); if (out_a == NULL) { return NULL; } out_b = _ped_Geometry2PedGeometry(in_b); if (out_b == NULL) { return NULL; } geom = ped_geometry_intersect (out_a, out_b); if (geom) { ret = PedGeometry2_ped_Geometry(geom); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(CreateException, partedExnMessage); } else PyErr_SetString(PyExc_ArithmeticError, "Could not find geometry intersection"); return NULL; } return (PyObject *) ret; } PyObject *py_ped_geometry_set(PyObject *s, PyObject *args) { int ret = -1; PedGeometry *geom = NULL; PedSector start, length; if (!PyArg_ParseTuple(args, "LL", &start, &length)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } ret = ped_geometry_set(geom, start, length); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && 
!PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(CreateException, partedExnMessage); } else PyErr_SetString(CreateException, "Could not create new geometry"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_set_start(PyObject *s, PyObject *args) { int ret = -1; PedGeometry *geom = NULL; PedSector start; if (!PyArg_ParseTuple(args, "L", &start)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } ret = ped_geometry_set_start(geom, start); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(CreateException, partedExnMessage); } else PyErr_SetString(CreateException, "Could not create new geometry"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_set_end(PyObject *s, PyObject *args) { int ret = -1; PedGeometry *geom = NULL; PedSector end; if (!PyArg_ParseTuple(args, "L", &end)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } ret = ped_geometry_set_end(geom, end); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(CreateException, partedExnMessage); } else PyErr_SetString(CreateException, "Could not create new geometry"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_test_overlap(PyObject *s, PyObject *args) { int ret = -1; PyObject *in_b = NULL; PedGeometry *out_a = NULL, *out_b = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_b)) { return NULL; } out_a = _ped_Geometry2PedGeometry(s); if (out_a == NULL) { return NULL; } out_b = _ped_Geometry2PedGeometry(in_b); if (out_b == NULL) { return NULL; } ret = ped_geometry_test_overlap(out_a, out_b); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_test_inside(PyObject *s, PyObject *args) { int ret = -1; PyObject *in_b = NULL; PedGeometry *out_a = NULL, *out_b = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_b)) { return NULL; } out_a = _ped_Geometry2PedGeometry(s); if (out_a == NULL) { return NULL; } out_b = _ped_Geometry2PedGeometry(in_b); if (out_b == NULL) { return NULL; } ret = ped_geometry_test_inside(out_a, out_b); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_test_equal(PyObject *s, PyObject *args) { int ret = -1; PyObject *in_b = NULL; PedGeometry *out_a = NULL, *out_b = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_b)) { return NULL; } out_a = _ped_Geometry2PedGeometry(s); if (out_a == NULL) { return NULL; } out_b = _ped_Geometry2PedGeometry(in_b); if (out_b == NULL) { return NULL; } ret = ped_geometry_test_equal(out_a, out_b); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_test_sector_inside(PyObject *s, PyObject *args) { int ret = -1; PedGeometry *geom = NULL; PedSector sector; if (!PyArg_ParseTuple(args, "L", &sector)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } ret = ped_geometry_test_sector_inside(geom, sector); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_read(PyObject *s, PyObject *args) { PyObject *ret = NULL; PedGeometry *geom = NULL; char *out_buf = NULL; PedSector offset, count; if
(!PyArg_ParseTuple(args, "LL", &offset, &count)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } /* py_device_read will ASSERT if the device isn't open yet. */ if (geom->dev->open_count <= 0) { PyErr_SetString(IOException, "Attempting to read from a unopened device"); return NULL; } /* And then py_geometry_read will ASSERT on these things too. */ if (offset < 0 || count < 0) { PyErr_SetString(IOException, "offset and count cannot be negative."); return NULL; } if ((out_buf = malloc(geom->dev->sector_size * count)) == NULL) { return PyErr_NoMemory(); } if (ped_geometry_read(geom, out_buf, offset, count) == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_SetString(IOException, "Could not read from given region"); free(out_buf); return NULL; } ret = PyString_FromString(out_buf); free(out_buf); return ret; } PyObject *py_ped_geometry_sync(PyObject *s, PyObject *args) { int ret = -1; PedGeometry *geom = NULL; geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } ret = ped_geometry_sync(geom); if (ret == 0) { PyErr_SetString(IOException, "Could not sync"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_sync_fast(PyObject *s, PyObject *args) { int ret = -1; PedGeometry *geom = NULL; geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } ret = ped_geometry_sync_fast(geom); if (ret == 0) { PyErr_SetString(IOException, "Could not sync"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_write(PyObject *s, PyObject *args) { int ret = -1; char *in_buf = NULL; PedGeometry *geom = NULL; PedSector offset, count; if (!PyArg_ParseTuple(args, "sLL", &in_buf, &offset, &count)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } /* py_device_write will ASSERT if the device isn't open yet. */ if (geom->dev->open_count <= 0) { PyErr_SetString(IOException, "Attempting to write to a unopened device"); return NULL; } /* And then py_geometry_wriet will ASSERT on these things too. 
*/ if (offset < 0 || count < 0) { PyErr_SetString(IOException, "offset and count cannot be negative."); return NULL; } ret = ped_geometry_write(geom, in_buf, offset, count); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(IOException, partedExnMessage); } else PyErr_SetString(IOException, "Could not write to given region"); return NULL; } if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_geometry_check(PyObject *s, PyObject *args) { PyObject *in_timer = NULL; PedGeometry *geom = NULL; PedSector offset, granularity, count, ret; PedTimer *out_timer = NULL; char *out_buf = NULL; if (!PyArg_ParseTuple(args, "LLL|O!", &offset, &granularity, &count, &_ped_Timer_Type_obj, &in_timer)) { return NULL; } geom = _ped_Geometry2PedGeometry(s); if (geom == NULL) { return NULL; } if (!geom->dev->open_count) { PyErr_Format(IOException, "Device %s is not open.", geom->dev->path); return NULL; } if (geom->dev->external_mode) { PyErr_Format(IOException, "Device %s is already open for external access.", geom->dev->path); return NULL; } if (in_timer) out_timer = _ped_Timer2PedTimer(in_timer); else out_timer = NULL; if ((out_buf = malloc(geom->dev->sector_size * 32)) == NULL) { ped_timer_destroy(out_timer); return PyErr_NoMemory(); } ret = ped_geometry_check(geom, out_buf, 32, offset, granularity, count, out_timer); ped_timer_destroy(out_timer); free(out_buf); return PyLong_FromLongLong(ret); } PyObject *py_ped_geometry_map(PyObject *s, PyObject *args) { int ret = -1; PyObject *in_dst = NULL; PedGeometry *out_dst = NULL, *src = NULL; PedSector sector; if (!PyArg_ParseTuple(args, "O!L", &_ped_Geometry_Type_obj, &in_dst, &sector)) { return NULL; } src = _ped_Geometry2PedGeometry(s); if (src == NULL) { return NULL; } out_dst = _ped_Geometry2PedGeometry(in_dst); if (out_dst == NULL) { return NULL; } ret = ped_geometry_map(out_dst, src, sector); if (ret == -1) { PyErr_SetString(PyExc_ArithmeticError, "Sector must exist within region given by geometry"); return NULL; } return Py_BuildValue("i", ret); } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/pyconstraint.c0000664000076400007640000004526011312767136013653 00000000000000/* * pyconstraint.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc.
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #include <Python.h> #include "convert.h" #include "exceptions.h" #include "pyconstraint.h" #include "pygeom.h" #include "pynatmath.h" #include "docstrings/pyconstraint.h" #include "typeobjects/pyconstraint.h" /* _ped.Constraint functions */ void _ped_Constraint_dealloc(_ped_Constraint *self) { PyObject_GC_UnTrack(self); Py_CLEAR(self->start_align); self->start_align = NULL; Py_CLEAR(self->end_align); self->end_align = NULL; Py_CLEAR(self->start_range); self->start_range = NULL; Py_CLEAR(self->end_range); self->end_range = NULL; PyObject_GC_Del(self); } int _ped_Constraint_compare(_ped_Constraint *self, PyObject *obj) { _ped_Constraint *comp = NULL; int check = PyObject_IsInstance(obj, (PyObject *) &_ped_Constraint_Type_obj); if (PyErr_Occurred()) { return -1; } if (!check) { PyErr_SetString(PyExc_ValueError, "object comparing to must be a _ped.Constraint"); return -1; } comp = (_ped_Constraint *) obj; if ((_ped_Alignment_Type_obj.tp_richcompare(self->start_align, comp->start_align, Py_EQ)) && (_ped_Alignment_Type_obj.tp_richcompare(self->end_align, comp->end_align, Py_EQ)) && (_ped_Geometry_Type_obj.tp_richcompare(self->start_range, comp->start_range, Py_EQ)) && (_ped_Geometry_Type_obj.tp_richcompare(self->end_range, comp->end_range, Py_EQ)) && (self->min_size == comp->min_size) && (self->max_size == comp->max_size)) { return 0; } else { return 1; } } PyObject *_ped_Constraint_richcompare(_ped_Constraint *a, PyObject *b, int op) { if (op == Py_EQ) { if (!(_ped_Constraint_Type_obj.tp_compare((PyObject *) a, b))) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if (op == Py_NE) { if (_ped_Constraint_Type_obj.tp_compare((PyObject *) a, b)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } else if ((op == Py_LT) || (op == Py_LE) || (op == Py_GT) || (op == Py_GE)) { PyErr_SetString(PyExc_TypeError, "comparison operator not supported for _ped.Constraint"); return NULL; } else { PyErr_SetString(PyExc_ValueError, "unknown richcompare op"); return NULL; } } PyObject *_ped_Constraint_str(_ped_Constraint *self) { char *ret = NULL; char *start_align = NULL, *end_align = NULL; char *start_range = NULL, *end_range = NULL; start_align = PyString_AsString(_ped_Alignment_Type_obj.tp_repr(self->start_align)); if (start_align == NULL) { return NULL; } end_align = PyString_AsString(_ped_Alignment_Type_obj.tp_repr(self->end_align)); if (end_align == NULL) { return NULL; } start_range = PyString_AsString(_ped_Geometry_Type_obj.tp_repr(self->start_range)); if (start_range == NULL) { return NULL; } end_range = PyString_AsString(_ped_Geometry_Type_obj.tp_repr(self->end_range)); if (end_range == NULL) { return NULL; } if (asprintf(&ret, "_ped.Constraint instance --\n" " start_align: %s end_align: %s\n" " start_range: %s end_range: %s\n" " min_size: %lld max_size: %lld", start_align, end_align, start_range, end_range, self->min_size, self->max_size) == -1) { return PyErr_NoMemory(); } return Py_BuildValue("s", ret); } int _ped_Constraint_traverse(_ped_Constraint *self, visitproc visit, void *arg) { int err; if (self->start_align) { if ((err = visit(self->start_align, arg))) { return err; } } if (self->end_align) { if ((err = visit(self->end_align, arg))) { return err; } } if (self->start_range) { if ((err = visit(self->start_range, arg))) { return err; } } if (self->end_range) { if ((err = visit(self->end_range, arg))) { return err; } } return 0; } int _ped_Constraint_clear(_ped_Constraint *self) { Py_CLEAR(self->start_align); self->start_align = NULL;
Py_CLEAR(self->end_align); self->end_align = NULL; Py_CLEAR(self->start_range); self->start_range = NULL; Py_CLEAR(self->end_range); self->end_range = NULL; return 0; } int _ped_Constraint_init(_ped_Constraint *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"start_align", "end_align", "start_range", "end_range", "min_size", "max_size", NULL}; PedConstraint *constraint = NULL; PedAlignment *start_align = NULL, *end_align = NULL; PedGeometry *start_range = NULL, *end_range = NULL; if (kwds == NULL) { if (!PyArg_ParseTuple(args, "O!O!O!O!LL", &_ped_Alignment_Type_obj, &self->start_align, &_ped_Alignment_Type_obj, &self->end_align, &_ped_Geometry_Type_obj, &self->start_range, &_ped_Geometry_Type_obj, &self->end_range, &self->min_size, &self->max_size)) { self->start_align = self->end_align = NULL; self->start_range = self->end_range = NULL; return -1; } } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O!O!LL", kwlist, &_ped_Alignment_Type_obj, &self->start_align, &_ped_Alignment_Type_obj, &self->end_align, &_ped_Geometry_Type_obj, &self->start_range, &_ped_Geometry_Type_obj, &self->end_range, &self->min_size, &self->max_size)) { self->start_align = self->end_align = NULL; self->start_range = self->end_range = NULL; return -2; } } /* * try to call libparted with provided information, * on failure, raise an exception */ start_align = _ped_Alignment2PedAlignment(self->start_align); end_align = _ped_Alignment2PedAlignment(self->end_align); start_range = _ped_Geometry2PedGeometry(self->start_range); end_range = _ped_Geometry2PedGeometry(self->end_range); constraint = ped_constraint_new(start_align, end_align, start_range, end_range, self->min_size, self->max_size); if (constraint == NULL) { PyErr_SetString(CreateException, "Could not create new constraint"); ped_alignment_destroy(start_align); ped_alignment_destroy(end_align); self->start_align = NULL; self->end_align = NULL; self->start_range = NULL; self->end_range = NULL; return -3; } /* increment reference count for PyObjects read by PyArg_ParseTuple */ Py_INCREF(self->start_align); Py_INCREF(self->end_align); Py_INCREF(self->start_range); Py_INCREF(self->end_range); /* clean up libparted objects we created */ ped_alignment_destroy(start_align); ped_alignment_destroy(end_align); ped_constraint_destroy(constraint); return 0; } PyObject *_ped_Constraint_get(_ped_Constraint *self, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Constraint()"); return NULL; } if (!strcmp(member, "min_size")) { return PyLong_FromLongLong(self->min_size); } else if (!strcmp(member, "max_size")) { return PyLong_FromLongLong(self->max_size); } else { PyErr_Format(PyExc_AttributeError, "_ped.Constraint object has no attribute %s", member); return NULL; } } int _ped_Constraint_set(_ped_Constraint *self, PyObject *value, void *closure) { char *member = (char *) closure; if (member == NULL) { PyErr_SetString(PyExc_TypeError, "Empty _ped.Constraint()"); return -1; } if (!strcmp(member, "min_size")) { self->min_size = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } } else if (!strcmp(member, "max_size")) { self->max_size = PyLong_AsLongLong(value); if (PyErr_Occurred()) { return -1; } } else { PyErr_Format(PyExc_AttributeError, "_ped.Constraint object has no attribute %s", member); return -1; } return 0; } /* 1:1 function mappings for constraint.h in libparted */ PyObject *py_ped_constraint_new_from_min_max(PyObject *s, PyObject *args) { PyObject *in_min = NULL, 
*in_max = NULL; PedGeometry *out_min = NULL, *out_max = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!O!", &_ped_Geometry_Type_obj, &in_min, &_ped_Geometry_Type_obj, &in_max)) { return NULL; } out_min = _ped_Geometry2PedGeometry(in_min); if (out_min == NULL) { return NULL; } out_max = _ped_Geometry2PedGeometry(in_max); if (out_max == NULL) { return NULL; } /* ped_constraint_new_from_min_max will ASSERT if this isn't enforced. */ if (!ped_geometry_test_inside(out_max, out_min)) { PyErr_SetString(CreateException, "min geometry must be contained within max geometry"); return NULL; } constraint = ped_constraint_new_from_min_max(out_min, out_max); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(CreateException, "Could not create new constraint from min/max"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_constraint_new_from_min(PyObject *s, PyObject *args) { PyObject *in_min = NULL; PedGeometry *out_min = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_min)) { return NULL; } out_min = _ped_Geometry2PedGeometry(in_min); if (out_min == NULL) { return NULL; } constraint = ped_constraint_new_from_min(out_min); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(CreateException, "Could not create new constraint from min"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_constraint_new_from_max(PyObject *s, PyObject *args) { PyObject *in_max = NULL; PedGeometry *out_max = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_max)) { return NULL; } out_max = _ped_Geometry2PedGeometry(in_max); if (out_max == NULL) { return NULL; } constraint = ped_constraint_new_from_max(out_max); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(CreateException, "Could not create new constraint from max"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } /* XXX: Remove this function at some point in the future. The deprecation * warning tells people what they should be doing. 
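From the Python side, the replacement the warning points at looks roughly like this (a minimal sketch, assuming an existing _ped.Constraint object named constraint): import copy; duplicate = copy.deepcopy(constraint)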
*/ PyObject *py_ped_constraint_duplicate(PyObject *s, PyObject *args) { PedConstraint *constraint = NULL, *dup_constraint = NULL; _ped_Constraint *ret = NULL; constraint = _ped_Constraint2PedConstraint(s); if (constraint == NULL) { return NULL; } if (PyErr_WarnEx(PyExc_DeprecationWarning, "use copy.deepcopy() to duplicate a _ped.Constraint", 1) == -1) { return NULL; } dup_constraint = ped_constraint_duplicate(constraint); ped_constraint_destroy(constraint); if (dup_constraint) { ret = PedConstraint2_ped_Constraint(dup_constraint); } else { PyErr_SetString(CreateException, "Could not duplicate constraint"); return NULL; } ped_constraint_destroy(dup_constraint); return (PyObject *) ret; } PyObject *py_ped_constraint_intersect(PyObject *s, PyObject *args) { PyObject *in_constraintB = NULL; PedConstraint *constraintA = NULL, *constraintB = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Constraint_Type_obj, &in_constraintB)) { return NULL; } constraintA = _ped_Constraint2PedConstraint(s); if (constraintA == NULL) { return NULL; } constraintB = _ped_Constraint2PedConstraint(in_constraintB); if (constraintB == NULL) { ped_constraint_destroy(constraintA); return NULL; } constraint = ped_constraint_intersect(constraintA, constraintB); ped_constraint_destroy(constraintA); ped_constraint_destroy(constraintB); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(PyExc_ArithmeticError, "Could not find constraint intersection"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_constraint_solve_max(PyObject *s, PyObject *args) { PedConstraint *constraint = NULL; PedGeometry *geometry = NULL; _ped_Geometry *ret = NULL; constraint = _ped_Constraint2PedConstraint(s); if (constraint == NULL) { return NULL; } geometry = ped_constraint_solve_max(constraint); ped_constraint_destroy(constraint); if (geometry) { ret = PedGeometry2_ped_Geometry(geometry); } else { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_ExceptionMatches(PartedException) && !PyErr_ExceptionMatches(PyExc_NotImplementedError)) PyErr_SetString(ConstraintException, partedExnMessage); } else PyErr_SetString(PyExc_ArithmeticError, "Could not find largest region satisfying constraint"); return NULL; } return (PyObject *) ret; } PyObject *py_ped_constraint_solve_nearest(PyObject *s, PyObject *args) { PyObject *in_geometry = NULL; PedConstraint *constraint = NULL; PedGeometry *out_geometry = NULL; PedGeometry *geometry = NULL; _ped_Geometry *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_geometry)) { return NULL; } constraint = _ped_Constraint2PedConstraint(s); if (constraint == NULL) { return NULL; } out_geometry = _ped_Geometry2PedGeometry(in_geometry); if (out_geometry == NULL) { ped_constraint_destroy(constraint); return NULL; } geometry = ped_constraint_solve_nearest(constraint, out_geometry); ped_constraint_destroy(constraint); if (geometry) { ret = PedGeometry2_ped_Geometry(geometry); } else { PyErr_SetString(PyExc_ArithmeticError, "Could not find region nearest to constraint for given geometry"); return NULL; } return (PyObject *) ret; } PyObject *py_ped_constraint_is_solution(PyObject *s, PyObject *args) { PyObject *in_geometry = NULL; PedConstraint *constraint = NULL; PedGeometry *out_geometry = NULL; int ret = 0; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_geometry)) { return NULL; } constraint = _ped_Constraint2PedConstraint(s); if 
(constraint == NULL) { return NULL; } out_geometry = _ped_Geometry2PedGeometry(in_geometry); if (out_geometry == NULL) { ped_constraint_destroy(constraint); return NULL; } ret = ped_constraint_is_solution(constraint, out_geometry); ped_constraint_destroy(constraint); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_constraint_any(PyObject *s, PyObject *args) { PyObject *in_device = NULL; PedDevice *out_device = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Device_Type_obj, &in_device)) { return NULL; } out_device = _ped_Device2PedDevice(in_device); if (out_device == NULL) { return NULL; } constraint = ped_constraint_any(out_device); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(CreateException, "Could not create new constraint"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } PyObject *py_ped_constraint_exact(PyObject *s, PyObject *args) { PyObject *in_geometry = NULL; PedGeometry *out_geometry = NULL; PedConstraint *constraint = NULL; _ped_Constraint *ret = NULL; if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_geometry)) { return NULL; } out_geometry = _ped_Geometry2PedGeometry(in_geometry); if (out_geometry == NULL) { return NULL; } constraint = ped_constraint_exact(out_geometry); if (constraint) { ret = PedConstraint2_ped_Constraint(constraint); } else { PyErr_SetString(CreateException, "Could not create exact constraint"); return NULL; } ped_constraint_destroy(constraint); return (PyObject *) ret; } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/src/pyunit.c0000664000076400007640000001715311227731771012447 00000000000000/* * pyunit.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #include <Python.h> #include "convert.h" #include "exceptions.h" #include "pydevice.h" #include "pyunit.h" /* 1:1 function mappings for unit.h in libparted */ PyObject *py_ped_unit_set_default(PyObject *s, PyObject *args) { int unit; if (!PyArg_ParseTuple(args, "i", &unit)) { return NULL; } if (unit < PED_UNIT_FIRST || unit > PED_UNIT_LAST) { PyErr_SetString(PyExc_ValueError, "Invalid unit provided."); return NULL; } ped_unit_set_default(unit); Py_INCREF(Py_None); return Py_None; } PyObject *py_ped_unit_get_default(PyObject *s, PyObject *args) { return PyLong_FromLongLong(ped_unit_get_default()); } PyObject *py_ped_unit_get_size(PyObject *s, PyObject *args) { long long ret = -1; PedDevice *dev = NULL; int unit; if (!PyArg_ParseTuple(args, "i", &unit)) return NULL; if (unit < PED_UNIT_FIRST || unit > PED_UNIT_LAST) { PyErr_SetString(PyExc_ValueError, "Invalid unit provided."); return NULL; } dev = _ped_Device2PedDevice(s); if (dev == NULL) { return NULL; } ret = ped_unit_get_size(dev, unit); if (ret == 0) { if (partedExnRaised) { partedExnRaised = 0; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, partedExnMessage); } } else { PyErr_SetString(PyExc_ValueError, "Could not get size"); } return NULL; } return PyLong_FromLongLong(ret); } PyObject *py_ped_unit_get_name(PyObject *s, PyObject *args) { const char *name; int unit; if (!PyArg_ParseTuple(args, "i", &unit)) { return NULL; } if (unit < PED_UNIT_FIRST || unit > PED_UNIT_LAST) { PyErr_SetString(PyExc_ValueError, "Invalid unit provided."); return NULL; } /* * DO NOT free the result from ped_unit_get_name(), it's a pointer to * a value in the static unit_names[] array in libparted. */ name = ped_unit_get_name(unit); if (name != NULL) { return PyString_FromString(name); } else { return PyString_FromString(""); } } PyObject *py_ped_unit_get_by_name(PyObject *s, PyObject *args) { int ret; char *name = NULL; if (!PyArg_ParseTuple(args, "z", &name)) { return NULL; } ret = ped_unit_get_by_name(name); if (ret < PED_UNIT_FIRST || ret > PED_UNIT_LAST) { PyErr_SetString(UnknownTypeException, name); return NULL; } return Py_BuildValue("i", ret); } PyObject *py_ped_unit_format_custom_byte(PyObject *s, PyObject *args) { PyObject *ret = NULL; char *pedret = NULL; PedSector sector; int unit; PedDevice *out_dev = NULL; if (!PyArg_ParseTuple(args, "Li", &sector, &unit)) { return NULL; } if (unit < PED_UNIT_FIRST || unit > PED_UNIT_LAST) { PyErr_SetString(PyExc_ValueError, "Invalid unit provided."); return NULL; } out_dev = _ped_Device2PedDevice(s); if (out_dev == NULL) { return NULL; } pedret = ped_unit_format_custom_byte(out_dev, sector, unit); if (pedret != NULL) { ret = PyString_FromString(pedret); free(pedret); } else { ret = PyString_FromString(""); } return ret; } PyObject *py_ped_unit_format_byte(PyObject *s, PyObject *args) { PyObject *ret = NULL; char *pedret = NULL; PedSector sector; PedDevice *out_dev = NULL; if (!PyArg_ParseTuple(args, "L", &sector)) { return NULL; } out_dev = _ped_Device2PedDevice(s); if (out_dev == NULL) { return NULL; } pedret = ped_unit_format_byte(out_dev, sector); if (pedret != NULL) { ret = PyString_FromString(pedret); free(pedret); } else { ret = PyString_FromString(""); } return ret; } PyObject *py_ped_unit_format_custom(PyObject *s, PyObject *args) { PyObject *ret = NULL; char *pedret = NULL; PedDevice *out_dev = NULL; PedSector sector; int unit; if (!PyArg_ParseTuple(args, "Li", &sector, &unit)) { return NULL; } out_dev = _ped_Device2PedDevice(s); if (out_dev == NULL) {
return NULL; } pedret = ped_unit_format_custom(out_dev, sector, unit); if (pedret != NULL) { ret = PyString_FromString(pedret); free(pedret); } else { ret = PyString_FromString(""); } return ret; } PyObject *py_ped_unit_format(PyObject *s, PyObject *args) { PyObject *ret = NULL; char *pedret = NULL; PedDevice *out_dev = NULL; PedSector sector; if (!PyArg_ParseTuple(args, "L", &sector)) { return NULL; } out_dev = _ped_Device2PedDevice(s); if (out_dev == NULL) { return NULL; } pedret = ped_unit_format(out_dev, sector); if (pedret != NULL) { ret = PyString_FromString(pedret); free(pedret); } else { ret = PyString_FromString(""); } return ret; } PyObject *py_ped_unit_parse(PyObject *s, PyObject *args) { int ret; char *str = NULL; PedDevice *out_dev = NULL; PedSector sector; PyObject *in_geom = NULL; PedGeometry *out_geom = NULL; if (!PyArg_ParseTuple(args, "zLO!", &str, &sector, &_ped_Geometry_Type_obj, &in_geom)) { return NULL; } out_dev = _ped_Device2PedDevice(s); if (out_dev == NULL) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (out_geom == NULL) { return NULL; } ret = ped_unit_parse(str, out_dev, &sector, &out_geom); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } PyObject *py_ped_unit_parse_custom(PyObject *s, PyObject *args) { int ret; char *str = NULL; PedDevice *out_dev = NULL; int unit; PedSector sector; PyObject *in_geom = NULL; PedGeometry *out_geom = NULL; if (!PyArg_ParseTuple(args, "ziLO!", &str, &unit, &sector, &_ped_Geometry_Type_obj, &in_geom)) { return NULL; } if (unit < PED_UNIT_FIRST || unit > PED_UNIT_LAST) { PyErr_SetString(PyExc_ValueError, "Invalid unit provided."); return NULL; } out_dev = _ped_Device2PedDevice(s); if (out_dev == NULL) { return NULL; } out_geom = _ped_Geometry2PedGeometry(in_geom); if (out_geom == NULL) { return NULL; } ret = ped_unit_parse_custom(str, out_dev, unit, &sector, &out_geom); if (ret) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/ltmain.sh0000755000076400007640000073306011542323602011775 00000000000000# Generated from ltmain.m4sh. # ltmain.sh (GNU libtool) 2.2.6b # Written by Gordon Matzigkeit , 1996 # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007 2008 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, # or obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # Usage: $progname [OPTION]... [MODE-ARG]...
# # Provide generalized library-building support services. # # --config show all configuration variables # --debug enable verbose shell tracing # -n, --dry-run display commands without modifying any files # --features display basic configuration information and exit # --mode=MODE use operation mode MODE # --preserve-dup-deps don't remove duplicate dependency libraries # --quiet, --silent don't print informational messages # --tag=TAG use configuration variables from tag TAG # -v, --verbose print informational messages (default) # --version print version information # -h, --help print short or long help message # # MODE must be one of the following: # # clean remove files from the build directory # compile compile a source file into a libtool object # execute automatically set library path, then run a program # finish complete the installation of libtool libraries # install install libraries or executables # link create a library or an executable # uninstall remove libraries from an installed directory # # MODE-ARGS vary depending on the MODE. # Try `$progname --help --mode=MODE' for a more detailed description of MODE. # # When reporting a bug, please describe a test case to reproduce it and # include the following information: # # host-triplet: $host # shell: $SHELL # compiler: $LTCC # compiler flags: $LTCFLAGS # linker: $LD (gnu? $with_gnu_ld) # $progname: (GNU libtool) 2.2.6b # automake: $automake_version # autoconf: $autoconf_version # # Report bugs to . PROGRAM=ltmain.sh PACKAGE=libtool VERSION=2.2.6b TIMESTAMP="" package_revision=1.3017 # Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # NLS nuisances: We save the old values to restore during execute mode. # Only set LANG and LC_ALL to C if already set. # These must not be set unconditionally because not all systems understand # e.g. LANG=C (notably SCO). lt_user_locale= lt_safe_locale= for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${$lt_var+set}\" = set; then save_$lt_var=\$$lt_var $lt_var=C export $lt_var lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" fi" done $lt_unset CDPATH : ${CP="cp -f"} : ${ECHO="echo"} : ${EGREP="/bin/grep -E"} : ${FGREP="/bin/grep -F"} : ${GREP="/bin/grep"} : ${LN_S="ln -s"} : ${MAKE="make"} : ${MKDIR="mkdir"} : ${MV="mv -f"} : ${RM="rm -f"} : ${SED="/bin/sed"} : ${SHELL="${CONFIG_SHELL-/bin/sh}"} : ${Xsed="$SED -e 1s/^X//"} # Global variables: EXIT_SUCCESS=0 EXIT_FAILURE=1 EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. exit_status=$EXIT_SUCCESS # Make sure IFS has a sensible default lt_nl=' ' IFS=" $lt_nl" dirname="s,/[^/]*$,," basename="s,^.*/,," # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. 
# value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { # Extract subdirectory from the argument. func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` } # Generated shell functions inserted here. # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh # is ksh but when the shell is invoked as "sh" and the current value of # the _XPG environment variable is not equal to 1 (one), the special # positional parameter $0, within a function call, is the name of the # function. progpath="$0" # The name of this program: # In the unlikely event $progname began with a '-', it would play havoc with # func_echo (imagine progname=-n), so we prepend ./ in that case: func_dirname_and_basename "$progpath" progname=$func_basename_result case $progname in -*) progname=./$progname ;; esac # Make sure we have an absolute path for reexecution: case $progpath in [\\/]*|[A-Za-z]:\\*) ;; *[\\/]*) progdir=$func_dirname_result progdir=`cd "$progdir" && pwd` progpath="$progdir/$progname" ;; *) save_IFS="$IFS" IFS=: for progdir in $PATH; do IFS="$save_IFS" test -x "$progdir/$progname" && break done IFS="$save_IFS" test -n "$progdir" || progdir=`pwd` progpath="$progdir/$progname" ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. Xsed="${SED}"' -e 1s/^X//' sed_quote_subst='s/\([`"$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Re-`\' parameter expansions in output of double_quote_subst that were # `\'-ed in input to the same. If an odd number of `\' preceded a '$' # in input to double_quote_subst, that '$' was protected from expansion. # Since each input `\' is now two `\'s, look for any number of runs of # four `\'s followed by two `\'s and then a '$'. `\' that '$'. bs='\\' bs2='\\\\' bs4='\\\\\\\\' dollar='\$' sed_double_backslash="\ s/$bs4/&\\ /g s/^$bs2$dollar/$bs&/ s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g s/\n//g" # Standard options: opt_dry_run=false opt_help=false opt_quiet=false opt_verbose=false opt_warning=: # func_echo arg... # Echo program name prefixed message, along with the current mode # name if it has been set yet. func_echo () { $ECHO "$progname${mode+: }$mode: $*" } # func_verbose arg... # Echo program name prefixed message in verbose mode only. func_verbose () { $opt_verbose && func_echo ${1+"$@"} # A bug in bash halts the script if the last line of a function # fails when set -e is in force, so we need another command to # work around that: : } # func_error arg... # Echo program name prefixed message to standard error. func_error () { $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 } # func_warning arg... # Echo program name prefixed warning message to standard error. func_warning () { $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 # bash bug again: : } # func_fatal_error arg... # Echo program name prefixed message to standard error, and exit. func_fatal_error () { func_error ${1+"$@"} exit $EXIT_FAILURE } # func_fatal_help arg... 
# Echo program name prefixed message to standard error, followed by # a help hint, and exit. func_fatal_help () { func_error ${1+"$@"} func_fatal_error "$help" } help="Try \`$progname --help' for more information." ## default # func_grep expression filename # Check whether EXPRESSION matches any line of FILENAME, without output. func_grep () { $GREP "$1" "$2" >/dev/null 2>&1 } # func_mkdir_p directory-path # Make sure the entire path to DIRECTORY-PATH is available. func_mkdir_p () { my_directory_path="$1" my_dir_list= if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then # Protect directory names starting with `-' case $my_directory_path in -*) my_directory_path="./$my_directory_path" ;; esac # While some portion of DIR does not yet exist... while test ! -d "$my_directory_path"; do # ...make a list in topmost first order. Use a colon delimited # list incase some portion of path contains whitespace. my_dir_list="$my_directory_path:$my_dir_list" # If the last portion added has no slash in it, the list is done case $my_directory_path in */*) ;; *) break ;; esac # ...otherwise throw away the child directory and loop my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"` done my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'` save_mkdir_p_IFS="$IFS"; IFS=':' for my_dir in $my_dir_list; do IFS="$save_mkdir_p_IFS" # mkdir can fail with a `File exist' error if two processes # try to create one of the directories concurrently. Don't # stop in that case! $MKDIR "$my_dir" 2>/dev/null || : done IFS="$save_mkdir_p_IFS" # Bail out if we (or some other process) failed to create a directory. test -d "$my_directory_path" || \ func_fatal_error "Failed to create \`$1'" fi } # func_mktempdir [string] # Make a temporary directory that won't clash with other running # libtool processes, and avoids race conditions if possible. If # given, STRING is the basename for that directory. func_mktempdir () { my_template="${TMPDIR-/tmp}/${1-$progname}" if test "$opt_dry_run" = ":"; then # Return a directory name, but don't create it in dry-run mode my_tmpdir="${my_template}-$$" else # If mktemp works, use that first and foremost my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` if test ! -d "$my_tmpdir"; then # Failing that, at least try and use $RANDOM to avoid a race my_tmpdir="${my_template}-${RANDOM-0}$$" save_mktempdir_umask=`umask` umask 0077 $MKDIR "$my_tmpdir" umask $save_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure test -d "$my_tmpdir" || \ func_fatal_error "cannot create temporary directory \`$my_tmpdir'" fi $ECHO "X$my_tmpdir" | $Xsed } # func_quote_for_eval arg # Aesthetically quote ARG to be evaled later. # This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT # is double-quoted, suitable for a subsequent eval, whereas # FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters # which are still active within double quotes backslashified. func_quote_for_eval () { case $1 in *[\\\`\"\$]*) func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;; *) func_quote_for_eval_unquoted_result="$1" ;; esac case $func_quote_for_eval_unquoted_result in # Double-quote args containing shell metacharacters to delay # word splitting, command substitution and and variable # expansion for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. 
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" ;; *) func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" esac } # func_quote_for_expand arg # Aesthetically quote ARG to be evaled later; same as above, # but do not quote variable references. func_quote_for_expand () { case $1 in *[\\\`\"]*) my_arg=`$ECHO "X$1" | $Xsed \ -e "$double_quote_subst" -e "$sed_double_backslash"` ;; *) my_arg="$1" ;; esac case $my_arg in # Double-quote args containing shell metacharacters to delay # word splitting and command substitution for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") my_arg="\"$my_arg\"" ;; esac func_quote_for_expand_result="$my_arg" } # func_show_eval cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. func_show_eval () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$my_cmd" my_status=$? if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_show_eval_locale cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. Use the saved locale for evaluation. func_show_eval_locale () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$lt_user_locale $my_cmd" my_status=$? eval "$lt_safe_locale" if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_version # Echo version message to standard output and exit. func_version () { $SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / { s/^# // s/^# *$// s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ p }' < "$progpath" exit $? } # func_usage # Echo short help message to standard output and exit. func_usage () { $SED -n '/^# Usage:/,/# -h/ { s/^# // s/^# *$// s/\$progname/'$progname'/ p }' < "$progpath" $ECHO $ECHO "run \`$progname --help | more' for full usage" exit $? } # func_help # Echo long help message to standard output and exit. func_help () { $SED -n '/^# Usage:/,/# Report bugs to/ { s/^# // s/^# *$// s*\$progname*'$progname'* s*\$host*'"$host"'* s*\$SHELL*'"$SHELL"'* s*\$LTCC*'"$LTCC"'* s*\$LTCFLAGS*'"$LTCFLAGS"'* s*\$LD*'"$LD"'* s/\$with_gnu_ld/'"$with_gnu_ld"'/ s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ p }' < "$progpath" exit $? } # func_missing_arg argname # Echo program name prefixed message to standard error and set global # exit_cmd. func_missing_arg () { func_error "missing argument for $1" exit_cmd=exit } exit_cmd=: # Check that we have a working $ECHO. if test "X$1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X$1" = X--fallback-echo; then # Avoid inline document here, it may be left over : elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then # Yippee, $ECHO works! : else # Restart under the correct shell, and then maybe $ECHO will work. 
exec $SHELL "$progpath" --no-reexec ${1+"$@"} fi if test "X$1" = X--fallback-echo; then # used as fallback echo shift cat </dev/null 2>&1; then taglist="$taglist $tagname" # Evaluate the configuration. Be careful to quote the path # and the sed script, to avoid splitting on whitespace, but # also don't use non-portable quotes within backquotes within # quotes we have to do it in 2 steps: extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` eval "$extractedcf" else func_error "ignoring unknown tag $tagname" fi ;; esac } # Parse options once, thoroughly. This comes as soon as possible in # the script to make things like `libtool --version' happen quickly. { # Shorthand for --mode=foo, only valid as the first argument case $1 in clean|clea|cle|cl) shift; set dummy --mode clean ${1+"$@"}; shift ;; compile|compil|compi|comp|com|co|c) shift; set dummy --mode compile ${1+"$@"}; shift ;; execute|execut|execu|exec|exe|ex|e) shift; set dummy --mode execute ${1+"$@"}; shift ;; finish|finis|fini|fin|fi|f) shift; set dummy --mode finish ${1+"$@"}; shift ;; install|instal|insta|inst|ins|in|i) shift; set dummy --mode install ${1+"$@"}; shift ;; link|lin|li|l) shift; set dummy --mode link ${1+"$@"}; shift ;; uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) shift; set dummy --mode uninstall ${1+"$@"}; shift ;; esac # Parse non-mode specific arguments: while test "$#" -gt 0; do opt="$1" shift case $opt in --config) func_config ;; --debug) preserve_args="$preserve_args $opt" func_echo "enabling shell trace mode" opt_debug='set -x' $opt_debug ;; -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break execute_dlfiles="$execute_dlfiles $1" shift ;; --dry-run | -n) opt_dry_run=: ;; --features) func_features ;; --finish) mode="finish" ;; --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break case $1 in # Valid mode arguments: clean) ;; compile) ;; execute) ;; finish) ;; install) ;; link) ;; relink) ;; uninstall) ;; # Catch anything else as an error *) func_error "invalid argument for $opt" exit_cmd=exit break ;; esac mode="$1" shift ;; --preserve-dup-deps) opt_duplicate_deps=: ;; --quiet|--silent) preserve_args="$preserve_args $opt" opt_silent=: ;; --verbose| -v) preserve_args="$preserve_args $opt" opt_silent=false ;; --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break preserve_args="$preserve_args $opt $1" func_enable_tag "$1" # tagname is set here shift ;; # Separate optargs to long options: -dlopen=*|--mode=*|--tag=*) func_opt_split "$opt" set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} shift ;; -\?|-h) func_usage ;; --help) opt_help=: ;; --version) func_version ;; -*) func_fatal_help "unrecognized option \`$opt'" ;; *) nonopt="$opt" break ;; esac done case $host in *cygwin* | *mingw* | *pw32* | *cegcc*) # don't eliminate duplications in $postdeps and $predeps opt_duplicate_compiler_generated_deps=: ;; *) opt_duplicate_compiler_generated_deps=$opt_duplicate_deps ;; esac # Having warned about all mis-specified options, bail out if # anything was wrong. $exit_cmd $EXIT_FAILURE } # func_check_version_match # Ensure that we are using m4 macros, and libtool script from the same # release of libtool. func_check_version_match () { if test "$package_revision" != "$macro_revision"; then if test "$VERSION" != "$macro_version"; then if test -z "$macro_version"; then cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from an older release. 
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from $PACKAGE $macro_version. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF fi else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, $progname: but the definition of this LT_INIT comes from revision $macro_revision. $progname: You should recreate aclocal.m4 with macros from revision $package_revision $progname: of $PACKAGE $VERSION and run autoconf again. _LT_EOF fi exit $EXIT_MISMATCH fi } ## ----------- ## ## Main. ## ## ----------- ## $opt_help || { # Sanity checks first: func_check_version_match if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then func_fatal_configuration "not configured to build any kind of library" fi test -z "$mode" && func_fatal_error "error: you must specify a MODE." # Darwin sucks eval std_shrext=\"$shrext_cmds\" # Only execute mode is allowed to have -dlopen flags. if test -n "$execute_dlfiles" && test "$mode" != execute; then func_error "unrecognized option \`-dlopen'" $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help="$help" help="Try \`$progname --help --mode=$mode' for more information." } # func_lalib_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_lalib_p () { test -f "$1" && $SED -e 4q "$1" 2>/dev/null \ | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 } # func_lalib_unsafe_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function implements the same check as func_lalib_p without # resorting to external programs. To this end, it redirects stdin and # closes it afterwards, without saving the original file descriptor. # As a safety measure, use it only where a negative result would be # fatal anyway. Works if `file' does not exist. func_lalib_unsafe_p () { lalib_p=no if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then for lalib_p_l in 1 2 3 4 do read lalib_p_line case "$lalib_p_line" in \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; esac done exec 0<&5 5<&- fi test "$lalib_p" = yes } # func_ltwrapper_script_p file # True iff FILE is a libtool wrapper script # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_script_p () { func_lalib_p "$1" } # func_ltwrapper_executable_p file # True iff FILE is a libtool wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_executable_p () { func_ltwrapper_exec_suffix= case $1 in *.exe) ;; *) func_ltwrapper_exec_suffix=.exe ;; esac $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 } # func_ltwrapper_scriptname file # Assumes file is an ltwrapper_executable # uses $file to determine the appropriate filename for a # temporary ltwrapper_script. func_ltwrapper_scriptname () { func_ltwrapper_scriptname_result="" if func_ltwrapper_executable_p "$1"; then func_dirname_and_basename "$1" "" "." 
func_stripname '' '.exe' "$func_basename_result" func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" fi } # func_ltwrapper_p file # True iff FILE is a libtool wrapper script or wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_p () { func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" } # func_execute_cmds commands fail_cmd # Execute tilde-delimited COMMANDS. # If FAIL_CMD is given, eval that upon failure. # FAIL_CMD may read-access the current command in variable CMD! func_execute_cmds () { $opt_debug save_ifs=$IFS; IFS='~' for cmd in $1; do IFS=$save_ifs eval cmd=\"$cmd\" func_show_eval "$cmd" "${2-:}" done IFS=$save_ifs } # func_source file # Source FILE, adding directory component if necessary. # Note that it is not necessary on cygwin/mingw to append a dot to # FILE even if both FILE and FILE.exe exist: automatic-append-.exe # behavior happens only for exec(3), not for open(2)! Also, sourcing # `FILE.' does not work on cygwin managed mounts. func_source () { $opt_debug case $1 in */* | *\\*) . "$1" ;; *) . "./$1" ;; esac } # func_infer_tag arg # Infer tagged configuration to use if any are available and # if one wasn't chosen via the "--tag" command line option. # Only attempt this if the compiler in the base compile # command doesn't match the default compiler. # arg is usually of the form 'gcc ...' func_infer_tag () { $opt_debug if test -n "$available_tags" && test -z "$tagname"; then CC_quoted= for arg in $CC; do func_quote_for_eval "$arg" CC_quoted="$CC_quoted $func_quote_for_eval_result" done case $@ in # Blanks in the command may have been stripped by the calling shell, # but not from the CC environment variable when configure was run. " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;; # Blanks at the start of $base_compile will cause this to fail # if we don't check for them as well. *) for z in $available_tags; do if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then # Evaluate the configuration. eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" CC_quoted= for arg in $CC; do # Double-quote args containing other shell metacharacters. func_quote_for_eval "$arg" CC_quoted="$CC_quoted $func_quote_for_eval_result" done case "$@ " in " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) # The compiler in the base compile command matches # the one in the tagged configuration. # Assume this is the tagged configuration we want. tagname=$z break ;; esac fi done # If $tagname still isn't set, then no tagged configuration # was found and let the user know that the "--tag" command # line option must be used. if test -z "$tagname"; then func_echo "unable to infer tagged configuration" func_fatal_error "specify a tag with \`--tag'" # else # func_verbose "using $tagname tagged configuration" fi ;; esac fi } # func_write_libtool_object output_name pic_name nonpic_name # Create a libtool object file (analogous to a ".la" file), # but don't create it if we're doing a dry run. 
func_write_libtool_object () { write_libobj=${1} if test "$build_libtool_libs" = yes; then write_lobj=\'${2}\' else write_lobj=none fi if test "$build_old_libs" = yes; then write_oldobj=\'${3}\' else write_oldobj=none fi $opt_dry_run || { cat >${write_libobj}T <?"'"'"' &()|`$[]' \ && func_warning "libobj name \`$libobj' may not contain shell special characters." func_dirname_and_basename "$obj" "/" "" objname="$func_basename_result" xdir="$func_dirname_result" lobj=${xdir}$objdir/$objname test -z "$base_compile" && \ func_fatal_help "you must specify a compilation command" # Delete any leftover library objects. if test "$build_old_libs" = yes; then removelist="$obj $lobj $libobj ${libobj}T" else removelist="$lobj $libobj ${libobj}T" fi # On Cygwin there's no "real" PIC flag so we must build both object types case $host_os in cygwin* | mingw* | pw32* | os2* | cegcc*) pic_mode=default ;; esac if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then # non-PIC code in shared libraries is not supported pic_mode=default fi # Calculate the filename of the output object if compiler does # not support -o with -c if test "$compiler_c_o" = no; then output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} lockfile="$output_obj.lock" else output_obj= need_locks=no lockfile= fi # Lock this critical section if it is needed # We use this script file to make the link, it avoids creating a new file if test "$need_locks" = yes; then until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done elif test "$need_locks" = warn; then if test -f "$lockfile"; then $ECHO "\ *** ERROR, $lockfile exists and contains: `cat $lockfile 2>/dev/null` This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi removelist="$removelist $output_obj" $ECHO "$srcfile" > "$lockfile" fi $opt_dry_run || $RM $removelist removelist="$removelist $lockfile" trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 if test -n "$fix_srcfile_path"; then eval srcfile=\"$fix_srcfile_path\" fi func_quote_for_eval "$srcfile" qsrcfile=$func_quote_for_eval_result # Only build a PIC object if we are building libtool libraries. if test "$build_libtool_libs" = yes; then # Without this assignment, base_compile gets emptied. fbsd_hideous_sh_bug=$base_compile if test "$pic_mode" != no; then command="$base_compile $qsrcfile $pic_flag" else # Don't build PIC code command="$base_compile $qsrcfile" fi func_mkdir_p "$xdir$objdir" if test -z "$output_obj"; then # Place PIC objects in $objdir command="$command -o $lobj" fi func_show_eval_locale "$command" \ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' if test "$need_locks" = warn && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. 
If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then func_show_eval '$MV "$output_obj" "$lobj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi # Allow error messages only from the first compilation. if test "$suppress_opt" = yes; then suppress_output=' >/dev/null 2>&1' fi fi # Only build a position-dependent object if we build old libraries. if test "$build_old_libs" = yes; then if test "$pic_mode" != yes; then # Don't build PIC code command="$base_compile $qsrcfile$pie_flag" else command="$base_compile $qsrcfile $pic_flag" fi if test "$compiler_c_o" = yes; then command="$command -o $obj" fi # Suppress compiler output if we already did a PIC compilation. command="$command$suppress_output" func_show_eval_locale "$command" \ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' if test "$need_locks" = warn && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support \`-c' and \`-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then func_show_eval '$MV "$output_obj" "$obj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi fi $opt_dry_run || { func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" # Unlock the critical section if it was locked if test "$need_locks" != no; then removelist=$lockfile $RM "$lockfile" fi } exit $EXIT_SUCCESS } $opt_help || { test "$mode" = compile && func_mode_compile ${1+"$@"} } func_mode_help () { # We need to display help for each of the modes. case $mode in "") # Generic help is extracted from the usage comments # at the start of this file. func_help ;; clean) $ECHO \ "Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... Remove files from the build directory. RM is the name of the program to use to delete files associated with each FILE (typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed to RM. If FILE is a libtool library, object or program, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; compile) $ECHO \ "Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE Compile a source file into a libtool library object. This mode accepts the following additional options: -o OUTPUT-FILE set the output file name to OUTPUT-FILE -no-suppress do not suppress compiler output for multiple passes -prefer-pic try to building PIC objects only -prefer-non-pic try to building non-PIC objects only -shared do not build a \`.o' file suitable for static linking -static only build a \`.o' file suitable for static linking COMPILE-COMMAND is a command to be used in creating a \`standard' object file from the given SOURCEFILE. 
The output file name is determined by removing the directory component from SOURCEFILE, then substituting the C source code suffix \`.c' with the library object suffix, \`.lo'." ;; execute) $ECHO \ "Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... Automatically set library path, then run a program. This mode accepts the following additional options: -dlopen FILE add the directory containing FILE to the library path This mode sets the library path environment variable according to \`-dlopen' flags. If any of the ARGS are libtool executable wrappers, then they are translated into their corresponding uninstalled binary, and any of their required library directories are added to the library path. Then, COMMAND is executed, with ARGS as arguments." ;; finish) $ECHO \ "Usage: $progname [OPTION]... --mode=finish [LIBDIR]... Complete the installation of libtool libraries. Each LIBDIR is a directory that contains libtool libraries. The commands that this mode executes may require superuser privileges. Use the \`--dry-run' option if you just want to see what would be executed." ;; install) $ECHO \ "Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... Install executables or libraries. INSTALL-COMMAND is the installation command. The first component should be either the \`install' or \`cp' program. The following components of INSTALL-COMMAND are treated specially: -inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation The rest of the components are interpreted as arguments to that command (only BSD-compatible install options are recognized)." ;; link) $ECHO \ "Usage: $progname [OPTION]... --mode=link LINK-COMMAND... Link object files or libraries together to form another library, or to create an executable program. LINK-COMMAND is a command using the C compiler that you would use to create a program from several object files. 
The following components of LINK-COMMAND are treated specially: -all-static do not do any dynamic linking at all -avoid-version do not add a version suffix if possible -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) -export-symbols SYMFILE try to export only the symbols listed in SYMFILE -export-symbols-regex REGEX try to export only the symbols matching REGEX -LLIBDIR search LIBDIR for required installed libraries -lNAME OUTPUT-FILE requires the installed library libNAME -module build a library that can dlopened -no-fast-install disable the fast-install mode -no-install link a not-installable executable -no-undefined declare that a library does not refer to external symbols -o OUTPUT-FILE create OUTPUT-FILE from the specified objects -objectlist FILE Use a list of object files found in FILE to specify objects -precious-files-regex REGEX don't remove output files matching REGEX -release RELEASE specify package release information -rpath LIBDIR the created library will eventually be installed in LIBDIR -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries -shared only do dynamic linking of libtool libraries -shrext SUFFIX override the standard shared library file extension -static do not do any dynamic linking of uninstalled libtool libraries -static-libtool-libs do not do any dynamic linking of libtool libraries -version-info CURRENT[:REVISION[:AGE]] specify library version info [each variable defaults to 0] -weak LIBNAME declare that the target provides the LIBNAME interface All other options (arguments beginning with \`-') are ignored. Every other argument is treated as a filename. Files ending in \`.la' are treated as uninstalled libtool libraries, other files are standard or library object files. If the OUTPUT-FILE ends in \`.la', then a libtool library is created, only library objects (\`.lo' files) may be specified, and \`-rpath' is required, except when creating a convenience library. If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created using \`ar' and \`ranlib', or on Windows using \`lib'. If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file is created, otherwise an executable program is created." ;; uninstall) $ECHO \ "Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... Remove libraries from an installation directory. RM is the name of the program to use to delete files associated with each FILE (typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed to RM. If FILE is a libtool library, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; *) func_fatal_help "invalid operation mode \`$mode'" ;; esac $ECHO $ECHO "Try \`$progname --help' for more information about other modes." exit $? } # Now that we've collected a possible --mode arg, show help if necessary $opt_help && func_mode_help # func_mode_execute arg... func_mode_execute () { $opt_debug # The first argument is the command name. cmd="$nonopt" test -z "$cmd" && \ func_fatal_help "you must specify a COMMAND" # Handle -dlopen flags immediately. for file in $execute_dlfiles; do test -f "$file" \ || func_fatal_help "\`$file' is not a file" dir= case $file in *.la) # Check to see that this really is a libtool archive. 
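      # (Illustrative only, with hypothetical values: a .la archive is a plain
      #  shell-syntax descriptor, so the func_source call below simply reads
      #  variables such as dlname='libfoo.so.1' and
      #  library_names='libfoo.so.1.0.0 libfoo.so.1 libfoo.so' out of it.)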
func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$lib' is not a valid libtool archive" # Read the libtool library. dlname= library_names= func_source "$file" # Skip this library if it cannot be dlopened. if test -z "$dlname"; then # Warn if it was a shared library. test -n "$library_names" && \ func_warning "\`$file' was not linked with \`-export-dynamic'" continue fi func_dirname "$file" "" "." dir="$func_dirname_result" if test -f "$dir/$objdir/$dlname"; then dir="$dir/$objdir" else if test ! -f "$dir/$dlname"; then func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" fi fi ;; *.lo) # Just add the directory containing the .lo file. func_dirname "$file" "" "." dir="$func_dirname_result" ;; *) func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" continue ;; esac # Get the absolute pathname. absdir=`cd "$dir" && pwd` test -n "$absdir" && dir="$absdir" # Now add the directory to shlibpath_var. if eval "test -z \"\$$shlibpath_var\""; then eval "$shlibpath_var=\"\$dir\"" else eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" fi done # This variable tells wrapper scripts just to set shlibpath_var # rather than running their programs. libtool_execute_magic="$magic" # Check if any of the arguments is a wrapper script. args= for file do case $file in -*) ;; *) # Do a test to see if this is really a libtool program. if func_ltwrapper_script_p "$file"; then func_source "$file" # Transform arg to wrapped name. file="$progdir/$program" elif func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" func_source "$func_ltwrapper_scriptname_result" # Transform arg to wrapped name. file="$progdir/$program" fi ;; esac # Quote arguments (to preserve shell metacharacters). func_quote_for_eval "$file" args="$args $func_quote_for_eval_result" done if test "X$opt_dry_run" = Xfalse; then if test -n "$shlibpath_var"; then # Export the shlibpath_var. eval "export $shlibpath_var" fi # Restore saved environment variables for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${save_$lt_var+set}\" = set; then $lt_var=\$save_$lt_var; export $lt_var else $lt_unset $lt_var fi" done # Now prepare to actually exec the command. exec_cmd="\$cmd$args" else # Display what would be done. if test -n "$shlibpath_var"; then eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" $ECHO "export $shlibpath_var" fi $ECHO "$cmd$args" exit $EXIT_SUCCESS fi } test "$mode" = execute && func_mode_execute ${1+"$@"} # func_mode_finish arg... func_mode_finish () { $opt_debug libdirs="$nonopt" admincmds= if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then for dir do libdirs="$libdirs $dir" done for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. func_execute_cmds "$finish_cmds" 'admincmds="$admincmds '"$cmd"'"' fi if test -n "$finish_eval"; then # Do the single finish_eval. eval cmds=\"$finish_eval\" $opt_dry_run || eval "$cmds" || admincmds="$admincmds $cmds" fi done fi # Exit here if they wanted silent mode. 
$opt_silent && exit $EXIT_SUCCESS $ECHO "X----------------------------------------------------------------------" | $Xsed $ECHO "Libraries have been installed in:" for libdir in $libdirs; do $ECHO " $libdir" done $ECHO $ECHO "If you ever happen to want to link against installed libraries" $ECHO "in a given directory, LIBDIR, you must either use libtool, and" $ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'" $ECHO "flag during linking and do at least one of the following:" if test -n "$shlibpath_var"; then $ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable" $ECHO " during execution" fi if test -n "$runpath_var"; then $ECHO " - add LIBDIR to the \`$runpath_var' environment variable" $ECHO " during linking" fi if test -n "$hardcode_libdir_flag_spec"; then libdir=LIBDIR eval flag=\"$hardcode_libdir_flag_spec\" $ECHO " - use the \`$flag' linker flag" fi if test -n "$admincmds"; then $ECHO " - have your system administrator run these commands:$admincmds" fi if test -f /etc/ld.so.conf; then $ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" fi $ECHO $ECHO "See any operating system documentation about shared libraries for" case $host in solaris2.[6789]|solaris2.1[0-9]) $ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual" $ECHO "pages." ;; *) $ECHO "more information, such as the ld(1) and ld.so(8) manual pages." ;; esac $ECHO "X----------------------------------------------------------------------" | $Xsed exit $EXIT_SUCCESS } test "$mode" = finish && func_mode_finish ${1+"$@"} # func_mode_install arg... func_mode_install () { $opt_debug # There may be an optional sh(1) argument at the beginning of # install_prog (especially on Windows NT). if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || # Allow the use of GNU shtool's install command. $ECHO "X$nonopt" | $GREP shtool >/dev/null; then # Aesthetically quote it. func_quote_for_eval "$nonopt" install_prog="$func_quote_for_eval_result " arg=$1 shift else install_prog= arg=$nonopt fi # The real first argument should be the name of the installation program. # Aesthetically quote it. func_quote_for_eval "$arg" install_prog="$install_prog$func_quote_for_eval_result" # We need to accept at least all the BSD install flags. dest= files= opts= prev= install_type= isdir=no stripme= for arg do if test -n "$dest"; then files="$files $dest" dest=$arg continue fi case $arg in -d) isdir=yes ;; -f) case " $install_prog " in *[\\\ /]cp\ *) ;; *) prev=$arg ;; esac ;; -g | -m | -o) prev=$arg ;; -s) stripme=" -s" continue ;; -*) ;; *) # If the previous option needed an argument, then skip it. if test -n "$prev"; then prev= else dest=$arg continue fi ;; esac # Aesthetically quote the argument. func_quote_for_eval "$arg" install_prog="$install_prog $func_quote_for_eval_result" done test -z "$install_prog" && \ func_fatal_help "you must specify an install program" test -n "$prev" && \ func_fatal_help "the \`$prev' option requires an argument" if test -z "$files"; then if test -z "$dest"; then func_fatal_help "no file or destination specified" else func_fatal_help "you must specify a destination" fi fi # Strip any trailing slash from the destination. func_stripname '' '/' "$dest" dest=$func_stripname_result # Check to see that the destination is a directory. test -d "$dest" && isdir=yes if test "$isdir" = yes; then destdir="$dest" destname= else func_dirname_and_basename "$dest" "" "." 
destdir="$func_dirname_result" destname="$func_basename_result" # Not a directory, so check to see that there is only one file specified. set dummy $files; shift test "$#" -gt 1 && \ func_fatal_help "\`$dest' is not a directory" fi case $destdir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) for file in $files; do case $file in *.lo) ;; *) func_fatal_help "\`$destdir' must be an absolute directory name" ;; esac done ;; esac # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" staticlibs= future_libdirs= current_libdirs= for file in $files; do # Do each installation. case $file in *.$libext) # Do the static libraries later. staticlibs="$staticlibs $file" ;; *.la) # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$file' is not a valid libtool archive" library_names= old_library= relink_command= func_source "$file" # Add the libdir to current_libdirs if it is the destination. if test "X$destdir" = "X$libdir"; then case "$current_libdirs " in *" $libdir "*) ;; *) current_libdirs="$current_libdirs $libdir" ;; esac else # Note the libdir as a future libdir. case "$future_libdirs " in *" $libdir "*) ;; *) future_libdirs="$future_libdirs $libdir" ;; esac fi func_dirname "$file" "/" "" dir="$func_dirname_result" dir="$dir$objdir" if test -n "$relink_command"; then # Determine the prefix the user has applied to our future dir. inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"` # Don't allow the user to place us outside of our expected # location b/c this prevents finding dependent libraries that # are installed to the same prefix. # At present, this check doesn't affect windows .dll's that # are installed into $libdir/../bin (currently, that works fine) # but it's something to keep an eye on. test "$inst_prefix_dir" = "$destdir" && \ func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" if test -n "$inst_prefix_dir"; then # Stick the inst_prefix_dir data into the link command. relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` else relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"` fi func_warning "relinking \`$file'" func_show_eval "$relink_command" \ 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' fi # See the names of the shared library. set dummy $library_names; shift if test -n "$1"; then realname="$1" shift srcname="$realname" test -n "$relink_command" && srcname="$realname"T # Install the shared library and build the symlinks. func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \ 'exit $?' tstripme="$stripme" case $host_os in cygwin* | mingw* | pw32* | cegcc*) case $realname in *.dll.a) tstripme="" ;; esac ;; esac if test -n "$tstripme" && test -n "$striplib"; then func_show_eval "$striplib $destdir/$realname" 'exit $?' fi if test "$#" -gt 0; then # Delete the old symlinks, and create new ones. # Try `ln -sf' first, because the `ln' binary might depend on # the symlink we replace! Solaris /bin/ln does not understand -f, # so we also need to try rm && ln -s. for linkname do test "$linkname" != "$realname" \ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done fi # Do each command in the postinstall commands. lib="$destdir/$realname" func_execute_cmds "$postinstall_cmds" 'exit $?' 
fi # Install the pseudo-library for information purposes. func_basename "$file" name="$func_basename_result" instname="$dir/$name"i func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' # Maybe install the static library, too. test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" ;; *.lo) # Install (i.e. copy) a libtool object. # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # Deduce the name of the destination old-style object file. case $destfile in *.lo) func_lo2o "$destfile" staticdest=$func_lo2o_result ;; *.$objext) staticdest="$destfile" destfile= ;; *) func_fatal_help "cannot copy a libtool object to \`$destfile'" ;; esac # Install the libtool object if requested. test -n "$destfile" && \ func_show_eval "$install_prog $file $destfile" 'exit $?' # Install the old object if enabled. if test "$build_old_libs" = yes; then # Deduce the name of the old-style object file. func_lo2o "$file" staticobj=$func_lo2o_result func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' fi exit $EXIT_SUCCESS ;; *) # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # If the file is missing, and there is a .exe on the end, strip it # because it is most likely a libtool script we actually want to # install stripped_ext="" case $file in *.exe) if test ! -f "$file"; then func_stripname '' '.exe' "$file" file=$func_stripname_result stripped_ext=".exe" fi ;; esac # Do a test to see if this is really a libtool program. case $host in *cygwin* | *mingw*) if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" wrapper=$func_ltwrapper_scriptname_result else func_stripname '' '.exe' "$file" wrapper=$func_stripname_result fi ;; *) wrapper=$file ;; esac if func_ltwrapper_script_p "$wrapper"; then notinst_deplibs= relink_command= func_source "$wrapper" # Check the variables that should have been set. test -z "$generated_by_libtool_version" && \ func_fatal_error "invalid libtool wrapper script \`$wrapper'" finalize=yes for lib in $notinst_deplibs; do # Check to see that each library is installed. libdir= if test -f "$lib"; then func_source "$lib" fi libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test if test -n "$libdir" && test ! -f "$libfile"; then func_warning "\`$lib' has not been installed in \`$libdir'" finalize=no fi done relink_command= func_source "$wrapper" outputname= if test "$fast_install" = no && test -n "$relink_command"; then $opt_dry_run || { if test "$finalize" = yes; then tmpdir=`func_mktempdir` func_basename "$file$stripped_ext" file="$func_basename_result" outputname="$tmpdir/$file" # Replace the output file specification. relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` $opt_silent || { func_quote_for_expand "$relink_command" eval "func_echo $func_quote_for_expand_result" } if eval "$relink_command"; then : else func_error "error: relink \`$file' with the above command before installing it" $opt_dry_run || ${RM}r "$tmpdir" continue fi file="$outputname" else func_warning "cannot relink \`$file'" fi } else # Install the binary that we compiled earlier. 
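              # When fast-install is in effect (or no relinking is needed),
              # the binary built earlier already lives under $objdir, so the
              # sed rewrite below simply points $file at "$objdir/<basename>".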
file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` fi fi # remove .exe since cygwin /usr/bin/install will append another # one anyway case $install_prog,$host in */usr/bin/install*,*cygwin*) case $file:$destfile in *.exe:*.exe) # this is ok ;; *.exe:*) destfile=$destfile.exe ;; *:*.exe) func_stripname '' '.exe' "$destfile" destfile=$func_stripname_result ;; esac ;; esac func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' $opt_dry_run || if test -n "$outputname"; then ${RM}r "$tmpdir" fi ;; esac done for file in $staticlibs; do func_basename "$file" name="$func_basename_result" # Set up the ranlib parameters. oldlib="$destdir/$name" func_show_eval "$install_prog \$file \$oldlib" 'exit $?' if test -n "$stripme" && test -n "$old_striplib"; then func_show_eval "$old_striplib $oldlib" 'exit $?' fi # Do each command in the postinstall commands. func_execute_cmds "$old_postinstall_cmds" 'exit $?' done test -n "$future_libdirs" && \ func_warning "remember to run \`$progname --finish$future_libdirs'" if test -n "$current_libdirs"; then # Maybe just do a dry run. $opt_dry_run && current_libdirs=" -n$current_libdirs" exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' else exit $EXIT_SUCCESS fi } test "$mode" = install && func_mode_install ${1+"$@"} # func_generate_dlsyms outputname originator pic_p # Extract symbols from dlprefiles and create ${outputname}S.o with # a dlpreopen symbol table. func_generate_dlsyms () { $opt_debug my_outputname="$1" my_originator="$2" my_pic_p="${3-no}" my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` my_dlsyms= if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then if test -n "$NM" && test -n "$global_symbol_pipe"; then my_dlsyms="${my_outputname}S.c" else func_error "not configured to extract global symbols from dlpreopened files" fi fi if test -n "$my_dlsyms"; then case $my_dlsyms in "") ;; *.c) # Discover the nlist of each of the dlfiles. nlist="$output_objdir/${my_outputname}.nm" func_show_eval "$RM $nlist ${nlist}S ${nlist}T" # Parse the name list into a source file. func_verbose "creating $output_objdir/$my_dlsyms" $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ /* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ /* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ #ifdef __cplusplus extern \"C\" { #endif /* External symbol declarations for the compiler. */\ " if test "$dlself" = yes; then func_verbose "generating symbol list for \`$output'" $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" # Add our own program objects to the symbol list. 
progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` for progfile in $progfiles; do func_verbose "extracting global C symbols from \`$progfile'" $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" done if test -n "$exclude_expsyms"; then $opt_dry_run || { eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi if test -n "$export_symbols_regex"; then $opt_dry_run || { eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi # Prepare the list of exported symbols if test -z "$export_symbols"; then export_symbols="$output_objdir/$outputname.exp" $opt_dry_run || { $RM $export_symbols eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' ;; esac } else $opt_dry_run || { eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' case $host in *cygwin | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' ;; esac } fi fi for dlprefile in $dlprefiles; do func_verbose "extracting global C symbols from \`$dlprefile'" func_basename "$dlprefile" name="$func_basename_result" $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" } done $opt_dry_run || { # Make sure we have at least an empty file. test -f "$nlist" || : > "$nlist" if test -n "$exclude_expsyms"; then $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T $MV "$nlist"T "$nlist" fi # Try sorting and uniquifying the output. if $GREP -v "^: " < "$nlist" | if sort -k 3 /dev/null 2>&1; then sort -k 3 else sort +2 fi | uniq > "$nlist"S; then : else $GREP -v "^: " < "$nlist" > "$nlist"S fi if test -f "$nlist"S; then eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' else $ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms" fi $ECHO >> "$output_objdir/$my_dlsyms" "\ /* The mapping between symbol names and symbols. */ typedef struct { const char *name; void *address; } lt_dlsymlist; " case $host in *cygwin* | *mingw* | *cegcc* ) $ECHO >> "$output_objdir/$my_dlsyms" "\ /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. 
*/" lt_dlsym_const= ;; *osf5*) echo >> "$output_objdir/$my_dlsyms" "\ /* This system does not cope well with relocations in const data */" lt_dlsym_const= ;; *) lt_dlsym_const=const ;; esac $ECHO >> "$output_objdir/$my_dlsyms" "\ extern $lt_dlsym_const lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[]; $lt_dlsym_const lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[] = {\ { \"$my_originator\", (void *) 0 }," case $need_lib_prefix in no) eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; *) eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; esac $ECHO >> "$output_objdir/$my_dlsyms" "\ {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt_${my_prefix}_LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif\ " } # !$opt_dry_run pic_flag_for_symtable= case "$compile_command " in *" -static "*) ;; *) case $host in # compiling the symbol table file with pic_flag works around # a FreeBSD bug that causes programs to crash when -lm is # linked before any other PIC object. But we must not use # pic_flag when linking with -static. The problem exists in # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; *-*-hpux*) pic_flag_for_symtable=" $pic_flag" ;; *) if test "X$my_pic_p" != Xno; then pic_flag_for_symtable=" $pic_flag" fi ;; esac ;; esac symtab_cflags= for arg in $LTCFLAGS; do case $arg in -pie | -fpie | -fPIE) ;; *) symtab_cflags="$symtab_cflags $arg" ;; esac done # Now compile the dynamic symbol file. func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' # Clean up the generated files. func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' # Transform the symbol file into the correct name. symfileobj="$output_objdir/${my_outputname}S.$objext" case $host in *cygwin* | *mingw* | *cegcc* ) if test -f "$output_objdir/$my_outputname.def"; then compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` else compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` fi ;; *) compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` ;; esac ;; *) func_fatal_error "unknown suffix for \`$my_dlsyms'" ;; esac else # We keep going just in case the user didn't refer to # lt_preloaded_symbols. The linker will fail if global_symbol_pipe # really was required. # Nullify the symbol file. compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` fi } # func_win32_libid arg # return the library type of file 'arg' # # Need a lot of goo to handle *both* DLLs and import libs # Has to be a shell function in order to 'eat' the argument # that is supplied when $file_magic_command is called. 
func_win32_libid () { $opt_debug win32_libid_type="unknown" win32_fileres=`file -L $1 2>/dev/null` case $win32_fileres in *ar\ archive\ import\ library*) # definitely import win32_libid_type="x86 archive import" ;; *ar\ archive*) # could be an import, or static if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | $EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then win32_nmres=`eval $NM -f posix -A $1 | $SED -n -e ' 1,100{ / I /{ s,.*,import, p q } }'` case $win32_nmres in import*) win32_libid_type="x86 archive import";; *) win32_libid_type="x86 archive static";; esac fi ;; *DLL*) win32_libid_type="x86 DLL" ;; *executable*) # but shell scripts are "executable" too... case $win32_fileres in *MS\ Windows\ PE\ Intel*) win32_libid_type="x86 DLL" ;; esac ;; esac $ECHO "$win32_libid_type" } # func_extract_an_archive dir oldlib func_extract_an_archive () { $opt_debug f_ex_an_ar_dir="$1"; shift f_ex_an_ar_oldlib="$1" func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?' if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then : else func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" fi } # func_extract_archives gentop oldlib ... func_extract_archives () { $opt_debug my_gentop="$1"; shift my_oldlibs=${1+"$@"} my_oldobjs="" my_xlib="" my_xabs="" my_xdir="" for my_xlib in $my_oldlibs; do # Extract the objects. case $my_xlib in [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; *) my_xabs=`pwd`"/$my_xlib" ;; esac func_basename "$my_xlib" my_xlib="$func_basename_result" my_xlib_u=$my_xlib while :; do case " $extracted_archives " in *" $my_xlib_u "*) func_arith $extracted_serial + 1 extracted_serial=$func_arith_result my_xlib_u=lt$extracted_serial-$my_xlib ;; *) break ;; esac done extracted_archives="$extracted_archives $my_xlib_u" my_xdir="$my_gentop/$my_xlib_u" func_mkdir_p "$my_xdir" case $host in *-darwin*) func_verbose "Extracting $my_xabs" # Do not bother doing anything if just a dry run $opt_dry_run || { darwin_orig_dir=`pwd` cd $my_xdir || exit $? 
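          # Sketch of what follows: lipo -info lists the architectures in a
          # fat archive; each one is thinned out with lipo -thin into a
          # temporary unfat-$$ tree, extracted with func_extract_an_archive,
          # and the per-architecture objects are recombined with lipo -create.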
darwin_archive=$my_xabs darwin_curdir=`pwd` darwin_base_archive=`basename "$darwin_archive"` darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` if test -n "$darwin_arches"; then darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` darwin_arch= func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" for darwin_arch in $darwin_arches ; do func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" func_extract_an_archive "`pwd`" "${darwin_base_archive}" cd "$darwin_curdir" $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" done # $darwin_arches ## Okay now we've a bunch of thin objects, gotta fatten them up :) darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` darwin_file= darwin_files= for darwin_file in $darwin_filelist; do darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` $LIPO -create -output "$darwin_file" $darwin_files done # $darwin_filelist $RM -rf unfat-$$ cd "$darwin_orig_dir" else cd $darwin_orig_dir func_extract_an_archive "$my_xdir" "$my_xabs" fi # $darwin_arches } # !$opt_dry_run ;; *) func_extract_an_archive "$my_xdir" "$my_xabs" ;; esac my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` done func_extract_archives_result="$my_oldobjs" } # func_emit_wrapper_part1 [arg=no] # # Emit the first part of a libtool wrapper script on stdout. # For more information, see the description associated with # func_emit_wrapper(), below. func_emit_wrapper_part1 () { func_emit_wrapper_part1_arg1=no if test -n "$1" ; then func_emit_wrapper_part1_arg1=$1 fi $ECHO "\ #! $SHELL # $output - temporary wrapper script for $objdir/$outputname # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # The $output program cannot be directly executed until all the libtool # libraries that it depends on are installed. # # This wrapper script should never be moved out of the build directory. # If it is, it will not operate correctly. # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. Xsed='${SED} -e 1s/^X//' sed_quote_subst='$sed_quote_subst' # Be Bourne compatible if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" # This environment variable determines our operation mode. if test \"\$libtool_install_magic\" = \"$magic\"; then # install mode needs the following variables: generated_by_libtool_version='$macro_version' notinst_deplibs='$notinst_deplibs' else # When we are sourced in execute mode, \$file and \$ECHO are already set. if test \"\$libtool_execute_magic\" != \"$magic\"; then ECHO=\"$qecho\" file=\"\$0\" # Make sure echo works. 
if test \"X\$1\" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then # Yippee, \$ECHO works! : else # Restart under the correct shell, and then maybe \$ECHO will work. exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} fi fi\ " $ECHO "\ # Find the directory that this script lives in. thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` test \"x\$thisdir\" = \"x\$file\" && thisdir=. # Follow symbolic links until we get to the real thisdir. file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` while test -n \"\$file\"; do destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` # If there was a directory component, then change thisdir. if test \"x\$destdir\" != \"x\$file\"; then case \"\$destdir\" in [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; *) thisdir=\"\$thisdir/\$destdir\" ;; esac fi file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\` file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` done " } # end: func_emit_wrapper_part1 # func_emit_wrapper_part2 [arg=no] # # Emit the second part of a libtool wrapper script on stdout. # For more information, see the description associated with # func_emit_wrapper(), below. func_emit_wrapper_part2 () { func_emit_wrapper_part2_arg1=no if test -n "$1" ; then func_emit_wrapper_part2_arg1=$1 fi $ECHO "\ # Usually 'no', except on cygwin/mingw when embedded into # the cwrapper. WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_part2_arg1 if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then # special case for '.' if test \"\$thisdir\" = \".\"; then thisdir=\`pwd\` fi # remove .libs from thisdir case \"\$thisdir\" in *[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;; $objdir ) thisdir=. ;; esac fi # Try to get the absolute directory name. absdir=\`cd \"\$thisdir\" && pwd\` test -n \"\$absdir\" && thisdir=\"\$absdir\" " if test "$fast_install" = yes; then $ECHO "\ program=lt-'$outputname'$exeext progdir=\"\$thisdir/$objdir\" if test ! -f \"\$progdir/\$program\" || { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ test \"X\$file\" != \"X\$progdir/\$program\"; }; then file=\"\$\$-\$program\" if test ! -d \"\$progdir\"; then $MKDIR \"\$progdir\" else $RM \"\$progdir/\$file\" fi" $ECHO "\ # relink executable if necessary if test -n \"\$relink_command\"; then if relink_command_output=\`eval \$relink_command 2>&1\`; then : else $ECHO \"\$relink_command_output\" >&2 $RM \"\$progdir/\$file\" exit 1 fi fi $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || { $RM \"\$progdir/\$program\"; $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } $RM \"\$progdir/\$file\" fi" else $ECHO "\ program='$outputname' progdir=\"\$thisdir/$objdir\" " fi $ECHO "\ if test -f \"\$progdir/\$program\"; then" # Export our shlibpath_var if we have one. if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then $ECHO "\ # Add our own library path to $shlibpath_var $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" # Some systems cannot cope with colon-terminated $shlibpath_var # The second colon is a workaround for a bug in BeOS R4 sed $shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` export $shlibpath_var " fi # fixup the dll searchpath if we need to. 
if test -n "$dllsearchpath"; then $ECHO "\ # Add the dll search path components to the executable PATH PATH=$dllsearchpath:\$PATH " fi $ECHO "\ if test \"\$libtool_execute_magic\" != \"$magic\"; then # Run the actual program with our arguments. " case $host in # Backslashes separate directories on plain windows *-*-mingw | *-*-os2* | *-cegcc*) $ECHO "\ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} " ;; *) $ECHO "\ exec \"\$progdir/\$program\" \${1+\"\$@\"} " ;; esac $ECHO "\ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 exit 1 fi else # The program doesn't exist. \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 $ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 exit 1 fi fi\ " } # end: func_emit_wrapper_part2 # func_emit_wrapper [arg=no] # # Emit a libtool wrapper script on stdout. # Don't directly open a file because we may want to # incorporate the script contents within a cygwin/mingw # wrapper executable. Must ONLY be called from within # func_mode_link because it depends on a number of variables # set therein. # # ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR # variable will take. If 'yes', then the emitted script # will assume that the directory in which it is stored is # the $objdir directory. This is a cygwin/mingw-specific # behavior. func_emit_wrapper () { func_emit_wrapper_arg1=no if test -n "$1" ; then func_emit_wrapper_arg1=$1 fi # split this up so that func_emit_cwrapperexe_src # can call each part independently. func_emit_wrapper_part1 "${func_emit_wrapper_arg1}" func_emit_wrapper_part2 "${func_emit_wrapper_arg1}" } # func_to_host_path arg # # Convert paths to host format when used with build tools. # Intended for use with "native" mingw (where libtool itself # is running under the msys shell), or in the following cross- # build environments: # $build $host # mingw (msys) mingw [e.g. native] # cygwin mingw # *nix + wine mingw # where wine is equipped with the `winepath' executable. # In the native mingw case, the (msys) shell automatically # converts paths for any non-msys applications it launches, # but that facility isn't available from inside the cwrapper. # Similar accommodations are necessary for $host mingw and # $build cygwin. Calling this function does no harm for other # $host/$build combinations not listed above. # # ARG is the path (on $build) that should be converted to # the proper representation for $host. The result is stored # in $func_to_host_path_result. func_to_host_path () { func_to_host_path_result="$1" if test -n "$1" ; then case $host in *mingw* ) lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' case $build in *mingw* ) # actually, msys # awkward: cmd appends spaces to result lt_sed_strip_trailing_spaces="s/[ ]*\$//" func_to_host_path_tmp1=`( cmd //c echo "$1" |\ $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ $SED -e "$lt_sed_naive_backslashify"` ;; *cygwin* ) func_to_host_path_tmp1=`cygpath -w "$1"` func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ $SED -e "$lt_sed_naive_backslashify"` ;; * ) # Unfortunately, winepath does not exit with a non-zero # error code, so we are forced to check the contents of # stdout. On the other hand, if the command is not # found, the shell will set an exit code of 127 and print # *an error message* to stdout. 
So we must check for both # error code of zero AND non-empty stdout, which explains # the odd construction: func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ $SED -e "$lt_sed_naive_backslashify"` else # Allow warning below. func_to_host_path_result="" fi ;; esac if test -z "$func_to_host_path_result" ; then func_error "Could not determine host path corresponding to" func_error " '$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback: func_to_host_path_result="$1" fi ;; esac fi } # end: func_to_host_path # func_to_host_pathlist arg # # Convert pathlists to host format when used with build tools. # See func_to_host_path(), above. This function supports the # following $build/$host combinations (but does no harm for # combinations not listed here): # $build $host # mingw (msys) mingw [e.g. native] # cygwin mingw # *nix + wine mingw # # Path separators are also converted from $build format to # $host format. If ARG begins or ends with a path separator # character, it is preserved (but converted to $host format) # on output. # # ARG is a pathlist (on $build) that should be converted to # the proper representation on $host. The result is stored # in $func_to_host_pathlist_result. func_to_host_pathlist () { func_to_host_pathlist_result="$1" if test -n "$1" ; then case $host in *mingw* ) lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' # Remove leading and trailing path separator characters from # ARG. msys behavior is inconsistent here, cygpath turns them # into '.;' and ';.', and winepath ignores them completely. func_to_host_pathlist_tmp2="$1" # Once set for this call, this variable should not be # reassigned. It is used in tha fallback case. func_to_host_pathlist_tmp1=`echo "$func_to_host_pathlist_tmp2" |\ $SED -e 's|^:*||' -e 's|:*$||'` case $build in *mingw* ) # Actually, msys. # Awkward: cmd appends spaces to result. lt_sed_strip_trailing_spaces="s/[ ]*\$//" func_to_host_pathlist_tmp2=`( cmd //c echo "$func_to_host_pathlist_tmp1" |\ $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ $SED -e "$lt_sed_naive_backslashify"` ;; *cygwin* ) func_to_host_pathlist_tmp2=`cygpath -w -p "$func_to_host_pathlist_tmp1"` func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ $SED -e "$lt_sed_naive_backslashify"` ;; * ) # unfortunately, winepath doesn't convert pathlists func_to_host_pathlist_result="" func_to_host_pathlist_oldIFS=$IFS IFS=: for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do IFS=$func_to_host_pathlist_oldIFS if test -n "$func_to_host_pathlist_f" ; then func_to_host_path "$func_to_host_pathlist_f" if test -n "$func_to_host_path_result" ; then if test -z "$func_to_host_pathlist_result" ; then func_to_host_pathlist_result="$func_to_host_path_result" else func_to_host_pathlist_result="$func_to_host_pathlist_result;$func_to_host_path_result" fi fi fi IFS=: done IFS=$func_to_host_pathlist_oldIFS ;; esac if test -z "$func_to_host_pathlist_result" ; then func_error "Could not determine the host path(s) corresponding to" func_error " '$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback. This may break if $1 contains DOS-style drive # specifications. 
The fix is not to complicate the expression # below, but for the user to provide a working wine installation # with winepath so that path translation in the cross-to-mingw # case works properly. lt_replace_pathsep_nix_to_dos="s|:|;|g" func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ $SED -e "$lt_replace_pathsep_nix_to_dos"` fi # Now, add the leading and trailing path separators back case "$1" in :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" ;; esac case "$1" in *: ) func_to_host_pathlist_result="$func_to_host_pathlist_result;" ;; esac ;; esac fi } # end: func_to_host_pathlist # func_emit_cwrapperexe_src # emit the source code for a wrapper executable on stdout # Must ONLY be called from within func_mode_link because # it depends on a number of variable set therein. func_emit_cwrapperexe_src () { cat < #include #ifdef _MSC_VER # include # include # include # define setmode _setmode #else # include # include # ifdef __CYGWIN__ # include # define HAVE_SETENV # ifdef __STRICT_ANSI__ char *realpath (const char *, char *); int putenv (char *); int setenv (const char *, const char *, int); # endif # endif #endif #include #include #include #include #include #include #include #include #if defined(PATH_MAX) # define LT_PATHMAX PATH_MAX #elif defined(MAXPATHLEN) # define LT_PATHMAX MAXPATHLEN #else # define LT_PATHMAX 1024 #endif #ifndef S_IXOTH # define S_IXOTH 0 #endif #ifndef S_IXGRP # define S_IXGRP 0 #endif #ifdef _MSC_VER # define S_IXUSR _S_IEXEC # define stat _stat # ifndef _INTPTR_T_DEFINED # define intptr_t int # endif #endif #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # define PATH_SEPARATOR ':' #endif #if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ defined (__OS2__) # define HAVE_DOS_BASED_FILE_SYSTEM # define FOPEN_WB "wb" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # ifndef PATH_SEPARATOR_2 # define PATH_SEPARATOR_2 ';' # endif #endif #ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) #else /* DIR_SEPARATOR_2 */ # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) #endif /* DIR_SEPARATOR_2 */ #ifndef PATH_SEPARATOR_2 # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) #else /* PATH_SEPARATOR_2 */ # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) #endif /* PATH_SEPARATOR_2 */ #ifdef __CYGWIN__ # define FOPEN_WB "wb" #endif #ifndef FOPEN_WB # define FOPEN_WB "w" #endif #ifndef _O_BINARY # define _O_BINARY 0 #endif #define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) #define XFREE(stale) do { \ if (stale) { free ((void *) stale); stale = 0; } \ } while (0) #undef LTWRAPPER_DEBUGPRINTF #if defined DEBUGWRAPPER # define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args static void ltwrapper_debugprintf (const char *fmt, ...) 
{ va_list args; va_start (args, fmt); (void) vfprintf (stderr, fmt, args); va_end (args); } #else # define LTWRAPPER_DEBUGPRINTF(args) #endif const char *program_name = NULL; void *xmalloc (size_t num); char *xstrdup (const char *string); const char *base_name (const char *name); char *find_executable (const char *wrapper); char *chase_symlinks (const char *pathspec); int make_executable (const char *path); int check_executable (const char *path); char *strendzap (char *str, const char *pat); void lt_fatal (const char *message, ...); void lt_setenv (const char *name, const char *value); char *lt_extend_str (const char *orig_value, const char *add, int to_end); void lt_opt_process_env_set (const char *arg); void lt_opt_process_env_prepend (const char *arg); void lt_opt_process_env_append (const char *arg); int lt_split_name_value (const char *arg, char** name, char** value); void lt_update_exe_path (const char *name, const char *value); void lt_update_lib_path (const char *name, const char *value); static const char *script_text_part1 = EOF func_emit_wrapper_part1 yes | $SED -e 's/\([\\"]\)/\\\1/g' \ -e 's/^/ "/' -e 's/$/\\n"/' echo ";" cat <"))); for (i = 0; i < newargc; i++) { LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : ""))); } EOF case $host_os in mingw*) cat <<"EOF" /* execv doesn't actually work on mingw as expected on unix */ rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz); if (rval == -1) { /* failed to start process */ LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); return 127; } return rval; EOF ;; *) cat <<"EOF" execv (lt_argv_zero, newargz); return rval; /* =127, but avoids unused variable warning */ EOF ;; esac cat <<"EOF" } void * xmalloc (size_t num) { void *p = (void *) malloc (num); if (!p) lt_fatal ("Memory exhausted"); return p; } char * xstrdup (const char *string) { return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL; } const char * base_name (const char *name) { const char *base; #if defined (HAVE_DOS_BASED_FILE_SYSTEM) /* Skip over the disk name in MSDOS pathnames. */ if (isalpha ((unsigned char) name[0]) && name[1] == ':') name += 2; #endif for (base = name; *name; name++) if (IS_DIR_SEPARATOR (*name)) base = name + 1; return base; } int check_executable (const char *path) { struct stat st; LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!")); if ((!path) || (!*path)) return 0; if ((stat (path, &st) >= 0) && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) return 1; else return 0; } int make_executable (const char *path) { int rval = 0; struct stat st; LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!")); if ((!path) || (!*path)) return 0; if (stat (path, &st) >= 0) { rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); } return rval; } /* Searches for the full path of the wrapper. Returns newly allocated full path name if found, NULL otherwise Does not chase symlinks, even on platforms that support them. */ char * find_executable (const char *wrapper) { int has_slash = 0; const char *p; const char *p_next; /* static buffer for getcwd */ char tmp[LT_PATHMAX + 1]; int tmp_len; char *concat_name; LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!")); if ((wrapper == NULL) || (*wrapper == '\0')) return NULL; /* Absolute path? 
*/ #if defined (HAVE_DOS_BASED_FILE_SYSTEM) if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } else { #endif if (IS_DIR_SEPARATOR (wrapper[0])) { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } #if defined (HAVE_DOS_BASED_FILE_SYSTEM) } #endif for (p = wrapper; *p; p++) if (*p == '/') { has_slash = 1; break; } if (!has_slash) { /* no slashes; search PATH */ const char *path = getenv ("PATH"); if (path != NULL) { for (p = path; *p; p = p_next) { const char *q; size_t p_len; for (q = p; *q; q++) if (IS_PATH_SEPARATOR (*q)) break; p_len = q - p; p_next = (*q == '\0' ? q : q + 1); if (p_len == 0) { /* empty path: current directory */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal ("getcwd failed"); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); } else { concat_name = XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, p, p_len); concat_name[p_len] = '/'; strcpy (concat_name + p_len + 1, wrapper); } if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } } /* not found in PATH; assume curdir */ } /* Relative path | not found in path: prepend cwd */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal ("getcwd failed"); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); return NULL; } char * chase_symlinks (const char *pathspec) { #ifndef S_ISLNK return xstrdup (pathspec); #else char buf[LT_PATHMAX]; struct stat s; char *tmp_pathspec = xstrdup (pathspec); char *p; int has_symlinks = 0; while (strlen (tmp_pathspec) && !has_symlinks) { LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", tmp_pathspec)); if (lstat (tmp_pathspec, &s) == 0) { if (S_ISLNK (s.st_mode) != 0) { has_symlinks = 1; break; } /* search backwards for last DIR_SEPARATOR */ p = tmp_pathspec + strlen (tmp_pathspec) - 1; while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) p--; if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) { /* no more DIR_SEPARATORS left */ break; } *p = '\0'; } else { char *errstr = strerror (errno); lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); } } XFREE (tmp_pathspec); if (!has_symlinks) { return xstrdup (pathspec); } tmp_pathspec = realpath (pathspec, buf); if (tmp_pathspec == 0) { lt_fatal ("Could not follow symlinks for %s", pathspec); } return xstrdup (tmp_pathspec); #endif } char * strendzap (char *str, const char *pat) { size_t len, patlen; assert (str != NULL); assert (pat != NULL); len = strlen (str); patlen = strlen (pat); if (patlen <= len) { str += len - patlen; if (strcmp (str, pat) == 0) *str = '\0'; } return str; } static void lt_error_core (int exit_status, const char *mode, const char *message, va_list ap) { fprintf (stderr, "%s: %s: ", program_name, mode); vfprintf (stderr, message, ap); fprintf (stderr, ".\n"); if (exit_status >= 0) exit (exit_status); } void lt_fatal (const char *message, ...) 
{ va_list ap; va_start (ap, message); lt_error_core (EXIT_FAILURE, "FATAL", message, ap); va_end (ap); } void lt_setenv (const char *name, const char *value) { LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", (name ? name : ""), (value ? value : ""))); { #ifdef HAVE_SETENV /* always make a copy, for consistency with !HAVE_SETENV */ char *str = xstrdup (value); setenv (name, str, 1); #else int len = strlen (name) + 1 + strlen (value) + 1; char *str = XMALLOC (char, len); sprintf (str, "%s=%s", name, value); if (putenv (str) != EXIT_SUCCESS) { XFREE (str); } #endif } } char * lt_extend_str (const char *orig_value, const char *add, int to_end) { char *new_value; if (orig_value && *orig_value) { int orig_value_len = strlen (orig_value); int add_len = strlen (add); new_value = XMALLOC (char, add_len + orig_value_len + 1); if (to_end) { strcpy (new_value, orig_value); strcpy (new_value + orig_value_len, add); } else { strcpy (new_value, add); strcpy (new_value + add_len, orig_value); } } else { new_value = xstrdup (add); } return new_value; } int lt_split_name_value (const char *arg, char** name, char** value) { const char *p; int len; if (!arg || !*arg) return 1; p = strchr (arg, (int)'='); if (!p) return 1; *value = xstrdup (++p); len = strlen (arg) - strlen (*value); *name = XMALLOC (char, len); strncpy (*name, arg, len-1); (*name)[len - 1] = '\0'; return 0; } void lt_opt_process_env_set (const char *arg) { char *name = NULL; char *value = NULL; if (lt_split_name_value (arg, &name, &value) != 0) { XFREE (name); XFREE (value); lt_fatal ("bad argument for %s: '%s'", env_set_opt, arg); } lt_setenv (name, value); XFREE (name); XFREE (value); } void lt_opt_process_env_prepend (const char *arg) { char *name = NULL; char *value = NULL; char *new_value = NULL; if (lt_split_name_value (arg, &name, &value) != 0) { XFREE (name); XFREE (value); lt_fatal ("bad argument for %s: '%s'", env_prepend_opt, arg); } new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); XFREE (name); XFREE (value); } void lt_opt_process_env_append (const char *arg) { char *name = NULL; char *value = NULL; char *new_value = NULL; if (lt_split_name_value (arg, &name, &value) != 0) { XFREE (name); XFREE (value); lt_fatal ("bad argument for %s: '%s'", env_append_opt, arg); } new_value = lt_extend_str (getenv (name), value, 1); lt_setenv (name, new_value); XFREE (new_value); XFREE (name); XFREE (value); } void lt_update_exe_path (const char *name, const char *value) { LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", (name ? name : ""), (value ? value : ""))); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); /* some systems can't cope with a ':'-terminated path #' */ int len = strlen (new_value); while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) { new_value[len-1] = '\0'; } lt_setenv (name, new_value); XFREE (new_value); } } void lt_update_lib_path (const char *name, const char *value) { LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", (name ? name : ""), (value ? value : ""))); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); } } EOF } # end: func_emit_cwrapperexe_src # func_mode_link arg... 
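# Illustrative example with hypothetical names: an automake-generated rule
# might invoke this mode as
#   libtool --mode=link gcc -o libfoo.la foo.lo bar.lo \
#           -rpath /usr/local/lib -version-info 1:0:0
# and func_mode_link then separates libtool objects (.lo), libtool libraries
# (.la) and ordinary compiler/linker flags from such a command line.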
func_mode_link () { $opt_debug case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) # It is impossible to link a dll without this setting, and # we shouldn't force the makefile maintainer to figure out # which system we are compiling for in order to pass an extra # flag for every libtool invocation. # allow_undefined=no # FIXME: Unfortunately, there are problems with the above when trying # to make a dll which has undefined symbols, in which case not # even a static library is built. For now, we need to specify # -no-undefined on the libtool link line when we can be certain # that all symbols are satisfied, otherwise we get a static library. allow_undefined=yes ;; *) allow_undefined=yes ;; esac libtool_args=$nonopt base_compile="$nonopt $@" compile_command=$nonopt finalize_command=$nonopt compile_rpath= finalize_rpath= compile_shlibpath= finalize_shlibpath= convenience= old_convenience= deplibs= old_deplibs= compiler_flags= linker_flags= dllsearchpath= lib_search_path=`pwd` inst_prefix_dir= new_inherited_linker_flags= avoid_version=no dlfiles= dlprefiles= dlself=no export_dynamic=no export_symbols= export_symbols_regex= generated= libobjs= ltlibs= module=no no_install=no objs= non_pic_objects= precious_files_regex= prefer_static_libs=no preload=no prev= prevarg= release= rpath= xrpath= perm_rpath= temp_rpath= thread_safe=no vinfo= vinfo_number=no weak_libs= single_module="${wl}-single_module" func_infer_tag $base_compile # We need to know -static, to get the right output filenames. for arg do case $arg in -shared) test "$build_libtool_libs" != yes && \ func_fatal_configuration "can not build a shared library" build_old_libs=no break ;; -all-static | -static | -static-libtool-libs) case $arg in -all-static) if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then func_warning "complete static linking is impossible in this configuration" fi if test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; -static) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=built ;; -static-libtool-libs) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; esac build_libtool_libs=no build_old_libs=yes break ;; esac done # See if our shared archives depend on static archives. test -n "$old_archive_from_new_cmds" && build_old_libs=yes # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg="$1" shift func_quote_for_eval "$arg" qarg=$func_quote_for_eval_unquoted_result func_append libtool_args " $func_quote_for_eval_result" # If the previous option needs an argument, assign it. if test -n "$prev"; then case $prev in output) func_append compile_command " @OUTPUT@" func_append finalize_command " @OUTPUT@" ;; esac case $prev in dlfiles|dlprefiles) if test "$preload" = no; then # Add the symbol object into the linking commands. func_append compile_command " @SYMFILE@" func_append finalize_command " @SYMFILE@" preload=yes fi case $arg in *.la | *.lo) ;; # We handle these cases below. 
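      # Roughly: the keywords handled next let a program dlopen itself;
      # `self' marks the program's own symbols for preloading, while `force'
      # only turns on -export-dynamic when true self-dlopening is unnecessary.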
force) if test "$dlself" = no; then dlself=needless export_dynamic=yes fi prev= continue ;; self) if test "$prev" = dlprefiles; then dlself=yes elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then dlself=yes else dlself=needless export_dynamic=yes fi prev= continue ;; *) if test "$prev" = dlfiles; then dlfiles="$dlfiles $arg" else dlprefiles="$dlprefiles $arg" fi prev= continue ;; esac ;; expsyms) export_symbols="$arg" test -f "$arg" \ || func_fatal_error "symbol file \`$arg' does not exist" prev= continue ;; expsyms_regex) export_symbols_regex="$arg" prev= continue ;; framework) case $host in *-*-darwin*) case "$deplibs " in *" $qarg.ltframework "*) ;; *) deplibs="$deplibs $qarg.ltframework" # this is fixed later ;; esac ;; esac prev= continue ;; inst_prefix) inst_prefix_dir="$arg" prev= continue ;; objectlist) if test -f "$arg"; then save_arg=$arg moreargs= for fil in `cat "$save_arg"` do # moreargs="$moreargs $fil" arg=$fil # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then dlfiles="$dlfiles $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. dlprefiles="$dlprefiles $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi done else func_fatal_error "link input file \`$arg' does not exist" fi arg=$save_arg prev= continue ;; precious_regex) precious_files_regex="$arg" prev= continue ;; release) release="-$arg" prev= continue ;; rpath | xrpath) # We need an absolute path. 
case $arg in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac if test "$prev" = rpath; then case "$rpath " in *" $arg "*) ;; *) rpath="$rpath $arg" ;; esac else case "$xrpath " in *" $arg "*) ;; *) xrpath="$xrpath $arg" ;; esac fi prev= continue ;; shrext) shrext_cmds="$arg" prev= continue ;; weak) weak_libs="$weak_libs $arg" prev= continue ;; xcclinker) linker_flags="$linker_flags $qarg" compiler_flags="$compiler_flags $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xcompiler) compiler_flags="$compiler_flags $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xlinker) linker_flags="$linker_flags $qarg" compiler_flags="$compiler_flags $wl$qarg" prev= func_append compile_command " $wl$qarg" func_append finalize_command " $wl$qarg" continue ;; *) eval "$prev=\"\$arg\"" prev= continue ;; esac fi # test -n "$prev" prevarg="$arg" case $arg in -all-static) if test -n "$link_static_flag"; then # See comment for -static flag below, for more details. func_append compile_command " $link_static_flag" func_append finalize_command " $link_static_flag" fi continue ;; -allow-undefined) # FIXME: remove this flag sometime in the future. func_fatal_error "\`-allow-undefined' must not be used because it is the default" ;; -avoid-version) avoid_version=yes continue ;; -dlopen) prev=dlfiles continue ;; -dlpreopen) prev=dlprefiles continue ;; -export-dynamic) export_dynamic=yes continue ;; -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then func_fatal_error "more than one -exported-symbols argument is not allowed" fi if test "X$arg" = "X-export-symbols"; then prev=expsyms else prev=expsyms_regex fi continue ;; -framework) prev=framework continue ;; -inst-prefix-dir) prev=inst_prefix continue ;; # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* # so, if we see these flags be careful not to treat them like -L -L[A-Z][A-Z]*:*) case $with_gcc/$host in no/*-*-irix* | /*-*-irix*) func_append compile_command " $arg" func_append finalize_command " $arg" ;; esac continue ;; -L*) func_stripname '-L' '' "$arg" dir=$func_stripname_result if test -z "$dir"; then if test "$#" -gt 0; then func_fatal_error "require no space between \`-L' and \`$1'" else func_fatal_error "need path for \`-L' option" fi fi # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) absdir=`cd "$dir" && pwd` test -z "$absdir" && \ func_fatal_error "cannot determine absolute directory name of \`$dir'" dir="$absdir" ;; esac case "$deplibs " in *" -L$dir "*) ;; *) deplibs="$deplibs -L$dir" lib_search_path="$lib_search_path $dir" ;; esac case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$dir:"*) ;; ::) dllsearchpath=$dir;; *) dllsearchpath="$dllsearchpath:$dir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) dllsearchpath="$dllsearchpath:$testbindir";; esac ;; esac continue ;; -l*) if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc*) # These systems don't actually have a C or math library (as such) continue ;; *-*-os2*) # These systems don't actually have a C library (as such) test "X$arg" = "X-lc" && continue ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. test "X$arg" = "X-lc" && continue ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C and math libraries are in the System framework deplibs="$deplibs System.ltframework" continue ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype test "X$arg" = "X-lc" && continue ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work test "X$arg" = "X-lc" && continue ;; esac elif test "X$arg" = "X-lc_r"; then case $host in *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc_r directly, use -pthread flag. continue ;; esac fi deplibs="$deplibs $arg" continue ;; -module) module=yes continue ;; # Tru64 UNIX uses -model [arg] to determine the layout of C++ # classes, name mangling, and exception handling. # Darwin uses the -arch flag to determine output architecture. -model|-arch|-isysroot) compiler_flags="$compiler_flags $arg" func_append compile_command " $arg" func_append finalize_command " $arg" prev=xcompiler continue ;; -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) compiler_flags="$compiler_flags $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case "$new_inherited_linker_flags " in *" $arg "*) ;; * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; esac continue ;; -multi_module) single_module="${wl}-multi_module" continue ;; -no-fast-install) fast_install=no continue ;; -no-install) case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) # The PATH hackery in wrapper scripts is required on Windows # and Darwin in order for the loader to find any dlls it needs. func_warning "\`-no-install' is ignored for $host" func_warning "assuming \`-no-fast-install' instead" fast_install=no ;; *) no_install=yes ;; esac continue ;; -no-undefined) allow_undefined=no continue ;; -objectlist) prev=objectlist continue ;; -o) prev=output ;; -precious-files-regex) prev=precious_regex continue ;; -release) prev=release continue ;; -rpath) prev=rpath continue ;; -R) prev=xrpath continue ;; -R*) func_stripname '-R' '' "$arg" dir=$func_stripname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac case "$xrpath " in *" $dir "*) ;; *) xrpath="$xrpath $dir" ;; esac continue ;; -shared) # The effects of -shared are defined in a previous loop. 
continue ;; -shrext) prev=shrext continue ;; -static | -static-libtool-libs) # The effects of -static are defined in a previous loop. # We used to do the same as -all-static on platforms that # didn't have a PIC flag, but the assumption that the effects # would be equivalent was wrong. It would break on at least # Digital Unix and AIX. continue ;; -thread-safe) thread_safe=yes continue ;; -version-info) prev=vinfo continue ;; -version-number) prev=vinfo vinfo_number=yes continue ;; -weak) prev=weak continue ;; -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" arg="$arg $wl$func_quote_for_eval_result" compiler_flags="$compiler_flags $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Wl,*) func_stripname '-Wl,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" arg="$arg $wl$func_quote_for_eval_result" compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" linker_flags="$linker_flags $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Xcompiler) prev=xcompiler continue ;; -Xlinker) prev=xlinker continue ;; -XCClinker) prev=xcclinker continue ;; # -msg_* for osf cc -msg_*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; # -64, -mips[0-9] enable 64-bit mode on the SGI compiler # -r[0-9][0-9]* specifies the processor on the SGI compiler # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler # +DA*, +DD* enable 64-bit mode on the HP compiler # -q* pass through compiler args for the IBM compiler # -m*, -t[45]*, -txscale* pass through architecture-specific # compiler args for GCC # -F/path gives path to uninstalled frameworks, gcc on darwin # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC # @file GCC response files -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" func_append compile_command " $arg" func_append finalize_command " $arg" compiler_flags="$compiler_flags $arg" continue ;; # Some other compiler flag. -* | +*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; *.$objext) # A standard object. objs="$objs $arg" ;; *.lo) # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then dlfiles="$dlfiles $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. dlprefiles="$dlprefiles $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. 
if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi ;; *.$libext) # An archive. deplibs="$deplibs $arg" old_deplibs="$old_deplibs $arg" continue ;; *.la) # A libtool-controlled library. if test "$prev" = dlfiles; then # This library was specified with -dlopen. dlfiles="$dlfiles $arg" prev= elif test "$prev" = dlprefiles; then # The library was specified with -dlpreopen. dlprefiles="$dlprefiles $arg" prev= else deplibs="$deplibs $arg" fi continue ;; # Some other compiler argument. *) # Unknown arguments in both finalize_command and compile_command need # to be aesthetically quoted because they are evaled later. func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; esac # arg # Now actually substitute the argument into the commands. if test -n "$arg"; then func_append compile_command " $arg" func_append finalize_command " $arg" fi done # argument parsing loop test -n "$prev" && \ func_fatal_help "the \`$prevarg' option requires an argument" if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then eval arg=\"$export_dynamic_flag_spec\" func_append compile_command " $arg" func_append finalize_command " $arg" fi oldlibs= # calculate the name of the file, without its directory func_basename "$output" outputname="$func_basename_result" libobjs_save="$libobjs" if test -n "$shlibpath_var"; then # get the directories listed in $shlibpath_var eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` else shlib_search_path= fi eval sys_lib_search_path=\"$sys_lib_search_path_spec\" eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" func_dirname "$output" "/" "" output_objdir="$func_dirname_result$objdir" # Create the object directory. func_mkdir_p "$output_objdir" # Determine the type of output case $output in "") func_fatal_help "you must specify an output file" ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; *.la) linkmode=lib ;; *) linkmode=prog ;; # Anything else should be a program. esac specialdeplibs= libs= # Find all interdependent deplibs by searching for libraries # that are linked more than once (e.g. -la -lb -la) for deplib in $deplibs; do if $opt_duplicate_deps ; then case "$libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi libs="$libs $deplib" done if test "$linkmode" = lib; then libs="$predeps $libs $compiler_lib_search_path $postdeps" # Compute libraries that are listed more than once in $predeps # $postdeps and mark them as special (i.e., whose duplicates are # not to be eliminated). 
pre_post_deps= if $opt_duplicate_compiler_generated_deps; then for pre_post_dep in $predeps $postdeps; do case "$pre_post_deps " in *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; esac pre_post_deps="$pre_post_deps $pre_post_dep" done fi pre_post_deps= fi deplibs= newdependency_libs= newlib_search_path= need_relink=no # whether we're linking any uninstalled libtool libraries notinst_deplibs= # not-installed libtool libraries notinst_path= # paths that contain not-installed libtool libraries case $linkmode in lib) passes="conv dlpreopen link" for file in $dlfiles $dlprefiles; do case $file in *.la) ;; *) func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" ;; esac done ;; prog) compile_deplibs= finalize_deplibs= alldeplibs=no newdlfiles= newdlprefiles= passes="conv scan dlopen dlpreopen link" ;; *) passes="conv" ;; esac for pass in $passes; do # The preopen pass in lib mode reverses $deplibs; put it back here # so that -L comes before libs that need it for instance... if test "$linkmode,$pass" = "lib,link"; then ## FIXME: Find the place where the list is rebuilt in the wrong ## order, and fix it there properly tmp_deplibs= for deplib in $deplibs; do tmp_deplibs="$deplib $tmp_deplibs" done deplibs="$tmp_deplibs" fi if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan"; then libs="$deplibs" deplibs= fi if test "$linkmode" = prog; then case $pass in dlopen) libs="$dlfiles" ;; dlpreopen) libs="$dlprefiles" ;; link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; esac fi if test "$linkmode,$pass" = "lib,dlpreopen"; then # Collect and forward deplibs of preopened libtool libs for lib in $dlprefiles; do # Ignore non-libtool-libs dependency_libs= case $lib in *.la) func_source "$lib" ;; esac # Collect preopened libtool deplibs, except any this library # has declared as weak libs for deplib in $dependency_libs; do deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"` case " $weak_libs " in *" $deplib_base "*) ;; *) deplibs="$deplibs $deplib" ;; esac done done libs="$dlprefiles" fi if test "$pass" = dlopen; then # Collect dlpreopened libraries save_deplibs="$deplibs" deplibs= fi for deplib in $libs; do lib= found=no case $deplib in -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else compiler_flags="$compiler_flags $deplib" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; esac fi fi continue ;; -l*) if test "$linkmode" != lib && test "$linkmode" != prog; then func_warning "\`-l' is ignored for archives/objects" continue fi func_stripname '-l' '' "$deplib" name=$func_stripname_result if test "$linkmode" = lib; then searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" else searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" fi for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib="$searchdir/lib${name}${search_ext}" if test -f "$lib"; then if test "$search_ext" = ".la"; then found=yes else found=no fi break 2 fi done done if test "$found" != yes; then # deplib doesn't seem to be a libtool library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib 
$finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue else # deplib is a libtool library # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, # We need to do some special things here, and not later. if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $deplib "*) if func_lalib_p "$lib"; then library_names= old_library= func_source "$lib" for l in $old_library $library_names; do ll="$l" done if test "X$ll" = "X$old_library" ; then # only static version available found=no func_dirname "$lib" "" "." ladir="$func_dirname_result" lib=$ladir/$old_library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue fi fi ;; *) ;; esac fi fi ;; # -l *.ltframework) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; esac fi fi continue ;; -L*) case $linkmode in lib) deplibs="$deplib $deplibs" test "$pass" = conv && continue newdependency_libs="$deplib $newdependency_libs" func_stripname '-L' '' "$deplib" newlib_search_path="$newlib_search_path $func_stripname_result" ;; prog) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi if test "$pass" = scan; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi func_stripname '-L' '' "$deplib" newlib_search_path="$newlib_search_path $func_stripname_result" ;; *) func_warning "\`-L' is ignored for archives/objects" ;; esac # linkmode continue ;; # -L -R*) if test "$pass" = link; then func_stripname '-R' '' "$deplib" dir=$func_stripname_result # Make sure the xrpath contains only unique directories. case "$xrpath " in *" $dir "*) ;; *) xrpath="$xrpath $dir" ;; esac fi deplibs="$deplib $deplibs" continue ;; *.la) lib="$deplib" ;; *.$libext) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi case $linkmode in lib) # Linking convenience modules into shared libraries is allowed, # but linking other static libraries is non-portable. case " $dlpreconveniencelibs " in *" $deplib "*) ;; *) valid_a_lib=no case $deplibs_check_method in match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \ | $EGREP "$match_pattern_regex" > /dev/null; then valid_a_lib=yes fi ;; pass_all) valid_a_lib=yes ;; esac if test "$valid_a_lib" != yes; then $ECHO $ECHO "*** Warning: Trying to link with static lib archive $deplib." $ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have" $ECHO "*** because the file extensions .$libext of this argument makes me believe" $ECHO "*** that it is just a static archive that I should not use here." else $ECHO $ECHO "*** Warning: Linking the shared library $output against the" $ECHO "*** static library $deplib is not portable!" 
deplibs="$deplib $deplibs" fi ;; esac continue ;; prog) if test "$pass" != link; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi continue ;; esac # linkmode ;; # *.$libext *.lo | *.$objext) if test "$pass" = conv; then deplibs="$deplib $deplibs" elif test "$linkmode" = prog; then if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlopen support or we're linking statically, # we need to preload. newdlprefiles="$newdlprefiles $deplib" compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else newdlfiles="$newdlfiles $deplib" fi fi continue ;; %DEPLIBS%) alldeplibs=yes continue ;; esac # case $deplib if test "$found" = yes || test -f "$lib"; then : else func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" fi # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$lib" \ || func_fatal_error "\`$lib' is not a valid libtool archive" func_dirname "$lib" "" "." ladir="$func_dirname_result" dlname= dlopen= dlpreopen= libdir= library_names= old_library= inherited_linker_flags= # If the library was installed with an old release of libtool, # it will not redefine variables installed, or shouldnotlink installed=yes shouldnotlink=no avoidtemprpath= # Read the .la file func_source "$lib" # Convert "-framework foo" to "foo.ltframework" if test -n "$inherited_linker_flags"; then tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'` for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do case " $new_inherited_linker_flags " in *" $tmp_inherited_linker_flag "*) ;; *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; esac done fi dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan" || { test "$linkmode" != prog && test "$linkmode" != lib; }; then test -n "$dlopen" && dlfiles="$dlfiles $dlopen" test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" fi if test "$pass" = conv; then # Only check for convenience libraries deplibs="$lib $deplibs" if test -z "$libdir"; then if test -z "$old_library"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # It is a libtool convenience library, so add in its objects. convenience="$convenience $ladir/$objdir/$old_library" old_convenience="$old_convenience $ladir/$objdir/$old_library" elif test "$linkmode" != prog && test "$linkmode" != lib; then func_fatal_error "\`$lib' is not a convenience library" fi tmp_libs= for deplib in $dependency_libs; do deplibs="$deplib $deplibs" if $opt_duplicate_deps ; then case "$tmp_libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi tmp_libs="$tmp_libs $deplib" done continue fi # $pass = conv # Get the name of the library we link against. linklib= for l in $old_library $library_names; do linklib="$l" done if test -z "$linklib"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # This library was specified with -dlopen. 
if test "$pass" = dlopen; then if test -z "$libdir"; then func_fatal_error "cannot -dlopen a convenience library: \`$lib'" fi if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't # bomb out in the load deplibs phase. dlprefiles="$dlprefiles $lib $dependency_libs" else newdlfiles="$newdlfiles $lib" fi continue fi # $pass = dlopen # We need an absolute path. case $ladir in [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; *) abs_ladir=`cd "$ladir" && pwd` if test -z "$abs_ladir"; then func_warning "cannot determine absolute directory name of \`$ladir'" func_warning "passing it literally to the linker, although it might fail" abs_ladir="$ladir" fi ;; esac func_basename "$lib" laname="$func_basename_result" # Find the relevant object directory and library name. if test "X$installed" = Xyes; then if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then func_warning "library \`$lib' was moved." dir="$ladir" absdir="$abs_ladir" libdir="$abs_ladir" else dir="$libdir" absdir="$libdir" fi test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes else if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then dir="$ladir" absdir="$abs_ladir" # Remove this search path later notinst_path="$notinst_path $abs_ladir" else dir="$ladir/$objdir" absdir="$abs_ladir/$objdir" # Remove this search path later notinst_path="$notinst_path $abs_ladir" fi fi # $installed = yes func_stripname 'lib' '.la' "$laname" name=$func_stripname_result # This library was specified with -dlpreopen. if test "$pass" = dlpreopen; then if test -z "$libdir" && test "$linkmode" = prog; then func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" fi # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). if test -n "$old_library"; then newdlprefiles="$newdlprefiles $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" # Otherwise, use the dlname, so that lt_dlopen finds it. elif test -n "$dlname"; then newdlprefiles="$newdlprefiles $dir/$dlname" else newdlprefiles="$newdlprefiles $dir/$linklib" fi fi # $pass = dlpreopen if test -z "$libdir"; then # Link the convenience library if test "$linkmode" = lib; then deplibs="$dir/$old_library $deplibs" elif test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$dir/$old_library $compile_deplibs" finalize_deplibs="$dir/$old_library $finalize_deplibs" else deplibs="$lib $deplibs" # used for prog,scan pass fi continue fi if test "$linkmode" = prog && test "$pass" != link; then newlib_search_path="$newlib_search_path $ladir" deplibs="$lib $deplibs" linkalldeplibs=no if test "$link_all_deplibs" != no || test -z "$library_names" || test "$build_libtool_libs" = no; then linkalldeplibs=yes fi tmp_libs= for deplib in $dependency_libs; do case $deplib in -L*) func_stripname '-L' '' "$deplib" newlib_search_path="$newlib_search_path $func_stripname_result" ;; esac # Need to link against all dependency_libs? 
if test "$linkalldeplibs" = yes; then deplibs="$deplib $deplibs" else # Need to hardcode shared library paths # or/and link against static libraries newdependency_libs="$deplib $newdependency_libs" fi if $opt_duplicate_deps ; then case "$tmp_libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi tmp_libs="$tmp_libs $deplib" done # for deplib continue fi # $linkmode = prog... if test "$linkmode,$pass" = "prog,link"; then if test -n "$library_names" && { { test "$prefer_static_libs" = no || test "$prefer_static_libs,$installed" = "built,yes"; } || test -z "$old_library"; }; then # We need to hardcode the library path if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then # Make sure the rpath contains only unique directories. case "$temp_rpath:" in *"$absdir:"*) ;; *) temp_rpath="$temp_rpath$absdir:" ;; esac fi # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) compile_rpath="$compile_rpath $absdir" esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" esac ;; esac fi # $linkmode,$pass = prog,link... if test "$alldeplibs" = yes && { test "$deplibs_check_method" = pass_all || { test "$build_libtool_libs" = yes && test -n "$library_names"; }; }; then # We only need to search for static libraries continue fi fi link_static=no # Whether the deplib will be linked statically use_static_libs=$prefer_static_libs if test "$use_static_libs" = built && test "$installed" = yes; then use_static_libs=no fi if test -n "$library_names" && { test "$use_static_libs" = no || test -z "$old_library"; }; then case $host in *cygwin* | *mingw* | *cegcc*) # No point in relinking DLLs because paths are not encoded notinst_deplibs="$notinst_deplibs $lib" need_relink=no ;; *) if test "$installed" = no; then notinst_deplibs="$notinst_deplibs $lib" need_relink=yes fi ;; esac # This is a shared library # Warn about portability, can't link against -module's on some # systems (darwin). Don't bleat about dlopened modules though! dlopenmodule="" for dlpremoduletest in $dlprefiles; do if test "X$dlpremoduletest" = "X$lib"; then dlopenmodule="$dlpremoduletest" break fi done if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then $ECHO if test "$linkmode" = prog; then $ECHO "*** Warning: Linking the executable $output against the loadable module" else $ECHO "*** Warning: Linking the shared library $output against the loadable module" fi $ECHO "*** $linklib is not portable!" fi if test "$linkmode" = lib && test "$hardcode_into_libs" = yes; then # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) compile_rpath="$compile_rpath $absdir" esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" esac ;; esac fi if test -n "$old_archive_from_expsyms_cmds"; then # figure out the soname set dummy $library_names shift realname="$1" shift libname=`eval "\\$ECHO \"$libname_spec\""` # use dlname if we got it. it's perfectly good, no? 
if test -n "$dlname"; then soname="$dlname" elif test -n "$soname_spec"; then # bleh windows case $host in *cygwin* | mingw* | *cegcc*) func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; esac eval soname=\"$soname_spec\" else soname="$realname" fi # Make a new name for the extract_expsyms_cmds to use soroot="$soname" func_basename "$soroot" soname="$func_basename_result" func_stripname 'lib' '.dll' "$soname" newlib=libimp-$func_stripname_result.a # If the library has no export list, then create one now if test -f "$output_objdir/$soname-def"; then : else func_verbose "extracting exported symbol list from \`$soname'" func_execute_cmds "$extract_expsyms_cmds" 'exit $?' fi # Create $newlib if test -f "$output_objdir/$newlib"; then :; else func_verbose "generating import library for \`$soname'" func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' fi # make sure the library variables are pointing to the new library dir=$output_objdir linklib=$newlib fi # test -n "$old_archive_from_expsyms_cmds" if test "$linkmode" = prog || test "$mode" != relink; then add_shlibpath= add_dir= add= lib_linked=yes case $hardcode_action in immediate | unsupported) if test "$hardcode_direct" = no; then add="$dir/$linklib" case $host in *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; *-*-sysv4*uw2*) add_dir="-L$dir" ;; *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ *-*-unixware7*) add_dir="-L$dir" ;; *-*-darwin* ) # if the lib is a (non-dlopened) module then we can not # link against it, someone is ignoring the earlier warnings if /usr/bin/file -L $add 2> /dev/null | $GREP ": [^:]* bundle" >/dev/null ; then if test "X$dlopenmodule" != "X$lib"; then $ECHO "*** Warning: lib $linklib is a module, not a shared library" if test -z "$old_library" ; then $ECHO $ECHO "*** And there doesn't seem to be a static archive available" $ECHO "*** The link will probably fail, sorry" else add="$dir/$old_library" fi elif test -n "$old_library"; then add="$dir/$old_library" fi fi esac elif test "$hardcode_minus_L" = no; then case $host in *-*-sunos*) add_shlibpath="$dir" ;; esac add_dir="-L$dir" add="-l$name" elif test "$hardcode_shlibpath_var" = no; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; relink) if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$dir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$dir" # Try looking first in the location we're being installed to. 
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) add_dir="$add_dir -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; *) lib_linked=no ;; esac if test "$lib_linked" != yes; then func_fatal_configuration "unsupported hardcode properties" fi if test -n "$add_shlibpath"; then case :$compile_shlibpath: in *":$add_shlibpath:"*) ;; *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; esac fi if test "$linkmode" = prog; then test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" test -n "$add" && compile_deplibs="$add $compile_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" if test "$hardcode_direct" != yes && test "$hardcode_minus_L" != yes && test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; esac fi fi fi if test "$linkmode" = prog || test "$mode" = relink; then add_shlibpath= add_dir= add= # Finalize command for both is simple: just hardcode it. if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$libdir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$libdir" add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; esac add="-l$name" elif test "$hardcode_automatic" = yes; then if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib" ; then add="$inst_prefix_dir$libdir/$linklib" else add="$libdir/$linklib" fi else # We cannot seem to hardcode it, guess we'll fake it. add_dir="-L$libdir" # Try looking first in the location we're being installed to. if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) add_dir="$add_dir -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" fi if test "$linkmode" = prog; then test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" test -n "$add" && finalize_deplibs="$add $finalize_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" fi fi elif test "$linkmode" = prog; then # Here we assume that one of hardcode_direct or hardcode_minus_L # is not unsupported. This is valid on all known static and # shared platforms. if test "$hardcode_direct" != unsupported; then test -n "$old_library" && linklib="$old_library" compile_deplibs="$dir/$linklib $compile_deplibs" finalize_deplibs="$dir/$linklib $finalize_deplibs" else compile_deplibs="-l$name -L$dir $compile_deplibs" finalize_deplibs="-l$name -L$dir $finalize_deplibs" fi elif test "$build_libtool_libs" = yes; then # Not a shared library if test "$deplibs_check_method" != pass_all; then # We're trying link a shared library against a static one # but the system doesn't support it. # Just print a warning and add the library to dependency_libs so # that the program can be linked against the static library. $ECHO $ECHO "*** Warning: This system can not link to static lib archive $lib." $ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have." 
if test "$module" = yes; then $ECHO "*** But as you try to build a module library, libtool will still create " $ECHO "*** a static module, that should work as long as the dlopening application" $ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime." if test -z "$global_symbol_pipe"; then $ECHO $ECHO "*** However, this would only work if libtool was able to extract symbol" $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" $ECHO "*** not find such a program. So, this module is probably useless." $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." fi if test "$build_old_libs" = no; then build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi else deplibs="$dir/$old_library $deplibs" link_static=yes fi fi # link shared/static library? if test "$linkmode" = lib; then if test -n "$dependency_libs" && { test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes || test "$link_static" = yes; }; then # Extract -R from dependency_libs temp_deplibs= for libdir in $dependency_libs; do case $libdir in -R*) func_stripname '-R' '' "$libdir" temp_xrpath=$func_stripname_result case " $xrpath " in *" $temp_xrpath "*) ;; *) xrpath="$xrpath $temp_xrpath";; esac;; *) temp_deplibs="$temp_deplibs $libdir";; esac done dependency_libs="$temp_deplibs" fi newlib_search_path="$newlib_search_path $absdir" # Link against this library test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" # ... and its dependency_libs tmp_libs= for deplib in $dependency_libs; do newdependency_libs="$deplib $newdependency_libs" if $opt_duplicate_deps ; then case "$tmp_libs " in *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; esac fi tmp_libs="$tmp_libs $deplib" done if test "$link_all_deplibs" != no; then # Add the search paths of all dependency libraries for deplib in $dependency_libs; do case $deplib in -L*) path="$deplib" ;; *.la) func_dirname "$deplib" "" "." dir="$func_dirname_result" # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; *) absdir=`cd "$dir" && pwd` if test -z "$absdir"; then func_warning "cannot determine absolute directory name of \`$dir'" absdir="$dir" fi ;; esac if $GREP "^installed=no" $deplib > /dev/null; then case $host in *-*-darwin*) depdepl= eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names" ; then for tmp in $deplibrary_names ; do depdepl=$tmp done if test -f "$absdir/$objdir/$depdepl" ; then depdepl="$absdir/$objdir/$depdepl" darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` if test -z "$darwin_install_name"; then darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` fi compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" path= fi fi ;; *) path="-L$absdir/$objdir" ;; esac else eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" test "$absdir" != "$libdir" && \ func_warning "\`$deplib' seems to be moved" path="-L$absdir" fi ;; esac case " $deplibs " in *" $path "*) ;; *) deplibs="$path $deplibs" ;; esac done fi # link_all_deplibs != no fi # linkmode = lib done # for deplib in $libs if test "$pass" = link; then if test "$linkmode" = "prog"; then compile_deplibs="$new_inherited_linker_flags $compile_deplibs" finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" else compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` fi fi dependency_libs="$newdependency_libs" if test "$pass" = dlpreopen; then # Link the dlpreopened libraries before other libraries for deplib in $save_deplibs; do deplibs="$deplib $deplibs" done fi if test "$pass" != dlopen; then if test "$pass" != conv; then # Make sure lib_search_path contains only unique directories. lib_search_path= for dir in $newlib_search_path; do case "$lib_search_path " in *" $dir "*) ;; *) lib_search_path="$lib_search_path $dir" ;; esac done newlib_search_path= fi if test "$linkmode,$pass" != "prog,link"; then vars="deplibs" else vars="compile_deplibs finalize_deplibs" fi for var in $vars dependency_libs; do # Add libraries to $var in reverse order eval tmp_libs=\"\$$var\" new_libs= for deplib in $tmp_libs; do # FIXME: Pedantically, this is the right thing to do, so # that some nasty dependency loop isn't accidentally # broken: #new_libs="$deplib $new_libs" # Pragmatically, this seems to cause very few problems in # practice: case $deplib in -L*) new_libs="$deplib $new_libs" ;; -R*) ;; *) # And here is the reason: when a library appears more # than once as an explicit dependence of a library, or # is implicitly linked in more than once by the # compiler, it is considered special, and multiple # occurrences thereof are not removed. Compare this # with having the same library being listed as a # dependency of multiple other libraries: in this case, # we know (pedantically, we assume) the library does not # need to be listed more than once, so we keep only the # last copy. This is not always right, but it is rare # enough that we require users that really mean to play # such unportable linking tricks to link the library # using -Wl,-lname, so that libtool does not consider it # for duplicate removal. 
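# Illustrative trace of this reverse-order scan (hypothetical library
# names, added as a sketch): if tmp_libs is "-lA -lB -lA -lC" and none of
# these entries appear in $specialdeplibs, each new entry is prepended and
# already-seen entries are skipped, giving new_libs="-lC -lB -lA"; the
# list is reversed and only one copy of -lA survives.  Entries listed in
# $specialdeplibs are prepended without the membership check, so their
# duplicates are kept.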
case " $specialdeplibs " in *" $deplib "*) new_libs="$deplib $new_libs" ;; *) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$deplib $new_libs" ;; esac ;; esac ;; esac done tmp_libs= for deplib in $new_libs; do case $deplib in -L*) case " $tmp_libs " in *" $deplib "*) ;; *) tmp_libs="$tmp_libs $deplib" ;; esac ;; *) tmp_libs="$tmp_libs $deplib" ;; esac done eval $var=\"$tmp_libs\" done # for var fi # Last step: remove runtime libs from dependency_libs # (they stay in deplibs) tmp_libs= for i in $dependency_libs ; do case " $predeps $postdeps $compiler_lib_search_path " in *" $i "*) i="" ;; esac if test -n "$i" ; then tmp_libs="$tmp_libs $i" fi done dependency_libs=$tmp_libs done # for pass if test "$linkmode" = prog; then dlfiles="$newdlfiles" fi if test "$linkmode" = prog || test "$linkmode" = lib; then dlprefiles="$newdlprefiles" fi case $linkmode in oldlib) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for archives" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for archives" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for archives" test -n "$xrpath" && \ func_warning "\`-R' is ignored for archives" test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for archives" test -n "$release" && \ func_warning "\`-release' is ignored for archives" test -n "$export_symbols$export_symbols_regex" && \ func_warning "\`-export-symbols' is ignored for archives" # Now set the variables for building old libraries. build_libtool_libs=no oldlibs="$output" objs="$objs$old_deplibs" ;; lib) # Make sure we only generate libraries of the form `libNAME.la'. case $outputname in lib*) func_stripname 'lib' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) test "$module" = no && \ func_fatal_help "libtool library \`$output' must begin with \`lib'" if test "$need_lib_prefix" != no; then # Add the "lib" prefix for modules if required func_stripname '' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else func_stripname '' '.la' "$outputname" libname=$func_stripname_result fi ;; esac if test -n "$objs"; then if test "$deplibs_check_method" != pass_all; then func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" else $ECHO $ECHO "*** Warning: Linking the shared library $output against the non-libtool" $ECHO "*** objects $objs is not portable!" libobjs="$libobjs $objs" fi fi test "$dlself" != no && \ func_warning "\`-dlopen self' is ignored for libtool libraries" set dummy $rpath shift test "$#" -gt 1 && \ func_warning "ignoring multiple \`-rpath's for a libtool library" install_libdir="$1" oldlibs= if test -z "$rpath"; then if test "$build_libtool_libs" = yes; then # Building a libtool convenience library. # Some compilers have problems with a `.al' extension so # convenience libraries should have the same extension an # archive normally would. oldlibs="$output_objdir/$libname.$libext $oldlibs" build_libtool_libs=convenience build_old_libs=yes fi test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for convenience libraries" test -n "$release" && \ func_warning "\`-release' is ignored for convenience libraries" else # Parse the version information argument. 
save_ifs="$IFS"; IFS=':' set dummy $vinfo 0 0 0 shift IFS="$save_ifs" test -n "$7" && \ func_fatal_help "too many parameters to \`-version-info'" # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible case $vinfo_number in yes) number_major="$1" number_minor="$2" number_revision="$3" # # There are really only two kinds -- those that # use the current revision as the major version # and those that subtract age and use age as # a minor version. But, then there is irix # which has an extra 1 added just for fun # case $version_type in darwin|linux|osf|windows|none) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_revision" ;; freebsd-aout|freebsd-elf|sunos) current="$number_major" revision="$number_minor" age="0" ;; irix|nonstopux) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_minor" lt_irix_increment=no ;; esac ;; no) current="$1" revision="$2" age="$3" ;; esac # Check that each of the things are valid numbers. case $current in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "CURRENT \`$current' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $revision in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "REVISION \`$revision' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $age in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "AGE \`$age' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac if test "$age" -gt "$current"; then func_error "AGE \`$age' is greater than the current interface number \`$current'" func_fatal_error "\`$vinfo' is not valid version information" fi # Calculate the version variables. major= versuffix= verstring= case $version_type in none) ;; darwin) # Like Linux, but with the current version available in # verstring for coding it into the library header func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" # Darwin ld doesn't like 0 for these options... func_arith $current + 1 minor_current=$func_arith_result xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" ;; freebsd-aout) major=".$current" versuffix=".$current.$revision"; ;; freebsd-elf) major=".$current" versuffix=".$current" ;; irix | nonstopux) if test "X$lt_irix_increment" = "Xno"; then func_arith $current - $age else func_arith $current - $age + 1 fi major=$func_arith_result case $version_type in nonstopux) verstring_prefix=nonstopux ;; *) verstring_prefix=sgi ;; esac verstring="$verstring_prefix$major.$revision" # Add in all the interfaces that we are compatible with. loop=$revision while test "$loop" -ne 0; do func_arith $revision - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring_prefix$major.$iface:$verstring" done # Before this point, $major must not contain `.'. 
major=.$major versuffix="$major.$revision" ;; linux) func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" ;; osf) func_arith $current - $age major=.$func_arith_result versuffix=".$current.$age.$revision" verstring="$current.$age.$revision" # Add in all the interfaces that we are compatible with. loop=$age while test "$loop" -ne 0; do func_arith $current - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring:${iface}.0" done # Make executables depend on our current version. verstring="$verstring:${current}.0" ;; qnx) major=".$current" versuffix=".$current" ;; sunos) major=".$current" versuffix=".$current.$revision" ;; windows) # Use '-' rather than '.', since we only want one # extension on DOS 8.3 filesystems. func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; *) func_fatal_configuration "unknown library version type \`$version_type'" ;; esac # Clear the version info if we defaulted, and they specified a release. if test -z "$vinfo" && test -n "$release"; then major= case $version_type in darwin) # we can't check for "0.0" in archive_cmds due to quoting # problems, so we reset it completely verstring= ;; *) verstring="0.0" ;; esac if test "$need_version" = no; then versuffix= else versuffix=".0.0" fi fi # Remove version info from name if versioning should be avoided if test "$avoid_version" = yes && test "$need_version" = no; then major= versuffix= verstring="" fi # Check to see if the archive will have undefined symbols. if test "$allow_undefined" = yes; then if test "$allow_undefined_flag" = unsupported; then func_warning "undefined symbols not allowed in $host shared libraries" build_libtool_libs=no build_old_libs=yes fi else # Don't allow undefined symbols. allow_undefined_flag="$no_undefined_flag" fi fi func_generate_dlsyms "$libname" "$libname" "yes" libobjs="$libobjs $symfileobj" test "X$libobjs" = "X " && libobjs= if test "$mode" != relink; then # Remove our outputs, but don't remove object files since they # may have been created when compiling PIC objects. removelist= tempremovelist=`$ECHO "$output_objdir/*"` for p in $tempremovelist; do case $p in *.$objext | *.gcno) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) if test "X$precious_files_regex" != "X"; then if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 then continue fi fi removelist="$removelist $p" ;; *) ;; esac done test -n "$removelist" && \ func_show_eval "${RM}r \$removelist" fi # Now set the variables for building old libraries. if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then oldlibs="$oldlibs $output_objdir/$libname.$libext" # Transform .lo files to .o files. oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` fi # Eliminate all temporary directories. #for path in $notinst_path; do # lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"` # deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"` # dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"` #done if test -n "$xrpath"; then # If the user specified any rpath flags, then add them. 
temp_xrpath= for libdir in $xrpath; do temp_xrpath="$temp_xrpath -R$libdir" case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" ;; esac done if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then dependency_libs="$temp_xrpath $dependency_libs" fi fi # Make sure dlfiles contains only unique files that won't be dlpreopened old_dlfiles="$dlfiles" dlfiles= for lib in $old_dlfiles; do case " $dlprefiles $dlfiles " in *" $lib "*) ;; *) dlfiles="$dlfiles $lib" ;; esac done # Make sure dlprefiles contains only unique files old_dlprefiles="$dlprefiles" dlprefiles= for lib in $old_dlprefiles; do case "$dlprefiles " in *" $lib "*) ;; *) dlprefiles="$dlprefiles $lib" ;; esac done if test "$build_libtool_libs" = yes; then if test -n "$rpath"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc*) # these systems don't actually have a c library (as such)! ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C library is in the System framework deplibs="$deplibs System.ltframework" ;; *-*-netbsd*) # Don't link with libc until the a.out ld.so is fixed. ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work ;; *) # Add libc to deplibs on all other systems if necessary. if test "$build_libtool_need_lc" = "yes"; then deplibs="$deplibs -lc" fi ;; esac fi # Transform deplibs into only deplibs that can be linked in shared. name_save=$name libname_save=$libname release_save=$release versuffix_save=$versuffix major_save=$major # I'm not sure if I'm treating the release correctly. I think # release should show up in the -l (ie -lgmp5) so we don't want to # add it in twice. Is that correct? release="" versuffix="" major="" newdeplibs= droppeddeps=no case $deplibs_check_method in pass_all) # Don't check for shared/static. Everything works. # This might be a little naive. We might want to check # whether the library exists or not. But this is on # osf3 & osf4 and I'm not really sure... Just # implementing what was already the behavior. newdeplibs=$deplibs ;; test_compile) # This code stresses the "libraries are programs" paradigm to its # limits. Maybe even breaks it. We compile a program, linking it # against the deplibs as a proxy for the library. Then we can check # whether they linked in statically or dynamically with ldd. $opt_dry_run || $RM conftest.c cat > conftest.c </dev/null` for potent_lib in $potential_libs; do # Follow soft links. if ls -lLd "$potent_lib" 2>/dev/null | $GREP " -> " >/dev/null; then continue fi # The statement above tries to avoid entering an # endless loop below, in case of cyclic links. # We might still enter an endless loop, since a link # loop can be closed while we follow links, # but so what? potlib="$potent_lib" while test -h "$potlib" 2>/dev/null; do potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` case $potliblink in [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; *) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; esac done if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | $SED -e 10q | $EGREP "$file_magic_regex" > /dev/null; then newdeplibs="$newdeplibs $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes $ECHO $ECHO "*** Warning: linker path does not have real file for library $a_deplib." 
$ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have" $ECHO "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for file magic test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a file magic. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. newdeplibs="$newdeplibs $a_deplib" ;; esac done # Gone through all deplibs. ;; match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` for a_deplib in $deplibs; do case $a_deplib in -l*) func_stripname -l '' "$a_deplib" name=$func_stripname_result if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $a_deplib "*) newdeplibs="$newdeplibs $a_deplib" a_deplib="" ;; esac fi if test -n "$a_deplib" ; then libname=`eval "\\$ECHO \"$libname_spec\""` for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do potential_libs=`ls $i/$libname[.-]* 2>/dev/null` for potent_lib in $potential_libs; do potlib="$potent_lib" # see symlink-check above in file_magic test if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \ $EGREP "$match_pattern_regex" > /dev/null; then newdeplibs="$newdeplibs $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes $ECHO $ECHO "*** Warning: linker path does not have real file for library $a_deplib." $ECHO "*** I have the capability to make that library automatically link in when" $ECHO "*** you link to this library. But I can only do this if you have a" $ECHO "*** shared version of the library, which you do not appear to have" $ECHO "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a regex pattern. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. newdeplibs="$newdeplibs $a_deplib" ;; esac done # Gone through all deplibs. ;; none | unknown | *) newdeplibs="" tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \ -e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'` if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then for i in $predeps $postdeps ; do # can't use Xsed below, because $i might contain '/' tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"` done fi if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' | $GREP . >/dev/null; then $ECHO if test "X$deplibs_check_method" = "Xnone"; then $ECHO "*** Warning: inter-library dependencies are not supported in this platform." else $ECHO "*** Warning: inter-library dependencies are not known to be supported." fi $ECHO "*** All declared inter-library dependencies are being dropped." 
droppeddeps=yes fi ;; esac versuffix=$versuffix_save major=$major_save release=$release_save libname=$libname_save name=$name_save case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'` ;; esac if test "$droppeddeps" = yes; then if test "$module" = yes; then $ECHO $ECHO "*** Warning: libtool could not satisfy all declared inter-library" $ECHO "*** dependencies of module $libname. Therefore, libtool will create" $ECHO "*** a static module, that should work as long as the dlopening" $ECHO "*** application is linked with the -dlopen flag." if test -z "$global_symbol_pipe"; then $ECHO $ECHO "*** However, this would only work if libtool was able to extract symbol" $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" $ECHO "*** not find such a program. So, this module is probably useless." $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." fi if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi else $ECHO "*** The inter-library dependencies that have been dropped here will be" $ECHO "*** automatically added whenever a program is linked with this library" $ECHO "*** or is declared to -dlopen it." if test "$allow_undefined" = no; then $ECHO $ECHO "*** Since this library must not contain undefined symbols," $ECHO "*** because either the platform does not support them or" $ECHO "*** it was explicitly requested with -no-undefined," $ECHO "*** libtool will only create a static version of it." if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi fi fi # Done checking deplibs! deplibs=$newdeplibs fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" case $host in *-*-darwin*) newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $deplibs " in *" -L$path/$objdir "*) new_libs="$new_libs -L$path/$objdir" ;; esac ;; esac done for deplib in $deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$new_libs $deplib" ;; esac ;; *) new_libs="$new_libs $deplib" ;; esac done deplibs="$new_libs" # All the library-specific variables (install_libdir is set above). library_names= old_library= dlname= # Test again, we may have decided not to build it any more if test "$build_libtool_libs" = yes; then if test "$hardcode_into_libs" = yes; then # Hardcode the library paths hardcode_libdirs= dep_rpath= rpath="$finalize_rpath" test "$mode" != relink && rpath="$compile_rpath$rpath" for libdir in $rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" dep_rpath="$dep_rpath $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) perm_rpath="$perm_rpath $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" if test -n "$hardcode_libdir_flag_spec_ld"; then eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" else eval dep_rpath=\"$hardcode_libdir_flag_spec\" fi fi if test -n "$runpath_var" && test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do rpath="$rpath$dir:" done eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" fi test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" fi shlibpath="$finalize_shlibpath" test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" if test -n "$shlibpath"; then eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" fi # Get the real and link names of the library. eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names shift realname="$1" shift if test -n "$soname_spec"; then eval soname=\"$soname_spec\" else soname="$realname" fi if test -z "$dlname"; then dlname=$soname fi lib="$output_objdir/$realname" linknames= for link do linknames="$linknames $link" done # Use standard objects if they are pic test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` test "X$libobjs" = "X " && libobjs= delfiles= if test -n "$export_symbols" && test -n "$include_expsyms"; then $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" export_symbols="$output_objdir/$libname.uexp" delfiles="$delfiles $export_symbols" fi orig_export_symbols= case $host_os in cygwin* | mingw* | cegcc*) if test -n "$export_symbols" && test -z "$export_symbols_regex"; then # exporting using user supplied symfile if test "x`$SED 1q $export_symbols`" != xEXPORTS; then # and it's NOT already a .def file. Must figure out # which of the given symbols are data symbols and tag # them as such. So, trigger use of export_symbols_cmds. # export_symbols gets reassigned inside the "prepare # the list of exported symbols" if statement, so the # include_expsyms logic still works. orig_export_symbols="$export_symbols" export_symbols= always_export_symbols=yes fi fi ;; esac # Prepare the list of exported symbols if test -z "$export_symbols"; then if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols cmds=$export_symbols_cmds save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" eval cmd=\"$cmd\" func_len " $cmd" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then func_show_eval "$cmd" 'exit $?' skipped_export=false else # The command line is too long to execute in one step. func_verbose "using reloadable object file for export list..." skipped_export=: # Break out early, otherwise skipped_export may be # set to false by a later but shorter cmd. 
break fi done IFS="$save_ifs" if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi fi if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' fi if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi tmp_deplibs= for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; *) tmp_deplibs="$tmp_deplibs $test_deplib" ;; esac done deplibs="$tmp_deplibs" if test -n "$convenience"; then if test -n "$whole_archive_flag_spec" && test "$compiler_needs_object" = yes && test -z "$libobjs"; then # extract the archives, so we have objects to list. # TODO: could optimize this to just extract one archive. whole_archive_flag_spec= fi if test -n "$whole_archive_flag_spec"; then save_libobjs=$libobjs eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= else gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $convenience libobjs="$libobjs $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi fi if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" linker_flags="$linker_flags $flag" fi # Make a backup of the uninstalled library when relinking if test "$mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then eval test_cmds=\"$module_expsym_cmds\" cmds=$module_expsym_cmds else eval test_cmds=\"$module_cmds\" cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then eval test_cmds=\"$archive_expsym_cmds\" cmds=$archive_expsym_cmds else eval test_cmds=\"$archive_cmds\" cmds=$archive_cmds fi fi if test "X$skipped_export" != "X:" && func_len " $test_cmds" && len=$func_len_result && test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else # The command line is too long to link in one step, link piecewise # or, if using GNU ld and skipped_export is not :, use a linker # script. # Save the value of $output and $libobjs because we want to # use them later. 
If we have whole_archive_flag_spec, we # want to use save_libobjs as it was before # whole_archive_flag_spec was expanded, because we can't # assume the linker understands whole_archive_flag_spec. # This may have to be revisited, in case too many # convenience libraries get linked in and end up exceeding # the spec. if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then save_libobjs=$libobjs fi save_output=$output output_la=`$ECHO "X$output" | $Xsed -e "$basename"` # Clear the reloadable object creation command queue and # initialize k to one. test_cmds= concat_cmds= objlist= last_robj= k=1 if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then output=${output_objdir}/${output_la}.lnkscript func_verbose "creating GNU ld script: $output" $ECHO 'INPUT (' > $output for obj in $save_libobjs do $ECHO "$obj" >> $output done $ECHO ')' >> $output delfiles="$delfiles $output" elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then output=${output_objdir}/${output_la}.lnk func_verbose "creating linker input file list: $output" : > $output set x $save_libobjs shift firstobj= if test "$compiler_needs_object" = yes; then firstobj="$1 " shift fi for obj do $ECHO "$obj" >> $output done delfiles="$delfiles $output" output=$firstobj\"$file_list_spec$output\" else if test -n "$save_libobjs"; then func_verbose "creating reloadable object files..." output=$output_objdir/$output_la-${k}.$objext eval test_cmds=\"$reload_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 # Loop over the list of objects to be linked. for obj in $save_libobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result if test "X$objlist" = X || test "$len" -lt "$max_cmd_len"; then func_append objlist " $obj" else # The command $test_cmds is almost too long, add a # command to the queue. if test "$k" -eq 1 ; then # The first file doesn't have a previous command to add. eval concat_cmds=\"$reload_cmds $objlist $last_robj\" else # All subsequent reloadable object files will link in # the last one created. eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\" fi last_robj=$output_objdir/$output_la-${k}.$objext func_arith $k + 1 k=$func_arith_result output=$output_objdir/$output_la-${k}.$objext objlist=$obj func_len " $last_robj" func_arith $len0 + $func_len_result len=$func_arith_result fi done # Handle the remaining objects by creating one last # reloadable object file. All subsequent reloadable object # files will link in the last one created. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" if test -n "$last_robj"; then eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" fi delfiles="$delfiles $output" else output= fi if ${skipped_export-false}; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols libobjs=$output # Append the command to create the export file. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi fi test -n "$save_libobjs" && func_verbose "creating a temporary reloadable object file: $output" # Loop through the commands generated above and execute them. 
save_ifs="$IFS"; IFS='~' for cmd in $concat_cmds; do IFS="$save_ifs" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test "$mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" if test -n "$export_symbols_regex" && ${skipped_export-false}; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi if ${skipped_export-false}; then if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' fi if test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi fi libobjs=$output # Restore the value of output. output=$save_output if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= fi # Expand the library linking commands again to reset the # value of $libobjs for piecewise linking. # Do each of the archive commands. if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then cmds=$module_expsym_cmds else cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then cmds=$archive_expsym_cmds else cmds=$archive_cmds fi fi fi if test -n "$delfiles"; then # Append the command to remove temporary files to $cmds. eval cmds=\"\$cmds~\$RM $delfiles\" fi # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $dlprefiles libobjs="$libobjs $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" eval cmd=\"$cmd\" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test "$mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" # Restore the uninstalled library and exit if test "$mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
if test -n "$convenience"; then if test -z "$whole_archive_flag_spec"; then func_show_eval '${RM}r "$gentop"' fi fi exit $EXIT_SUCCESS fi # Create links to the real library. for linkname in $linknames; do if test "$realname" != "$linkname"; then func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done # If -module or -export-dynamic was specified, set the dlname. if test "$module" = yes || test "$export_dynamic" = yes; then # On all known operating systems, these are identical. dlname="$soname" fi fi ;; obj) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for objects" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for objects" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for objects" test -n "$xrpath" && \ func_warning "\`-R' is ignored for objects" test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for objects" test -n "$release" && \ func_warning "\`-release' is ignored for objects" case $output in *.lo) test -n "$objs$old_deplibs" && \ func_fatal_error "cannot build library object \`$output' from non-libtool objects" libobj=$output func_lo2o "$libobj" obj=$func_lo2o_result ;; *) libobj= obj="$output" ;; esac # Delete the old objects. $opt_dry_run || $RM $obj $libobj # Objects from convenience libraries. This assumes # single-version convenience libraries. Whenever we create # different ones for PIC/non-PIC, this we'll have to duplicate # the extraction. reload_conv_objs= gentop= # reload_cmds runs $LD directly, so let us get rid of # -Wl from whole_archive_flag_spec and hope we can get by with # turning comma into space.. wl= if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'` else gentop="$output_objdir/${obj}x" generated="$generated $gentop" func_extract_archives $gentop $convenience reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi # Create the old-style object. reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test output="$obj" func_execute_cmds "$reload_cmds" 'exit $?' # Exit if we aren't doing a library object file. if test -z "$libobj"; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS fi if test "$build_libtool_libs" != yes; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi # Create an invalid libtool object if no PIC, so that we don't # accidentally link it into a program. # $show "echo timestamp > $libobj" # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? exit $EXIT_SUCCESS fi if test -n "$pic_flag" || test "$pic_mode" != default; then # Only do commands if we really have different PIC objects. reload_objs="$libobjs $reload_conv_objs" output="$libobj" func_execute_cmds "$reload_cmds" 'exit $?' 
fi if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS ;; prog) case $host in *cygwin*) func_stripname '' '.exe' "$output" output=$func_stripname_result.exe;; esac test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for programs" test -n "$release" && \ func_warning "\`-release' is ignored for programs" test "$preload" = yes \ && test "$dlopen_support" = unknown \ && test "$dlopen_self" = unknown \ && test "$dlopen_self_static" = unknown && \ func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` ;; esac case $host in *-*-darwin*) # Don't allow lazy linking, it breaks C++ global constructors # But is supposedly fixed on 10.4 or later (yay!). if test "$tagname" = CXX ; then case ${MACOSX_DEPLOYMENT_TARGET-10.0} in 10.[0123]) compile_command="$compile_command ${wl}-bind_at_load" finalize_command="$finalize_command ${wl}-bind_at_load" ;; esac fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $compile_deplibs " in *" -L$path/$objdir "*) new_libs="$new_libs -L$path/$objdir" ;; esac ;; esac done for deplib in $compile_deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$new_libs $deplib" ;; esac ;; *) new_libs="$new_libs $deplib" ;; esac done compile_deplibs="$new_libs" compile_command="$compile_command $compile_deplibs" finalize_command="$finalize_command $finalize_deplibs" if test -n "$rpath$xrpath"; then # If the user specified any rpath flags, then add them. for libdir in $rpath $xrpath; do # This is the magic to use -rpath. case "$finalize_rpath " in *" $libdir "*) ;; *) finalize_rpath="$finalize_rpath $libdir" ;; esac done fi # Now hardcode the library paths rpath= hardcode_libdirs= for libdir in $compile_rpath $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" rpath="$rpath $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) perm_rpath="$perm_rpath $libdir" ;; esac fi case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$libdir:"*) ;; ::) dllsearchpath=$libdir;; *) dllsearchpath="$dllsearchpath:$libdir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) dllsearchpath="$dllsearchpath:$testbindir";; esac ;; esac done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi compile_rpath="$rpath" rpath= hardcode_libdirs= for libdir in $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" rpath="$rpath $flag" fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in *" $libdir "*) ;; *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi finalize_rpath="$rpath" if test -n "$libobjs" && test "$build_old_libs" = yes; then # Transform all the library objects into standard objects. compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` fi func_generate_dlsyms "$outputname" "@PROGRAM@" "no" # template prelinking step if test -n "$prelink_cmds"; then func_execute_cmds "$prelink_cmds" 'exit $?' fi wrappers_required=yes case $host in *cygwin* | *mingw* ) if test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; *cegcc) # Disable wrappers for cegcc, we are cross compiling anyway. wrappers_required=no ;; *) if test "$need_relink" = no || test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; esac if test "$wrappers_required" = no; then # Replace the output file specification. compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` link_command="$compile_command$compile_rpath" # We have no uninstalled library dependencies, so finalize right now. exit_status=0 func_show_eval "$link_command" 'exit_status=$?' # Delete the generated files. 
if test -f "$output_objdir/${outputname}S.${objext}"; then func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' fi exit $exit_status fi if test -n "$compile_shlibpath$finalize_shlibpath"; then compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" fi if test -n "$finalize_shlibpath"; then finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" fi compile_var= finalize_var= if test -n "$runpath_var"; then if test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do rpath="$rpath$dir:" done compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi if test -n "$finalize_perm_rpath"; then # We should set the runpath_var. rpath= for dir in $finalize_perm_rpath; do rpath="$rpath$dir:" done finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi fi if test "$no_install" = yes; then # We don't need to create a wrapper script. link_command="$compile_var$compile_command$compile_rpath" # Replace the output file specification. link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` # Delete the old output file. $opt_dry_run || $RM $output # Link the executable and exit func_show_eval "$link_command" 'exit $?' exit $EXIT_SUCCESS fi if test "$hardcode_action" = relink; then # Fast installation is not supported link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" func_warning "this platform does not like uninstalled shared libraries" func_warning "\`$output' will be relinked during installation" else if test "$fast_install" != no; then link_command="$finalize_var$compile_command$finalize_rpath" if test "$fast_install" = yes; then relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` else # fast_install is set to needless relink_command= fi else link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" fi fi # Replace the output file specification. link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` # Delete the old output files. $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname func_show_eval "$link_command" 'exit $?' # Now create the wrapper script. func_verbose "creating $output" # Quote the relink command for shipping. if test -n "$relink_command"; then # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done relink_command="(cd `pwd`; $relink_command)" relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` fi # Quote $ECHO for shipping. if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then case $progpath in [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; esac qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"` else qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"` fi # Only actually do things if not in dry run mode. 
$opt_dry_run || { # win32 will think the script is a binary if it has # a .exe suffix, so we strip it off here. case $output in *.exe) func_stripname '' '.exe' "$output" output=$func_stripname_result ;; esac # test for cygwin because mv fails w/o .exe extensions case $host in *cygwin*) exeext=.exe func_stripname '' '.exe' "$outputname" outputname=$func_stripname_result ;; *) exeext= ;; esac case $host in *cygwin* | *mingw* ) func_dirname_and_basename "$output" "" "." output_name=$func_basename_result output_path=$func_dirname_result cwrappersource="$output_path/$objdir/lt-$output_name.c" cwrapper="$output_path/$output_name.exe" $RM $cwrappersource $cwrapper trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 func_emit_cwrapperexe_src > $cwrappersource # The wrapper executable is built using the $host compiler, # because it contains $host paths and files. If cross- # compiling, it, like the target executable, must be # executed on the $host or under an emulation environment. $opt_dry_run || { $LTCC $LTCFLAGS -o $cwrapper $cwrappersource $STRIP $cwrapper } # Now, create the wrapper script for func_source use: func_ltwrapper_scriptname $cwrapper $RM $func_ltwrapper_scriptname_result trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 $opt_dry_run || { # note: this script will not be executed, so do not chmod. if test "x$build" = "x$host" ; then $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else func_emit_wrapper no > $func_ltwrapper_scriptname_result fi } ;; * ) $RM $output trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 func_emit_wrapper no > $output chmod +x $output ;; esac } exit $EXIT_SUCCESS ;; esac # See if we need to build an old-fashioned archive. for oldlib in $oldlibs; do if test "$build_libtool_libs" = convenience; then oldobjs="$libobjs_save $symfileobj" addlibs="$convenience" build_libtool_libs=no else if test "$build_libtool_libs" = module; then oldobjs="$libobjs_save" build_libtool_libs=no else oldobjs="$old_deplibs $non_pic_objects" if test "$preload" = yes && test -f "$symfileobj"; then oldobjs="$oldobjs $symfileobj" fi fi addlibs="$old_convenience" fi if test -n "$addlibs"; then gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $addlibs oldobjs="$oldobjs $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then cmds=$old_archive_from_new_cmds else # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_extract_archives $gentop $dlprefiles oldobjs="$oldobjs $func_extract_archives_result" fi # POSIX demands no paths to be encoded in archives. We have # to avoid creating archives with duplicate basenames if we # might have to extract them afterwards, e.g., when creating a # static archive out of a convenience library, or when linking # the entirety of a libtool archive into another (currently # not supported by libtool). if (for obj in $oldobjs do func_basename "$obj" $ECHO "$func_basename_result" done | sort | sort -uc >/dev/null 2>&1); then : else $ECHO "copying selected object files to avoid basename conflicts..." 
gentop="$output_objdir/${outputname}x" generated="$generated $gentop" func_mkdir_p "$gentop" save_oldobjs=$oldobjs oldobjs= counter=1 for obj in $save_oldobjs do func_basename "$obj" objbase="$func_basename_result" case " $oldobjs " in " ") oldobjs=$obj ;; *[\ /]"$objbase "*) while :; do # Make sure we don't pick an alternate name that also # overlaps. newobj=lt$counter-$objbase func_arith $counter + 1 counter=$func_arith_result case " $oldobjs " in *[\ /]"$newobj "*) ;; *) if test ! -f "$gentop/$newobj"; then break; fi ;; esac done func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" oldobjs="$oldobjs $gentop/$newobj" ;; *) oldobjs="$oldobjs $obj" ;; esac done fi eval cmds=\"$old_archive_cmds\" func_len " $cmds" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts func_verbose "using piecewise archive linking..." save_RANLIB=$RANLIB RANLIB=: objlist= concat_cmds= save_oldobjs=$oldobjs oldobjs= # Is there a better way of finding the last object in the list? for obj in $save_oldobjs do last_oldobj=$obj done eval test_cmds=\"$old_archive_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 for obj in $save_oldobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result func_append objlist " $obj" if test "$len" -lt "$max_cmd_len"; then : else # the above command should be used before it gets too long oldobjs=$objlist if test "$obj" = "$last_oldobj" ; then RANLIB=$save_RANLIB fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" objlist= len=$len0 fi done RANLIB=$save_RANLIB oldobjs=$objlist if test "X$oldobjs" = "X" ; then eval cmds=\"\$concat_cmds\" else eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi func_execute_cmds "$cmds" 'exit $?' done test -n "$generated" && \ func_show_eval "${RM}r$generated" # Now create the libtool archive. case $output in *.la) old_library= test "$build_old_libs" = yes && old_library="$libname.$libext" func_verbose "creating $output" # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done # Quote the link command for shipping. relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` if test "$hardcode_automatic" = yes ; then relink_command= fi # Only create the output if not a dry run. 
$opt_dry_run || { for installed in no yes; do if test "$installed" = yes; then if test -z "$install_libdir"; then break fi output="$output_objdir/$outputname"i # Replace all uninstalled libtool libraries with the installed ones newdependency_libs= for deplib in $dependency_libs; do case $deplib in *.la) func_basename "$deplib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" newdependency_libs="$newdependency_libs $libdir/$name" ;; *) newdependency_libs="$newdependency_libs $deplib" ;; esac done dependency_libs="$newdependency_libs" newdlfiles= for lib in $dlfiles; do case $lib in *.la) func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" newdlfiles="$newdlfiles $libdir/$name" ;; *) newdlfiles="$newdlfiles $lib" ;; esac done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in *.la) # Only pass preopened files to the pseudo-archive (for # eventual linking with the app. that links it) if we # didn't already link the preopened objects directly into # the library: func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" newdlprefiles="$newdlprefiles $libdir/$name" ;; esac done dlprefiles="$newdlprefiles" else newdlfiles= for lib in $dlfiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac newdlfiles="$newdlfiles $abs" done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac newdlprefiles="$newdlprefiles $abs" done dlprefiles="$newdlprefiles" fi $RM $output # place dlname in correct position for cygwin tdlname=$dlname case $host,$output,$installed,$module,$dlname in *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; esac $ECHO > $output "\ # $outputname - a libtool library file # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='$tdlname' # Names of this library. library_names='$library_names' # The name of the static archive. old_library='$old_library' # Linker flags that can not go in dependency_libs. inherited_linker_flags='$new_inherited_linker_flags' # Libraries that this one depends upon. dependency_libs='$dependency_libs' # Names of additional weak libraries provided by this library weak_library_names='$weak_libs' # Version information for $libname. current=$current age=$age revision=$revision # Is this an already installed library? installed=$installed # Should we warn about portability when linking against -modules? shouldnotlink=$module # Files to dlopen/dlpreopen dlopen='$dlfiles' dlpreopen='$dlprefiles' # Directory that this library needs to be installed in: libdir='$install_libdir'" if test "$installed" = no && test "$need_relink" = yes; then $ECHO >> $output "\ relink_command=\"$relink_command\"" fi done } # Do a symbolic link so that the libtool archive can be found in # LD_LIBRARY_PATH before the program is installed. func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' 
;; esac exit $EXIT_SUCCESS } { test "$mode" = link || test "$mode" = relink; } && func_mode_link ${1+"$@"} # func_mode_uninstall arg... func_mode_uninstall () { $opt_debug RM="$nonopt" files= rmforce= exit_status=0 # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" for arg do case $arg in -f) RM="$RM $arg"; rmforce=yes ;; -*) RM="$RM $arg" ;; *) files="$files $arg" ;; esac done test -z "$RM" && \ func_fatal_help "you must specify an RM program" rmdirs= origobjdir="$objdir" for file in $files; do func_dirname "$file" "" "." dir="$func_dirname_result" if test "X$dir" = X.; then objdir="$origobjdir" else objdir="$dir/$origobjdir" fi func_basename "$file" name="$func_basename_result" test "$mode" = uninstall && objdir="$dir" # Remember objdir for removal later, being careful to avoid duplicates if test "$mode" = clean; then case " $rmdirs " in *" $objdir "*) ;; *) rmdirs="$rmdirs $objdir" ;; esac fi # Don't error if the file doesn't exist and rm -f was used. if { test -L "$file"; } >/dev/null 2>&1 || { test -h "$file"; } >/dev/null 2>&1 || test -f "$file"; then : elif test -d "$file"; then exit_status=1 continue elif test "$rmforce" = yes; then continue fi rmfiles="$file" case $name in *.la) # Possibly a libtool archive, so verify it. if func_lalib_p "$file"; then func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do rmfiles="$rmfiles $objdir/$n" done test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" case "$mode" in clean) case " $library_names " in # " " in the beginning catches empty $dlname *" $dlname "*) ;; *) rmfiles="$rmfiles $objdir/$dlname" ;; esac test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; esac fi ;; *.lo) # Possibly a libtool object, so verify it. if func_lalib_p "$file"; then # Read the .lo file func_source $dir/$name # Add PIC object to the list of files to remove. if test -n "$pic_object" && test "$pic_object" != none; then rmfiles="$rmfiles $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. if test -n "$non_pic_object" && test "$non_pic_object" != none; then rmfiles="$rmfiles $dir/$non_pic_object" fi fi ;; *) if test "$mode" = clean ; then noexename=$name case $file in *.exe) func_stripname '' '.exe' "$file" file=$func_stripname_result func_stripname '' '.exe' "$name" noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe rmfiles="$rmfiles $file" ;; esac # Do a test to see if this is a libtool program. 
if func_ltwrapper_p "$file"; then if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" relink_command= func_source $func_ltwrapper_scriptname_result rmfiles="$rmfiles $func_ltwrapper_scriptname_result" else relink_command= func_source $dir/$noexename fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" if test "$fast_install" = yes && test -n "$relink_command"; then rmfiles="$rmfiles $objdir/lt-$name" fi if test "X$noexename" != "X$name" ; then rmfiles="$rmfiles $objdir/lt-${noexename}.c" fi fi fi ;; esac func_show_eval "$RM $rmfiles" 'exit_status=1' done objdir="$origobjdir" # Try to remove the ${objdir}s in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status } { test "$mode" = uninstall || test "$mode" = clean; } && func_mode_uninstall ${1+"$@"} test -z "$mode" && { help="$generic_help" func_fatal_help "you must specify a MODE" } test -z "$exec_cmd" && \ func_fatal_help "invalid operation mode \`$mode'" if test -n "$exec_cmd"; then eval exec "$exec_cmd" exit $EXIT_FAILURE fi exit $exit_status # The TAGs below are defined such that we never get into a situation # in which we disable both kinds of libraries. Given conflicting # choices, we go for a static library, that is the most portable, # since we can't tell whether shared libraries were disabled because # the user asked for that or because the platform doesn't support # them. This is particularly important on AIX, because we don't # support having both static and shared libraries enabled at the same # time on that platform, so we default to a shared-only configuration. # If a disable-shared tag is given, we'll fallback to a static-only # configuration. But we'll never go from static-only to shared-only. # ### BEGIN LIBTOOL TAG CONFIG: disable-shared build_libtool_libs=no build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: # vi:sw=2 pyparted-3.6/include/0000775000076400007640000000000011542323614011651 500000000000000pyparted-3.6/include/Makefile.in0000664000076400007640000004415111542323606013644 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted include subdirectory # # Copyright (C) 2007 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. 
# This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = include DIST_COMMON = $(noinst_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive HEADERS = $(noinst_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = 
@ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = docstrings typeobjects noinst_HEADERS = convert.h exceptions.h _pedmodule.h pyconstraint.h \ pydevice.h pydisk.h pyfilesys.h pygeom.h pynatmath.h \ pytimer.h pyunit.h MAINTAINERCLEANFILES = Makefile.in all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign include/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. # To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(HEADERS) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
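# Example (an illustrative sketch, not text emitted by automake): in this
# directory MAINTAINERCLEANFILES is Makefile.in, so "make maintainer-clean"
# removes the generated Makefile and Makefile.in; they can be regenerated
# from Makefile.am with the same command the rules above echo, e.g.
#   cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile
# which requires automake to be installed on the maintainer's system.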
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am check check-am clean clean-generic clean-libtool \ ctags ctags-recursive distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/include/pynatmath.h0000664000076400007640000000506411170723402013750 00000000000000/* * pynatmath.h * pyparted type definitions for pynatmath.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef PYNATMATH_H_INCLUDED #define PYNATMATH_H_INCLUDED #include #include /* 1:1 function mappings for natmath.h in libparted */ PyObject *py_ped_alignment_duplicate(PyObject *, PyObject *); PyObject *py_ped_alignment_intersect(PyObject *, PyObject *); PyObject *py_ped_alignment_align_up(PyObject *, PyObject *); PyObject *py_ped_alignment_align_down(PyObject *, PyObject *); PyObject *py_ped_alignment_align_nearest(PyObject *, PyObject *); PyObject *py_ped_alignment_is_aligned(PyObject *, PyObject *); /* _ped.Alignment type is the Python equivalent of PedAlignment in libparted */ typedef struct { PyObject_HEAD /* PedAlignment members */ long long offset; /* PedSector */ long long grain_size; /* PedSector */ } _ped_Alignment; void _ped_Alignment_dealloc(_ped_Alignment *); int _ped_Alignment_compare(_ped_Alignment *, PyObject *); PyObject *_ped_Alignment_richcompare(_ped_Alignment *, PyObject *, int); PyObject *_ped_Alignment_str(_ped_Alignment *); int _ped_Alignment_traverse(_ped_Alignment *, visitproc, void *); int _ped_Alignment_clear(_ped_Alignment *); int _ped_Alignment_init(_ped_Alignment *, PyObject *, PyObject *); PyObject *_ped_Alignment_get(_ped_Alignment *, void *); int _ped_Alignment_set(_ped_Alignment *, PyObject *, void *); extern PyTypeObject _ped_Alignment_Type_obj; #endif /* PYNATMATH_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pydisk.h0000664000076400007640000001573611343033343013255 00000000000000/* * pydisk.h * pyparted type definitions for pydisk.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef PYDISK_H_INCLUDED #define PYDISK_H_INCLUDED #include #include /* _ped.Partition type is the Python equivalent of PedPartition * in libparted */ typedef struct { PyObject_HEAD /* PedPartition members */ PyObject *disk; /* _ped.Disk */ PyObject *geom; /* _ped.Geometry */ int type; /* PedPartitionType */ PyObject *fs_type; /* _ped.FileSystemType */ /* store the PedPartition from libparted */ PedPartition *ped_partition; int _owned; /* Belongs to a Disk or not */ } _ped_Partition; void _ped_Partition_dealloc(_ped_Partition *); int _ped_Partition_compare(_ped_Partition *, PyObject *); PyObject *_ped_Partition_richcompare(_ped_Partition *, PyObject *, int); PyObject *_ped_Partition_str(_ped_Partition *); int _ped_Partition_traverse(_ped_Partition *, visitproc, void *); int _ped_Partition_clear(_ped_Partition *); int _ped_Partition_init(_ped_Partition *, PyObject *, PyObject *); PyObject *_ped_Partition_get(_ped_Partition *, void *); int _ped_Partition_set(_ped_Partition *, PyObject *, void *); extern PyTypeObject _ped_Partition_Type_obj; /* _ped.Disk type is the Python equivalent of PedDisk in libparted */ typedef struct { PyObject_HEAD /* PedDisk members */ PyObject *dev; /* _ped.Device */ PyObject *type; /* _ped.DiskType */ /* store the PedDisk from libparted */ PedDisk *ped_disk; } _ped_Disk; void _ped_Disk_dealloc(_ped_Disk *); int _ped_Disk_compare(_ped_Disk *, PyObject *); PyObject *_ped_Disk_richcompare(_ped_Disk *, PyObject *, int); PyObject *_ped_Disk_str(_ped_Disk *); int _ped_Disk_traverse(_ped_Disk *, visitproc, void *); int _ped_Disk_clear(_ped_Disk *); int _ped_Disk_init(_ped_Disk *, PyObject *, PyObject *); extern PyTypeObject _ped_Disk_Type_obj; /* _ped.DiskType type is the Python equivalent of PedDiskType in libparted */ typedef struct { PyObject_HEAD /* PedDiskType members */ char *name; long long features; /* PedDiskTypeFeature */ } _ped_DiskType; void _ped_DiskType_dealloc(_ped_DiskType *); int _ped_DiskType_compare(_ped_DiskType *, PyObject *); PyObject *_ped_DiskType_richcompare(_ped_DiskType *, PyObject *, int); PyObject *_ped_DiskType_str(_ped_DiskType *); int _ped_DiskType_traverse(_ped_DiskType *, visitproc, void *); int _ped_DiskType_clear(_ped_DiskType *); PyObject *_ped_DiskType_get(_ped_DiskType *, void *); extern PyTypeObject _ped_DiskType_Type_obj; /* 1:1 function mappings for disk.h in libparted */ PyObject *py_ped_disk_type_get_next(PyObject *, PyObject *); PyObject *py_ped_disk_type_get(PyObject *, PyObject *); PyObject *py_ped_disk_type_check_feature(PyObject *, PyObject *); PyObject *py_ped_disk_clobber(PyObject *, PyObject *); PyObject *py_ped_disk_duplicate(PyObject *, PyObject *); PyObject *py_ped_disk_destroy(PyObject *, PyObject *); PyObject *py_ped_disk_commit(PyObject *, PyObject *); PyObject *py_ped_disk_commit_to_dev(PyObject *, PyObject *); PyObject *py_ped_disk_commit_to_os(PyObject *, PyObject *); PyObject *py_ped_disk_check(PyObject *, PyObject *); PyObject *py_ped_disk_print(PyObject *, PyObject *); PyObject *py_ped_disk_get_primary_partition_count(PyObject *, PyObject *); PyObject *py_ped_disk_get_last_partition_num(PyObject *, PyObject *); PyObject *py_ped_disk_get_max_primary_partition_count(PyObject *, PyObject *); PyObject *py_ped_disk_get_max_supported_partition_count(PyObject *, PyObject *); PyObject *py_ped_disk_get_partition_alignment(PyObject *, PyObject *); PyObject *py_ped_disk_max_partition_length(PyObject *, PyObject *); PyObject 
*py_ped_disk_max_partition_start_sector(PyObject *, PyObject *); PyObject *py_ped_disk_set_flag(PyObject *, PyObject *); PyObject *py_ped_disk_get_flag(PyObject *, PyObject *); PyObject *py_ped_disk_is_flag_available(PyObject *, PyObject *); PyObject *py_ped_disk_flag_get_name(PyObject *, PyObject *); PyObject *py_ped_disk_flag_get_by_name(PyObject *, PyObject *); PyObject *py_ped_disk_flag_next(PyObject *, PyObject *); PyObject *py_ped_partition_destroy(_ped_Partition *, PyObject *); PyObject *py_ped_partition_is_active(_ped_Partition *, PyObject *); PyObject *py_ped_partition_set_flag(_ped_Partition *, PyObject *); PyObject *py_ped_partition_get_flag(_ped_Partition *, PyObject *); PyObject *py_ped_partition_is_flag_available(_ped_Partition *, PyObject *); PyObject *py_ped_partition_set_system(_ped_Partition *, PyObject *); PyObject *py_ped_partition_set_name(_ped_Partition *, PyObject *); PyObject *py_ped_partition_get_name(_ped_Partition *, PyObject *); PyObject *py_ped_partition_is_busy(_ped_Partition *, PyObject *); PyObject *py_ped_partition_get_path(_ped_Partition *, PyObject *); PyObject *py_ped_partition_type_get_name(PyObject *, PyObject *); PyObject *py_ped_partition_flag_get_name(PyObject *, PyObject *); PyObject *py_ped_partition_flag_get_by_name(PyObject *, PyObject *); PyObject *py_ped_partition_flag_next(PyObject *, PyObject *); PyObject *py_ped_disk_add_partition(PyObject *, PyObject *); PyObject *py_ped_disk_remove_partition(PyObject *, PyObject *); PyObject *py_ped_disk_delete_partition(PyObject *, PyObject *); PyObject *py_ped_disk_delete_all(PyObject *, PyObject *); PyObject *py_ped_disk_set_partition_geom(PyObject *, PyObject *); PyObject *py_ped_disk_maximize_partition(PyObject *, PyObject *); PyObject *py_ped_disk_get_max_partition_geometry(PyObject *, PyObject *); PyObject *py_ped_disk_minimize_extended_partition(PyObject *, PyObject *); PyObject *py_ped_disk_next_partition(PyObject *, PyObject *); PyObject *py_ped_disk_get_partition(PyObject *, PyObject *); PyObject *py_ped_disk_get_partition_by_sector(PyObject *, PyObject *); PyObject *py_ped_disk_extended_partition(PyObject *, PyObject *); PyObject *py_ped_disk_new_fresh(PyObject *, PyObject *); #endif /* PYDISK_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pyfilesys.h0000664000076400007640000000754411170723402013777 00000000000000/* * pyfilesys.h * pyparted type definitions for pyfilesys.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef PYFILESYS_H_INCLUDED #define PYFILESYS_H_INCLUDED #include #include /* These functions need to be added to libparted. Remove when that's done. */ #define ped_file_system_destroy(fs) /* 1:1 function mappings for filesys.h in libparted */ PyObject *py_ped_file_system_type_get(PyObject *, PyObject *); PyObject *py_ped_file_system_type_get_next(PyObject *, PyObject *); PyObject *py_ped_file_system_probe_specific(PyObject *, PyObject *); PyObject *py_ped_file_system_probe(PyObject *, PyObject *); PyObject *py_ped_file_system_clobber(PyObject *, PyObject *); PyObject *py_ped_file_system_open(PyObject *, PyObject *); PyObject *py_ped_file_system_create(PyObject *, PyObject *); PyObject *py_ped_file_system_close(PyObject *, PyObject *); PyObject *py_ped_file_system_check(PyObject *, PyObject *); PyObject *py_ped_file_system_copy(PyObject *, PyObject *); PyObject *py_ped_file_system_resize(PyObject *, PyObject *); PyObject *py_ped_file_system_get_resize_constraint(PyObject *, PyObject *); /* _ped.FileSystemType type is the Python equivalent of PedFileSystemType * in libparted */ typedef struct { PyObject_HEAD /* PedFileSystemType members */ char *name; } _ped_FileSystemType; void _ped_FileSystemType_dealloc(_ped_FileSystemType *); int _ped_FileSystemType_compare(_ped_FileSystemType *, PyObject *); PyObject *_ped_FileSystemType_richcompare(_ped_FileSystemType *, PyObject *, int); PyObject *_ped_FileSystemType_str(_ped_FileSystemType *); int _ped_FileSystemType_traverse(_ped_FileSystemType *, visitproc, void *); int _ped_FileSystemType_clear(_ped_FileSystemType *); PyObject *_ped_FileSystemType_get(_ped_FileSystemType *, void *); extern PyTypeObject _ped_FileSystemType_Type_obj; /* _ped.FileSystem type is the Python equiv of PedFileSystem in libparted */ typedef struct { PyObject_HEAD /* PedFileSystem members */ PyObject *type; /* _ped.FileSystemType */ PyObject *geom; /* _ped.Geometry */ int checked; /* store the PedFileSystem from libparted */ PedFileSystem *ped_filesystem; } _ped_FileSystem; void _ped_FileSystem_dealloc(_ped_FileSystem *); int _ped_FileSystem_compare(_ped_FileSystem *, PyObject *); PyObject *_ped_FileSystem_richcompare(_ped_FileSystem *, PyObject *, int); PyObject *_ped_FileSystem_str(_ped_FileSystem *); int _ped_FileSystem_traverse(_ped_FileSystem *, visitproc, void *); int _ped_FileSystem_clear(_ped_FileSystem *); int _ped_FileSystem_init(_ped_FileSystem *, PyObject *, PyObject *); PyObject *_ped_FileSystem_get(_ped_FileSystem *, void *); extern PyTypeObject _ped_FileSystem_Type_obj; #endif /* PYFILESYS_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/exceptions.h0000664000076400007640000000331411415143007014117 00000000000000/* * exceptions.h * * Copyright (C) 2007 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): Chris Lumens */ #ifndef _EXCEPTIONS_H_INCLUDED #define _EXCEPTIONS_H_INCLUDED #include /* custom exceptions for _ped */ PyObject *AlignmentException; PyObject *CreateException; PyObject *ConstraintException; PyObject *DeviceException; PyObject *DiskException; PyObject *DiskLabelException; PyObject *FileSystemException; PyObject *GeometryException; PyObject *IOException; PyObject *NotNeededException; PyObject *PartedException; PyObject *PartitionException; PyObject *TimerException; PyObject *UnknownDeviceException; PyObject *UnknownTypeException; extern unsigned int partedExnRaised; extern char *partedExnMessage; #endif /* _EXCEPTIONS_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pydevice.h0000664000076400007640000001164611274123127013561 00000000000000/* * pydevice.h * pyparted type definitions for pydevice.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell */ #ifndef PYDEVICE_H_INCLUDED #define PYDEVICE_H_INCLUDED #include #include /* _ped.CHSGeometry type is the Python equiv of PedCHSGeometry in libparted */ typedef struct { PyObject_HEAD /* a PedCHSGeometry stores three ints */ int cylinders; int heads; int sectors; } _ped_CHSGeometry; void _ped_CHSGeometry_dealloc(_ped_CHSGeometry *); int _ped_CHSGeometry_compare(_ped_CHSGeometry *, PyObject *); PyObject *_ped_CHSGeometry_richcompare(_ped_CHSGeometry *, PyObject *, int); PyObject *_ped_CHSGeometry_str(_ped_CHSGeometry *); int _ped_CHSGeometry_traverse(_ped_CHSGeometry *, visitproc, void *); int _ped_CHSGeometry_clear(_ped_CHSGeometry *); PyObject *_ped_CHSGeometry_get(_ped_CHSGeometry *, void *); extern PyTypeObject _ped_CHSGeometry_Type_obj; /* _ped.Device type is the Python equivalent of PedDevice in libparted */ typedef struct { PyObject_HEAD /* a PedDevice is complex, we will store primitives when appropriate or * just other Python objects we've created for the typedefs in libparted */ char *model; char *path; long long type; long long sector_size; long long phys_sector_size; long long length; /* PedSector */ int open_count; int read_only; int external_mode; int dirty; int boot_dirty; PyObject *hw_geom; /* a _ped.CHSGeometry */ PyObject *bios_geom; /* a _ped.CHSGeometry */ short host; short did; } _ped_Device; void _ped_Device_dealloc(_ped_Device *); int _ped_Device_compare(_ped_Device *, PyObject *); PyObject *_ped_Device_richcompare(_ped_Device *, PyObject *, int); PyObject *_ped_Device_str(_ped_Device *); int _ped_Device_traverse(_ped_Device *, visitproc, void *); int _ped_Device_clear(_ped_Device *); PyObject *_ped_Device_get(_ped_Device *, void *); extern PyTypeObject _ped_Device_Type_obj; /* 1:1 function mappings for device.h in libparted */ PyObject *py_ped_disk_probe(PyObject *, PyObject *); PyObject *py_ped_device_probe_all(PyObject *, PyObject *); PyObject *py_ped_device_free_all(PyObject *, PyObject *); PyObject *py_ped_device_get(PyObject *, PyObject *); PyObject *py_ped_device_get_next(PyObject *, PyObject *); PyObject *py_ped_device_is_busy(PyObject *, PyObject *); PyObject *py_ped_device_open(PyObject *, PyObject *); PyObject *py_ped_device_close(PyObject *, PyObject *); PyObject *py_ped_device_destroy(PyObject *, PyObject *); PyObject *py_ped_device_cache_remove(PyObject *, PyObject *); PyObject *py_ped_device_begin_external_access(PyObject *, PyObject *); PyObject *py_ped_device_end_external_access(PyObject *, PyObject *); PyObject *py_ped_device_read(PyObject *, PyObject *); PyObject *py_ped_device_write(PyObject *, PyObject *); PyObject *py_ped_device_sync(PyObject *, PyObject *); PyObject *py_ped_device_sync_fast(PyObject *, PyObject *); PyObject *py_ped_device_check(PyObject *, PyObject *); PyObject *py_ped_device_get_constraint(PyObject *, PyObject *); PyObject *py_ped_device_get_minimal_aligned_constraint(PyObject *, PyObject *); PyObject *py_ped_device_get_optimal_aligned_constraint(PyObject *, PyObject *); PyObject *py_ped_device_get_minimum_alignment(PyObject *, PyObject *); PyObject *py_ped_device_get_optimum_alignment(PyObject *, PyObject *); PyObject *py_ped_file_system_get_create_constraint(PyObject *, PyObject *); PyObject *py_ped_file_system_get_copy_constraint(PyObject *, PyObject *); PyObject *py_ped_unit_get_size(PyObject *, PyObject *); PyObject *py_ped_unit_format_custom_byte(PyObject *, PyObject *); PyObject *py_ped_unit_format_byte(PyObject *, PyObject *); PyObject *py_ped_unit_format_custom(PyObject *, 
PyObject *); PyObject *py_ped_unit_format(PyObject *, PyObject *); PyObject *py_ped_unit_parse(PyObject *, PyObject *); PyObject *py_ped_unit_parse_custom(PyObject *, PyObject *); #endif /* PYDEVICE_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/_pedmodule.h0000664000076400007640000000256311170723402014061 00000000000000/* * _pedmodule.h * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell */ #ifndef _PARTEDMODULE_H_INCLUDED #define _PARTEDMODULE_H_INCLUDED #include extern PyObject *py_libparted_get_version(PyObject *, PyObject *); extern PyObject *py_pyparted_version(PyObject *, PyObject *); extern PyMODINIT_FUNC init_ped(void); #endif /* _PARTEDMODULE_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/Makefile.am0000664000076400007640000000244511151317256013633 00000000000000# # Makefile.am for pyparted include subdirectory # # Copyright (C) 2007 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # SUBDIRS = docstrings typeobjects noinst_HEADERS = convert.h exceptions.h _pedmodule.h pyconstraint.h \ pydevice.h pydisk.h pyfilesys.h pygeom.h pynatmath.h \ pytimer.h pyunit.h MAINTAINERCLEANFILES = Makefile.in pyparted-3.6/include/typeobjects/0000775000076400007640000000000011542323614014204 500000000000000pyparted-3.6/include/typeobjects/Makefile.in0000664000076400007640000003134511542323606016200 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted include/typeobjects subdirectory # # Copyright (C) 2008 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = include/typeobjects DIST_COMMON = $(noinst_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = HEADERS = $(noinst_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ 
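# Example (an illustrative sketch, not part of the generated template): each
# @NAME@ token in this file is a placeholder that ./config.status substitutes
# when it instantiates the Makefile from Makefile.in; a template line such as
#   PYTHON = @PYTHON@
# ends up in the generated Makefile as something like
#   PYTHON = /usr/bin/python
# where the actual value is whatever configure detected on the build system.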
OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ noinst_HEADERS = pyconstraint.h pydevice.h pydisk.h pyfilesys.h \ pygeom.h pynatmath.h pytimer.h MAINTAINERCLEANFILES = Makefile.in all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/typeobjects/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign include/typeobjects/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(HEADERS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool ctags distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/include/typeobjects/pynatmath.h0000664000076400007640000001015111170723402016274 00000000000000/* * pynatmath.h * pyparted type objects for pynatmath.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. 
You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYNATMATH_H_INCLUDED #define TYPEOBJECTS_PYNATMATH_H_INCLUDED #include #include /* _ped.Alignment type object */ static PyMemberDef _ped_Alignment_members[] = { {NULL} }; static PyMethodDef _ped_Alignment_methods[] = { {"duplicate", (PyCFunction) py_ped_alignment_duplicate, METH_VARARGS, alignment_duplicate_doc}, {"intersect", (PyCFunction) py_ped_alignment_intersect, METH_VARARGS, alignment_intersect_doc}, {"align_up", (PyCFunction) py_ped_alignment_align_up, METH_VARARGS, alignment_align_up_doc}, {"align_down", (PyCFunction) py_ped_alignment_align_down, METH_VARARGS, alignment_align_down_doc}, {"align_nearest", (PyCFunction) py_ped_alignment_align_nearest, METH_VARARGS, alignment_align_nearest_doc}, {"is_aligned", (PyCFunction) py_ped_alignment_is_aligned, METH_VARARGS, alignment_is_aligned_doc}, {NULL} }; static PyGetSetDef _ped_Alignment_getset[] = { {"offset", (getter) _ped_Alignment_get, (setter) _ped_Alignment_set, "Offset in sectors from the start of a _ped.Geometry.", "offset"}, {"grain_size", (getter) _ped_Alignment_get, (setter) _ped_Alignment_set, "Alignment grain_size", "grain_size"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_Alignment_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Alignment", .tp_basicsize = sizeof(_ped_Alignment), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_Alignment_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Alignment_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Alignment_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_Alignment_doc, .tp_traverse = (traverseproc) _ped_Alignment_traverse, .tp_clear = (inquiry) _ped_Alignment_clear, .tp_richcompare = (richcmpfunc) _ped_Alignment_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Alignment_methods, .tp_members = _ped_Alignment_members, .tp_getset = _ped_Alignment_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_Alignment_init, .tp_alloc = PyType_GenericAlloc, .tp_new = PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYNATMATH_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/typeobjects/pydisk.h0000664000076400007640000003265411313012377015607 00000000000000/* * pydisk.h * pyparted type objects for pydisk.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. 
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYDISK_H_INCLUDED #define TYPEOBJECTS_PYDISK_H_INCLUDED #include #include /* _ped.Partition type object */ static PyMemberDef _ped_Partition_members[] = { {"disk", T_OBJECT, offsetof(_ped_Partition, disk), READONLY, "The _ped.Disk this Partition exists on."}, {"geom", T_OBJECT, offsetof(_ped_Partition, geom), READONLY, "A _ped.Geometry object describing the region this Partition occupies."}, {"fs_type", T_OBJECT, offsetof(_ped_Partition, fs_type), READONLY, "A _ped.FileSystemType object describing the filesystem on this Partition."}, {NULL} }; static PyMethodDef _ped_Partition_methods[] = { {"destroy", (PyCFunction) py_ped_partition_destroy, METH_VARARGS, partition_destroy_doc}, {"is_active", (PyCFunction) py_ped_partition_is_active, METH_VARARGS, partition_is_active_doc}, {"set_flag", (PyCFunction) py_ped_partition_set_flag, METH_VARARGS, partition_set_flag_doc}, {"get_flag", (PyCFunction) py_ped_partition_get_flag, METH_VARARGS, partition_get_flag_doc}, {"is_flag_available", (PyCFunction) py_ped_partition_is_flag_available, METH_VARARGS, partition_is_flag_available_doc}, {"set_system", (PyCFunction) py_ped_partition_set_system, METH_VARARGS, partition_set_system_doc}, {"set_name", (PyCFunction) py_ped_partition_set_name, METH_VARARGS, partition_set_name_doc}, {"get_name", (PyCFunction) py_ped_partition_get_name, METH_VARARGS, partition_get_name_doc}, {"is_busy", (PyCFunction) py_ped_partition_is_busy, METH_VARARGS, partition_is_busy_doc}, {"get_path", (PyCFunction) py_ped_partition_get_path, METH_VARARGS, partition_get_path_doc}, {NULL} }; static PyGetSetDef _ped_Partition_getset[] = { {"num", (getter) _ped_Partition_get, NULL, "The number of this Partition on self.disk.", "num"}, {"type", (getter) _ped_Partition_get, (setter) _ped_Partition_set, "PedPartition type", "type"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_Partition_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Partition", .tp_basicsize = sizeof(_ped_Partition), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_Partition_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Partition_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Partition_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | 
Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_Partition_doc, .tp_traverse = (traverseproc) _ped_Partition_traverse, .tp_clear = (inquiry) _ped_Partition_clear, .tp_richcompare = (richcmpfunc) _ped_Partition_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Partition_methods, .tp_members = _ped_Partition_members, .tp_getset = _ped_Partition_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_Partition_init, .tp_alloc = PyType_GenericAlloc, .tp_new = PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; /* _ped.Disk type object */ static PyMemberDef _ped_Disk_members[] = { {"dev", T_OBJECT, offsetof(_ped_Disk, dev), READONLY, "A _ped.Device object holding self's partition table."}, {"type", T_OBJECT, offsetof(_ped_Disk, type), READONLY, "The type of the disk label as a _ped.DiskType."}, {NULL} }; static PyMethodDef _ped_Disk_methods[] = { {"duplicate", (PyCFunction) py_ped_disk_duplicate, METH_VARARGS, disk_duplicate_doc}, {"destroy", (PyCFunction) py_ped_disk_destroy, METH_VARARGS, disk_destroy_doc}, {"commit", (PyCFunction) py_ped_disk_commit, METH_VARARGS, disk_commit_doc}, {"commit_to_dev", (PyCFunction) py_ped_disk_commit_to_dev, METH_VARARGS, disk_commit_to_dev_doc}, {"commit_to_os", (PyCFunction) py_ped_disk_commit_to_os, METH_VARARGS, disk_commit_to_os_doc}, {"check", (PyCFunction) py_ped_disk_check, METH_VARARGS, disk_check_doc}, {"print", (PyCFunction) py_ped_disk_print, METH_VARARGS, disk_print_doc}, {"get_primary_partition_count", (PyCFunction) py_ped_disk_get_primary_partition_count, METH_VARARGS, disk_get_primary_partition_count_doc}, {"get_last_partition_num", (PyCFunction) py_ped_disk_get_last_partition_num, METH_VARARGS, disk_get_last_partition_num_doc}, {"get_max_primary_partition_count", (PyCFunction) py_ped_disk_get_max_primary_partition_count, METH_VARARGS, disk_get_max_primary_partition_count_doc}, {"get_max_supported_partition_count", (PyCFunction) py_ped_disk_get_max_supported_partition_count, METH_VARARGS, disk_get_max_supported_partition_count_doc}, {"get_partition_alignment", (PyCFunction) py_ped_disk_get_partition_alignment, METH_NOARGS, disk_get_partition_alignment_doc}, {"max_partition_length", (PyCFunction) py_ped_disk_max_partition_length, METH_NOARGS, disk_max_partition_length_doc}, {"max_partition_start_sector", (PyCFunction) py_ped_disk_max_partition_start_sector, METH_NOARGS, disk_max_partition_start_sector_doc}, {"set_flag", (PyCFunction) py_ped_disk_set_flag, METH_VARARGS, disk_set_flag_doc}, {"get_flag", (PyCFunction) py_ped_disk_get_flag, METH_VARARGS, disk_get_flag_doc}, {"is_flag_available", (PyCFunction) py_ped_disk_is_flag_available, METH_VARARGS, disk_is_flag_available_doc}, {"add_partition", (PyCFunction) py_ped_disk_add_partition, METH_VARARGS, disk_add_partition_doc}, {"remove_partition", (PyCFunction) py_ped_disk_remove_partition, METH_VARARGS, disk_remove_partition_doc}, {"delete_partition", (PyCFunction) py_ped_disk_delete_partition, METH_VARARGS, disk_delete_partition_doc}, {"delete_all", (PyCFunction) py_ped_disk_delete_all, METH_VARARGS, disk_delete_all_doc}, {"set_partition_geom", (PyCFunction) py_ped_disk_set_partition_geom, METH_VARARGS, disk_set_partition_geom_doc}, {"maximize_partition", (PyCFunction) py_ped_disk_maximize_partition, METH_VARARGS, disk_maximize_partition_doc}, {"get_max_partition_geometry", (PyCFunction) 
py_ped_disk_get_max_partition_geometry, METH_VARARGS, disk_get_max_partition_geometry_doc}, {"minimize_extended_partition", (PyCFunction) py_ped_disk_minimize_extended_partition, METH_VARARGS, disk_minimize_extended_partition_doc}, {"next_partition", (PyCFunction) py_ped_disk_next_partition, METH_VARARGS, disk_next_partition_doc}, {"get_partition", (PyCFunction) py_ped_disk_get_partition, METH_VARARGS, disk_get_partition_doc}, {"get_partition_by_sector", (PyCFunction) py_ped_disk_get_partition_by_sector, METH_VARARGS, disk_get_partition_by_sector_doc}, {"extended_partition", (PyCFunction) py_ped_disk_extended_partition, METH_VARARGS, disk_extended_partition_doc}, {NULL} }; static PyGetSetDef _ped_Disk_getset[] = { {NULL} /* Sentinel */ }; PyTypeObject _ped_Disk_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Disk", .tp_basicsize = sizeof(_ped_Disk), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_Disk_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Disk_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Disk_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_Disk_doc, .tp_traverse = (traverseproc) _ped_Disk_traverse, .tp_clear = (inquiry) _ped_Disk_clear, .tp_richcompare = (richcmpfunc) _ped_Disk_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Disk_methods, .tp_members = _ped_Disk_members, .tp_getset = _ped_Disk_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_Disk_init, .tp_alloc = PyType_GenericAlloc, .tp_new = PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; /* _ped.DiskType type object */ static PyMemberDef _ped_DiskType_members[] = { {NULL} }; static PyMethodDef _ped_DiskType_methods[] = { {"check_feature", (PyCFunction) py_ped_disk_type_check_feature, METH_VARARGS, disk_type_check_feature_doc}, {NULL} }; static PyGetSetDef _ped_DiskType_getset[] = { {"name", (getter) _ped_DiskType_get, NULL, "The name of the partition table type.", "name"}, {"features", (getter) _ped_DiskType_get, NULL, "A bitmask of features supported by this DiskType.", "features"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_DiskType_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.DiskType", .tp_basicsize = sizeof(_ped_DiskType), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_DiskType_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_DiskType_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_DiskType_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_DiskType_doc, .tp_traverse = (traverseproc) _ped_DiskType_traverse, .tp_clear = (inquiry) _ped_DiskType_clear, .tp_richcompare = (richcmpfunc) _ped_DiskType_richcompare, /* 
.tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_DiskType_methods, .tp_members = _ped_DiskType_members, .tp_getset = _ped_DiskType_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = NULL, .tp_alloc = PyType_GenericAlloc, .tp_new = NULL, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYDISK_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/typeobjects/pyfilesys.h0000664000076400007640000001467411170723402016334 00000000000000/* * pyfilesys.h * pyparted type objects for pyfilesys.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYFILESYS_H_INCLUDED #define TYPEOBJECTS_PYFILESYS_H_INCLUDED #include #include /* _ped.FileSystemType type object */ static PyMemberDef _ped_FileSystemType_members[] = { {NULL} }; static PyMethodDef _ped_FileSystemType_methods[] = { {NULL} }; static PyGetSetDef _ped_FileSystemType_getset[] = { {"name", (getter) _ped_FileSystemType_get, NULL, "The name of the FileSystemType.", "name"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_FileSystemType_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.FileSystemType", .tp_basicsize = sizeof(_ped_FileSystemType), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_FileSystemType_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_FileSystemType_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_FileSystemType_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_FileSystemType_doc, .tp_traverse = (traverseproc) _ped_FileSystemType_traverse, .tp_clear = (inquiry) _ped_FileSystemType_clear, .tp_richcompare = (richcmpfunc) _ped_FileSystemType_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_FileSystemType_methods, .tp_members = _ped_FileSystemType_members, .tp_getset = _ped_FileSystemType_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = NULL, .tp_alloc = PyType_GenericAlloc, .tp_new = NULL, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ 
.tp_bases = NULL, /* .tp_del = XXX */ }; /* _ped.FileSystem type object */ static PyMemberDef _ped_FileSystem_members[] = { {"type", T_OBJECT, offsetof(_ped_FileSystem, type), READONLY, "A _ped.FileSystemType object describing the filesystem on self.geom."}, {"geom", T_OBJECT, offsetof(_ped_FileSystem, geom), READONLY, "The on-disk region where this FileSystem object exists."}, {NULL} }; static PyMethodDef _ped_FileSystem_methods[] = { {"clobber", (PyCFunction) py_ped_file_system_clobber, METH_VARARGS, file_system_clobber_doc}, {"open", (PyCFunction) py_ped_file_system_open, METH_VARARGS, file_system_open_doc}, {"create", (PyCFunction) py_ped_file_system_create, METH_VARARGS, file_system_create_doc}, {"close", (PyCFunction) py_ped_file_system_close, METH_VARARGS, file_system_close_doc}, {"check", (PyCFunction) py_ped_file_system_check, METH_VARARGS, file_system_check_doc}, {"copy", (PyCFunction) py_ped_file_system_copy, METH_VARARGS, file_system_copy_doc}, {"resize", (PyCFunction) py_ped_file_system_resize, METH_VARARGS, file_system_resize_doc}, {"get_resize_constraint", (PyCFunction) py_ped_file_system_get_resize_constraint, METH_VARARGS, file_system_get_resize_constraint_doc}, {NULL} }; static PyGetSetDef _ped_FileSystem_getset[] = { {"checked", (getter) _ped_FileSystem_get, NULL, "Has the filesystem been checked prior to calling copy or resize?", "checked"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_FileSystem_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.FileSystem", .tp_basicsize = sizeof(_ped_FileSystem), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_FileSystem_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_FileSystemType_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_FileSystem_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_FileSystem_doc, .tp_traverse = (traverseproc) _ped_FileSystem_traverse, .tp_clear = (inquiry) _ped_FileSystem_clear, .tp_richcompare = (richcmpfunc) _ped_FileSystem_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_FileSystem_methods, .tp_members = _ped_FileSystem_members, .tp_getset = _ped_FileSystem_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_FileSystem_init, .tp_alloc = PyType_GenericAlloc, .tp_new = PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYFILESYS_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/typeobjects/pydevice.h0000664000076400007640000002602011323155321016100 00000000000000/* * pydevice.h * pyparted type objects for pydevice.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. 
* This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYDEVICE_H_INCLUDED #define TYPEOBJECTS_PYDEVICE_H_INCLUDED #include #include /* _ped.CHSGeometry type object */ static PyMemberDef _ped_CHSGeometry_members[] = { {NULL} }; static PyMethodDef _ped_CHSGeometry_methods[] = { {NULL} }; static PyGetSetDef _ped_CHSGeometry_getset[] = { {"cylinders", (getter) _ped_CHSGeometry_get, NULL, "The number of cylinders.", "cylinders"}, {"heads", (getter) _ped_CHSGeometry_get, NULL, "The number of heads", "heads"}, {"sectors", (getter) _ped_CHSGeometry_get, NULL, "The number of sectors", "sectors"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_CHSGeometry_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.CHSGeometry", .tp_basicsize = sizeof(_ped_CHSGeometry), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_CHSGeometry_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_CHSGeometry_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_CHSGeometry_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_CHSGeometry_doc, .tp_traverse = (traverseproc) _ped_CHSGeometry_traverse, .tp_clear = (inquiry) _ped_CHSGeometry_clear, .tp_richcompare = (richcmpfunc) _ped_CHSGeometry_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_CHSGeometry_methods, .tp_members = _ped_CHSGeometry_members, .tp_getset = _ped_CHSGeometry_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = NULL, .tp_alloc = PyType_GenericAlloc, .tp_new = NULL, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; /* _ped.Device type object */ static PyMemberDef _ped_Device_members[] = { {"hw_geom", T_OBJECT, offsetof(_ped_Device, hw_geom), READONLY, "The CHSGeometry of the Device as reported by the hardware."}, {"bios_geom", T_OBJECT, offsetof(_ped_Device, bios_geom), READONLY, "The CHSGeometry of the Device as reported by the BIOS."}, {NULL} }; static PyMethodDef _ped_Device_methods[] = { /* * This is a unique function as it's in pydisk.c, but is really * a method on _ped.Device, so it's part of this PyMethod Def */ {"disk_probe", (PyCFunction) py_ped_disk_probe, METH_VARARGS, disk_probe_doc}, /* These functions are all in pydevice.c */ {"is_busy", (PyCFunction) py_ped_device_is_busy, METH_VARARGS, device_is_busy_doc}, {"open", (PyCFunction) py_ped_device_open, METH_VARARGS, device_open_doc}, {"close", (PyCFunction) 
py_ped_device_close, METH_VARARGS, device_close_doc}, {"destroy", (PyCFunction) py_ped_device_destroy, METH_VARARGS, device_destroy_doc}, {"cache_remove", (PyCFunction) py_ped_device_cache_remove, METH_VARARGS, device_cache_remove_doc}, {"begin_external_access", (PyCFunction) py_ped_device_begin_external_access, METH_VARARGS, device_begin_external_access_doc}, {"end_external_access", (PyCFunction) py_ped_device_end_external_access, METH_VARARGS, device_end_external_access_doc}, {"read", (PyCFunction) py_ped_device_read, METH_VARARGS, device_read_doc}, {"write", (PyCFunction) py_ped_device_write, METH_VARARGS, device_write_doc}, {"sync", (PyCFunction) py_ped_device_sync, METH_VARARGS, device_sync_doc}, {"sync_fast", (PyCFunction) py_ped_device_sync_fast, METH_VARARGS, device_sync_fast_doc}, {"check", (PyCFunction) py_ped_device_check, METH_VARARGS, device_check_doc}, {"get_constraint", (PyCFunction) py_ped_device_get_constraint, METH_VARARGS, device_get_constraint_doc}, {"get_minimal_aligned_constraint", (PyCFunction) py_ped_device_get_minimal_aligned_constraint, METH_NOARGS, device_get_minimal_aligned_constraint_doc}, {"get_optimal_aligned_constraint", (PyCFunction) py_ped_device_get_optimal_aligned_constraint, METH_NOARGS, device_get_optimal_aligned_constraint_doc}, {"get_minimum_alignment", (PyCFunction) py_ped_device_get_minimum_alignment, METH_NOARGS, device_get_minimum_alignment_doc}, {"get_optimum_alignment", (PyCFunction) py_ped_device_get_optimum_alignment, METH_NOARGS, device_get_optimum_alignment_doc}, /* * These functions are in pydisk.c, but they work best as * methods on a _ped.Device. */ {"clobber", (PyCFunction) py_ped_disk_clobber, METH_VARARGS, disk_clobber_doc}, /* * These functions are in pyfilesys.c, but they work best * as methods on a _ped.Device */ {"get_create_constraint", (PyCFunction) py_ped_file_system_get_create_constraint, METH_VARARGS, file_system_get_create_constraint_doc}, {"get_copy_constraint", (PyCFunction) py_ped_file_system_get_copy_constraint, METH_VARARGS, file_system_get_copy_constraint_doc}, /* * These functions are in pyunit.c, but they work best as methods * on a _ped.Device */ {"unit_get_size", (PyCFunction) py_ped_unit_get_size, METH_VARARGS, unit_get_size_doc}, {"unit_format_custom_byte", (PyCFunction) py_ped_unit_format_custom_byte, METH_VARARGS, unit_format_custom_byte_doc}, {"unit_format_byte", (PyCFunction) py_ped_unit_format_byte, METH_VARARGS, unit_format_byte_doc}, {"unit_format_custom", (PyCFunction) py_ped_unit_format_custom, METH_VARARGS, unit_format_custom_doc}, {"unit_format", (PyCFunction) py_ped_unit_format, METH_VARARGS, unit_format_doc}, {"unit_parse", (PyCFunction) py_ped_unit_parse, METH_VARARGS, unit_parse_doc}, {"unit_parse_custom", (PyCFunction) py_ped_unit_parse_custom, METH_VARARGS, unit_parse_custom_doc}, {NULL} }; static PyGetSetDef _ped_Device_getset[] = { {"model", (getter) _ped_Device_get, NULL, "A brief description of the hardware, usually mfr and model.", "model"}, {"path", (getter) _ped_Device_get, NULL, "The operating system level path to the device node.", "path"}, {"type", (getter) _ped_Device_get, NULL, "The type of device, deprecated in favor of PedDeviceType", "type"}, {"sector_size", (getter) _ped_Device_get, NULL, "Logical sector size.", "sector_size"}, {"phys_sector_size", (getter) _ped_Device_get, NULL, "Physical sector size.", "phys_sector_size"}, {"length", (getter) _ped_Device_get, NULL, "Device length, in sectors (LBA).", "length"}, {"open_count", (getter) _ped_Device_get, NULL, "How many times 
self.open() has been called.", "open_count"}, {"read_only", (getter) _ped_Device_get, NULL, "Is the device opened in read-only mode?", "read_only"}, {"external_mode", (getter) _ped_Device_get, NULL, "PedDevice external_mode", "external_mode"}, {"dirty", (getter) _ped_Device_get, NULL, "Have any unflushed changes been made to self?", "dirty"}, {"boot_dirty", (getter) _ped_Device_get, NULL, "Have any unflushed changes been made to the bootloader?", "boot_dirty"}, {"host", (getter) _ped_Device_get, NULL, "Any SCSI host ID associated with self.", "host"}, {"did", (getter) _ped_Device_get, NULL, "Any SCSI device ID associated with self.", "did"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_Device_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Device", .tp_basicsize = PyGC_HEAD_SIZE + sizeof(_ped_Device), .tp_itemsize = 0, .tp_dealloc = (destructor) _ped_Device_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Device_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Device_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_Device_doc, .tp_traverse = (traverseproc) _ped_Device_traverse, .tp_clear = (inquiry) _ped_Device_clear, .tp_richcompare = (richcmpfunc) _ped_Device_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Device_methods, .tp_members = _ped_Device_members, .tp_getset = _ped_Device_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = NULL, .tp_alloc = PyType_GenericAlloc, .tp_new = NULL, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYDEVICE_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/typeobjects/Makefile.am0000664000076400007640000000231711151317256016164 00000000000000# # Makefile.am for pyparted include/typeobjects subdirectory # # Copyright (C) 2008 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
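#
# A short, hedged Python sketch of the _ped.Device attributes and unit
# helpers declared in typeobjects/pydevice.h above. Obtaining the device via
# a module-level probe function is an assumption made for illustration; the
# attribute and method names themselves are taken from that header:
#
#     import _ped
#     dev = _ped.device_get("/dev/sda")     # assumed probe entry point
#     print(dev.model, dev.path)            # getters from _ped_Device_getset
#     print(dev.sector_size, dev.length)    # logical sector size, length in sectors
#     print(dev.unit_format_byte(dev.length * dev.sector_size))
#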
# # Red Hat Author(s): David Cantrell # noinst_HEADERS = pyconstraint.h pydevice.h pydisk.h pyfilesys.h \ pygeom.h pynatmath.h pytimer.h MAINTAINERCLEANFILES = Makefile.in pyparted-3.6/include/typeobjects/pygeom.h0000664000076400007640000001242211170723402015572 00000000000000/* * pygeom.h * pyparted type objects for pygeom.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYGEOM_H_INCLUDED #define TYPEOBJECTS_PYGEOM_H_INCLUDED #include #include /* _ped.Geometry type object */ static PyMemberDef _ped_Geometry_members[] = { {"dev", T_OBJECT, offsetof(_ped_Geometry, dev), READONLY, "The _ped.Device described by this _ped.Geometry object."}, {NULL} }; static PyMethodDef _ped_Geometry_methods[] = { {"duplicate", (PyCFunction) py_ped_geometry_duplicate, METH_VARARGS, geometry_duplicate_doc}, {"intersect", (PyCFunction) py_ped_geometry_intersect, METH_VARARGS, geometry_intersect_doc}, {"set", (PyCFunction) py_ped_geometry_set, METH_VARARGS, geometry_set_doc}, {"set_start", (PyCFunction) py_ped_geometry_set_start, METH_VARARGS, geometry_set_start_doc}, {"set_end", (PyCFunction) py_ped_geometry_set_end, METH_VARARGS, geometry_set_end_doc}, {"test_overlap", (PyCFunction) py_ped_geometry_test_overlap, METH_VARARGS, geometry_test_overlap_doc}, {"test_inside", (PyCFunction) py_ped_geometry_test_inside, METH_VARARGS, geometry_test_inside_doc}, {"test_equal", (PyCFunction) py_ped_geometry_test_equal, METH_VARARGS, geometry_test_equal_doc}, {"test_sector_inside", (PyCFunction) py_ped_geometry_test_sector_inside, METH_VARARGS, geometry_test_sector_inside_doc}, {"read", (PyCFunction) py_ped_geometry_read, METH_VARARGS, geometry_read_doc}, {"sync", (PyCFunction) py_ped_geometry_sync, METH_VARARGS, geometry_sync_doc}, {"sync_fast", (PyCFunction) py_ped_geometry_sync_fast, METH_VARARGS, geometry_sync_fast_doc}, {"write", (PyCFunction) py_ped_geometry_write, METH_VARARGS, geometry_write_doc}, {"check", (PyCFunction) py_ped_geometry_check, METH_VARARGS, geometry_check_doc}, {"map", (PyCFunction) py_ped_geometry_map, METH_VARARGS, geometry_map_doc}, {NULL} }; static PyGetSetDef _ped_Geometry_getset[] = { {"start", (getter) _ped_Geometry_get, (setter) _ped_Geometry_set, "The starting Sector of the region.", "start"}, {"length", (getter) _ped_Geometry_get, (setter) _ped_Geometry_set, "The length of the region described by this Geometry object.", "length"}, {"end", (getter) _ped_Geometry_get, (setter) _ped_Geometry_set, "The ending Sector of the region.", "end"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_Geometry_Type_obj = { 
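/*
 * A minimal Python sketch of the _ped.Geometry surface defined above: the
 * read/write start, length and end getset entries, the read-only "dev"
 * member, and the set_start()/set_end() methods. The constructor arguments
 * shown are an assumption for illustration (the accepted arguments are
 * defined by _ped_Geometry_init in pygeom.c, not in this header):
 *
 *     # dev: assumed to be an existing _ped.Device instance
 *     geom = _ped.Geometry(dev, start=2048, length=2048)  # argument names assumed
 *     geom.set_end(geom.start + 4095)   # adjusts length to match the new end
 *     print(geom.start, geom.length, geom.end, geom.dev)
 */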
PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Geometry", .tp_basicsize = sizeof(_ped_Geometry), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_Geometry_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Geometry_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Geometry_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_Geometry_doc, .tp_traverse = (traverseproc) _ped_Geometry_traverse, .tp_clear = (inquiry) _ped_Geometry_clear, .tp_richcompare = (richcmpfunc) _ped_Geometry_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Geometry_methods, .tp_members = _ped_Geometry_members, .tp_getset = _ped_Geometry_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_Geometry_init, .tp_alloc = PyType_GenericAlloc, .tp_new = PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYGEOM_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/typeobjects/pytimer.h0000664000076400007640000001024611170723402015765 00000000000000/* * pytimer.h * pyparted type objects for pytimer.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYTIMER_H_INCLUDED #define TYPEOBJECTS_PYTIMER_H_INCLUDED #include #include /* _ped.Timer type object */ static PyMemberDef _ped_Timer_members[] = { {NULL} }; static PyMethodDef _ped_Timer_methods[] = { {"destroy", (PyCFunction) py_ped_timer_destroy, METH_VARARGS, NULL}, {"new_nested", (PyCFunction) py_ped_timer_new_nested, METH_VARARGS, NULL}, {"destroy_nested", (PyCFunction) py_ped_timer_destroy_nested, METH_VARARGS, NULL}, {"touch", (PyCFunction) py_ped_timer_touch, METH_VARARGS, NULL}, {"reset", (PyCFunction) py_ped_timer_reset, METH_VARARGS, NULL}, {"update", (PyCFunction) py_ped_timer_update, METH_VARARGS, NULL}, {"set_state_name", (PyCFunction) py_ped_timer_set_state_name, METH_VARARGS, NULL}, {NULL} }; static PyGetSetDef _ped_Timer_getset[] = { {"frac", (getter) _ped_Timer_get, (setter) _ped_Timer_set, "PedTimer frac", "frac"}, {"start", (getter) _ped_Timer_get, (setter) _ped_Timer_set, "PedTimer.start", "start"}, {"now", (getter) _ped_Timer_get, (setter) _ped_Timer_set, "PedTimer.now", "now"}, {"predicted_end", (getter) _ped_Timer_get, (setter) _ped_Timer_set, "PedTimer.predicted_end", "predicted_end"}, {"state_name", (getter) _ped_Timer_get, (setter) _ped_Timer_set, "PedTimer.state_name", "state_name"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_Timer_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Timer", .tp_basicsize = sizeof(_ped_Timer), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_Timer_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Timer_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Timer_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = "PedTimer objects", .tp_traverse = (traverseproc) _ped_Timer_traverse, .tp_clear = (inquiry) _ped_Timer_clear, .tp_richcompare = (richcmpfunc) _ped_Timer_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Timer_methods, .tp_members = _ped_Timer_members, .tp_getset = _ped_Timer_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_Timer_init, .tp_alloc = PyType_GenericAlloc, .tp_new = PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYTIMER_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/typeobjects/pyconstraint.h0000664000076400007640000001143611170723402017033 00000000000000/* * pyconstraint.h * pyparted type objects for pyconstraint.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. 
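 *
 * A small, hedged Python sketch of the _ped.Timer attributes and methods laid
 * out in typeobjects/pytimer.h above; constructing the timer with no
 * arguments is an assumption made for illustration (the accepted arguments
 * are defined by _ped_Timer_init in pytimer.c):
 *
 *     t = _ped.Timer()             # assumed no-argument construction
 *     t.set_state_name("copying")  # updates the state_name attribute
 *     t.touch()                    # refreshes t.now
 *     print(t.frac, t.state_name)
 *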
You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef TYPEOBJECTS_PYCONSTRAINT_H_INCLUDED #define TYPEOBJECTS_PYCONSTRAINT_H_INCLUDED #include #include /* _ped.Constraint type object */ static PyMemberDef _ped_Constraint_members[] = { {"start_align", T_OBJECT, offsetof(_ped_Constraint, start_align), 0, "The _ped.Alignment describing the starting alignment constraints of the partition."}, {"end_align", T_OBJECT, offsetof(_ped_Constraint, end_align), 0, "The _ped.Alignment describing the ending alignment constraints of the partition."}, {"start_range", T_OBJECT, offsetof(_ped_Constraint, start_range), 0, "The _ped.Geometry describing the minimum size constraints of the partition."}, {"end_range", T_OBJECT, offsetof(_ped_Constraint, end_range), 0, "The _ped.Geometry describing the maximum size constraints of the partition."}, {NULL} }; static PyMethodDef _ped_Constraint_methods[] = { {"duplicate", (PyCFunction) py_ped_constraint_duplicate, METH_VARARGS, constraint_duplicate_doc}, {"intersect", (PyCFunction) py_ped_constraint_intersect, METH_VARARGS, constraint_intersect_doc}, {"solve_max", (PyCFunction) py_ped_constraint_solve_max, METH_VARARGS, constraint_solve_max_doc}, {"solve_nearest", (PyCFunction) py_ped_constraint_solve_nearest, METH_VARARGS, constraint_solve_nearest_doc}, {"is_solution", (PyCFunction) py_ped_constraint_is_solution, METH_VARARGS, constraint_is_solution_doc}, {NULL} }; static PyGetSetDef _ped_Constraint_getset[] = { {"min_size", (getter) _ped_Constraint_get, (setter) _ped_Constraint_set, "The mimimum size in _ped.Sectors of the partition.", "min_size"}, {"max_size", (getter) _ped_Constraint_get, (setter) _ped_Constraint_set, "The maximum size in _ped.Sectors of the partition.", "max_size"}, {NULL} /* Sentinel */ }; PyTypeObject _ped_Constraint_Type_obj = { PyObject_HEAD_INIT(&PyType_Type) .tp_name = "_ped.Constraint", .tp_basicsize = sizeof(_ped_Constraint), /* .tp_itemsize = XXX */ .tp_dealloc = (destructor) _ped_Constraint_dealloc, /* .tp_getattr = XXX */ /* .tp_setattr = XXX */ .tp_compare = (cmpfunc) _ped_Constraint_compare, /* .tp_repr = XXX */ /* .tp_as_number = XXX */ /* .tp_as_sequence = XXX */ /* .tp_as_mapping = XXX */ .tp_hash = PyObject_HashNotImplemented, .tp_call = NULL, .tp_str = (reprfunc) _ped_Constraint_str, .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, /* .tp_as_buffer = XXX */ .tp_flags = Py_TPFLAGS_HAVE_CLASS | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_RICHCOMPARE, .tp_doc = _ped_Constraint_doc, .tp_traverse = (traverseproc) _ped_Constraint_traverse, .tp_clear = (inquiry) _ped_Constraint_clear, .tp_richcompare = (richcmpfunc) _ped_Constraint_richcompare, /* .tp_weaklistoffset = XXX */ /* .tp_iter = XXX */ /* .tp_iternext = XXX */ .tp_methods = _ped_Constraint_methods, .tp_members = _ped_Constraint_members, .tp_getset = _ped_Constraint_getset, .tp_base = NULL, .tp_dict = NULL, /* .tp_descr_get = XXX */ /* .tp_descr_set = XXX */ /* .tp_dictoffset = XXX */ .tp_init = (initproc) _ped_Constraint_init, .tp_alloc = PyType_GenericAlloc, .tp_new = 
PyType_GenericNew, /* .tp_free = XXX */ /* .tp_is_gc = XXX */ .tp_bases = NULL, /* .tp_del = XXX */ }; #endif /* TYPEOBJECTS_PYCONSTRAINT_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pyunit.h0000664000076400007640000000313511170723402013270 00000000000000/* * pyunit.h * pyparted type definitions for pyunit.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell */ #ifndef PYUNIT_H_INCLUDED #define PYUNIT_H_INCLUDED #include #include /* a PedUnit is a long int in C, so we store it that way in Python */ /* 1:1 function mappings for unit.h in libparted */ PyObject *py_ped_unit_set_default(PyObject *, PyObject *); PyObject *py_ped_unit_get_default(PyObject *, PyObject *); PyObject *py_ped_unit_get_name(PyObject *, PyObject *); PyObject *py_ped_unit_get_by_name(PyObject *, PyObject *); #endif /* PYUNIT_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pygeom.h0000664000076400007640000000606111170723402013241 00000000000000/* * pygeom.h * pyparted type definitions for pygeom.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
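 *
 * A brief, hedged Python sketch of the module-level unit helpers declared in
 * include/pyunit.h above, assuming the bindings export those 1:1 mappings
 * under these names; the _ped.UNIT_MEGABYTE constant is likewise an
 * assumption made for illustration:
 *
 *     import _ped
 *     name = _ped.unit_get_name(_ped.UNIT_MEGABYTE)  # constant assumed
 *     unit = _ped.unit_get_by_name("MB")
 *     _ped.unit_set_default(unit)
 *     print(name, _ped.unit_get_default())
 *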
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef PYGEOM_H_INCLUDED #define PYGEOM_H_INCLUDED #include #include /* 1:1 function mappings for geom.h in libparted */ PyObject *py_ped_geometry_duplicate(PyObject *, PyObject *); PyObject *py_ped_geometry_intersect(PyObject *, PyObject *); PyObject *py_ped_geometry_set(PyObject *, PyObject *); PyObject *py_ped_geometry_set_start(PyObject *, PyObject *); PyObject *py_ped_geometry_set_end(PyObject *, PyObject *); PyObject *py_ped_geometry_test_overlap(PyObject *, PyObject *); PyObject *py_ped_geometry_test_inside(PyObject *, PyObject *); PyObject *py_ped_geometry_test_equal(PyObject *, PyObject *); PyObject *py_ped_geometry_test_sector_inside(PyObject *, PyObject *); PyObject *py_ped_geometry_read(PyObject *, PyObject *); PyObject *py_ped_geometry_sync(PyObject *, PyObject *); PyObject *py_ped_geometry_sync_fast(PyObject *, PyObject *); PyObject *py_ped_geometry_write(PyObject *, PyObject *); PyObject *py_ped_geometry_check(PyObject *, PyObject *); PyObject *py_ped_geometry_map(PyObject *, PyObject *); /* _ped.Geometry type is the Python equivalent of PedGeometry in libparted */ typedef struct { PyObject_HEAD /* PedGeometry members */ PyObject *dev; /* _ped.Device */ /* store the PedGeometry from libparted */ PedGeometry *ped_geometry; } _ped_Geometry; void _ped_Geometry_dealloc(_ped_Geometry *); int _ped_Geometry_compare(_ped_Geometry *, PyObject *); PyObject *_ped_Geometry_richcompare(_ped_Geometry *, PyObject *, int); PyObject *_ped_Geometry_str(_ped_Geometry *); int _ped_Geometry_traverse(_ped_Geometry *, visitproc, void *); int _ped_Geometry_clear(_ped_Geometry *); int _ped_Geometry_init(_ped_Geometry *, PyObject *, PyObject *); PyObject *_ped_Geometry_get(_ped_Geometry *, void *); int _ped_Geometry_set(_ped_Geometry *, PyObject *, void *); extern PyTypeObject _ped_Geometry_Type_obj; #endif /* PYGEOM_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pytimer.h0000664000076400007640000000473411170723402013437 00000000000000/* * pytimer.h * pyparted type definitions for pytimer.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell */ #ifndef PYTIMER_H_INCLUDED #define PYTIMER_H_INCLUDED #include #include /* 1:1 function mappings for timer.h in libparted */ PyObject *py_ped_timer_destroy(PyObject *, PyObject *); PyObject *py_ped_timer_new_nested(PyObject *, PyObject *); PyObject *py_ped_timer_destroy_nested(PyObject *, PyObject *); PyObject *py_ped_timer_touch(PyObject *, PyObject *); PyObject *py_ped_timer_reset(PyObject *, PyObject *); PyObject *py_ped_timer_update(PyObject *, PyObject *); PyObject *py_ped_timer_set_state_name(PyObject *, PyObject *); /* _ped.Timer type is the Python equivalent of PedTimer in libparted */ typedef struct { PyObject_HEAD /* PedTimer members */ float frac; time_t start; time_t now; time_t predicted_end; char *state_name; PedTimerHandler *handler; void *context; } _ped_Timer; void _ped_Timer_dealloc(_ped_Timer *); int _ped_Timer_compare(_ped_Timer *, PyObject *); PyObject *_ped_Timer_richcompare(_ped_Timer *, PyObject *, int); PyObject *_ped_Timer_str(_ped_Timer *); int _ped_Timer_traverse(_ped_Timer *, visitproc, void *); int _ped_Timer_clear(_ped_Timer *); int _ped_Timer_init(_ped_Timer *, PyObject *, PyObject *); PyObject *_ped_Timer_get(_ped_Timer *, void *); int _ped_Timer_set(_ped_Timer *, PyObject *, void *); extern PyTypeObject _ped_Timer_Type_obj; #endif /* PYTIMER_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/docstrings/0000775000076400007640000000000011542323614014030 500000000000000pyparted-3.6/include/docstrings/Makefile.in0000664000076400007640000003132711542323606016024 00000000000000# Makefile.in generated by automake 1.11.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ # # Makefile.am for pyparted include/docstrings subdirectory # # Copyright (C) 2007 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. 
# # Red Hat Author(s): David Cantrell # VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = include/docstrings DIST_COMMON = $(noinst_HEADERS) $(srcdir)/Makefile.am \ $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libparted.m4 \ $(top_srcdir)/m4/python.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = HEADERS = $(noinst_HEADERS) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBPARTED_LIBS = @LIBPARTED_LIBS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PKG_CONFIG = @PKG_CONFIG@ PYTHON = @PYTHON@ PYTHON_EMBED_LIBS = @PYTHON_EMBED_LIBS@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_INCLUDES = @PYTHON_INCLUDES@ PYTHON_LDFLAGS = @PYTHON_LDFLAGS@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ 
install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libparted_CFLAGS = @libparted_CFLAGS@ libparted_LIBS = @libparted_LIBS@ localedir = @localedir@ localstatedir = @localstatedir@ lt_ECHO = @lt_ECHO@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ noinst_HEADERS = pyconstraint.h pydevice.h pydisk.h pyfilesys.h pygeom.h \ pynatmath.h MAINTAINERCLEANFILES = Makefile.in all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/docstrings/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign include/docstrings/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: 
here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(HEADERS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ clean-libtool ctags distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: pyparted-3.6/include/docstrings/pynatmath.h0000664000076400007640000000717611151317256016142 00000000000000/* * docstrings/pynatmath.h * pyparted docstrings for for pynatmath.c * * Copyright (C) 2007, 2008 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef DOCSTRINGS_PYNATMATH_H_INCLUDED #define DOCSTRINGS_PYNATMATH_H_INCLUDED #include PyDoc_STRVAR(alignment_duplicate_doc, "duplicate(self) -> _ped.Alignment\n\n" "Create an identical copy of self. Raises _ped.CreateException if the\n" "operation fails"); PyDoc_STRVAR(alignment_intersect_doc, "intersect(self, Alignment) -> _ped.Alignment\n\n" "Create a new Alignment that describes the intersection of self and\n" "Alignment. A sector will satisfy the new Alignment iff it satisfies both\n" "of the original alignments, where 'satisfy' is determined by is_aligned().\n" "The proof of this is relatively complicated and is described thoroughly\n" "in the libparted source. 
This method raises ArithmeticError if no\n" "intersection can be found."); PyDoc_STRVAR(alignment_align_up_doc, "align_up(self, Geometry, Sector) -> Sector\n\n" "Returns the closest Sector to the input Sector that lies inside Geometry\n" "and satisfies the alignment constraint. This method prefers, but does not\n" "guarantee, that the result is beyond Sector. If no such Sector can be\n" "found, an ArithmeticError is raised."); PyDoc_STRVAR(alignment_align_down_doc, "align_down(self, Geometry, Sector) -> Sector\n\n" "Returns the closest Sector to the input Sector that lies inside Geometry\n" "and satisfies the alignment constraint. This method prefers, but does not\n" "guarantee, that the result is below Sector. If no such Sector can be\n" "found, an ArithmeticError is raised."); PyDoc_STRVAR(alignment_align_nearest_doc, "align_nearest(self, Geometry, Sector) -> Sector\n\n" "Returns the closest Sector to the input Sector that lies inside Geometry\n" "and satisfies the aligmnent constraint. If no such Sector can be found,\n" "an ArithmeticError is raised."); PyDoc_STRVAR(alignment_is_aligned_doc, "is_aligned(self, Geometry, Sector) -> boolean\n\n" "Returns whether or not Sector lies inside Geometry and satisfies the\n" "alignment constraint. This method defines what 'satisfy' means for\n" "intersection."); PyDoc_STRVAR(_ped_Alignment_doc, "A _ped.Alignment object describes constraints on how sectors and Geometry\n" "objects are aligned. It includes a variety of methods for aligning sectors\n" "and calculating the intersection of two Alignment objects. Most methods on\n" "this object can raise _ped.CreateException if creating temporary objects\n" "fails and ArithmeticError if calculating alignments and intersections fails."); #endif /* DOCSTRINGS_PYNATMATH_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/docstrings/pydisk.h0000664000076400007640000003212411313012377015423 00000000000000/* * pydisk.h * pyparted docstrings for pydisk.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef DOCSTRINGS_PYDISK_H_INCLUDED #define DOCSTRINGS_PYDISK_H_INCLUDED #include PyDoc_STRVAR(partition_destroy_doc, "destroy(self) -> None\n\n" "Destroys the Partition object."); PyDoc_STRVAR(partition_is_active_doc, "is_active(self) -> boolean\n\n" "Return whether self is active or not."); PyDoc_STRVAR(partition_set_flag_doc, "set_flag(self, flag, state) -> boolean\n\n" "Sets the state of the given flag on self . Flags have different types of\n" "different types of disk labels, and are not guaranteed to exist on all disk\n" "label types. 
If provided with an invalid flag for the disk's label,\n" "_ped.PartitionException is raised."); PyDoc_STRVAR(partition_get_flag_doc, "get_flag(self, flag) -> integer\n\n" "Return the state of the given flag on self. There is no check for invalid\n" "flag types, so these will always return 0. It is therefore recommended to\n" "call self.is_flag_available() first to make sure."); PyDoc_STRVAR(partition_is_flag_available_doc, "is_flag_available(self, flag) -> boolean\n\n" "Return whether the given flag is valid for self."); PyDoc_STRVAR(partition_set_system_doc, "set_system(self, FileSystemType) -> boolean\n\n" "Set the system type on self to FileSystemType. On error,\n" "_ped.PartitionException is raised."); PyDoc_STRVAR(partition_set_name_doc, "set_name(self, string) -> boolean\n\n" "On disk labels that support it, this method sets the partition's name.\n" "Before attempting this operation, DiskType.check_feature() can be used to\n" "determine if it is even supported. On error, _ped.PartitionException will\n" "be raised."); PyDoc_STRVAR(partition_get_name_doc, "get_name(self) -> string\n\n" "On disk labels that support it, this method returns the partition's name. On\n" "all other disk labels, _ped.PartitionException will be raised. Before calling\n" "this method, DiskType.check_feature() can be called to check for support."); PyDoc_STRVAR(partition_is_busy_doc, "is_busy(self) -> boolean\n\n" "Return whether self is busy or not. The most likely reason for a partition\n" "to be busy is because it's mounted. Additionally, extended partitions are\n" "busy if any of their logical partitions are busy."); PyDoc_STRVAR(partition_get_path_doc, "get_path(self) -> string\n\n" "Return a path that could be used for addressing self at an operating system\n" "level. For instance, on Linux this could return '/dev/sda' for a partition.\n" "If an error occurs, _ped.PartitionException is raised."); PyDoc_STRVAR(disk_duplicate_doc, "duplicate(self) -> Disk\n\n" "Return a new Disk that is a copy of self. This method raises\n" "_ped.DiskException if there is an error making the copy."); PyDoc_STRVAR(disk_destroy_doc, "destroy(self) -> None\n\n" "Destroy the Disk object."); PyDoc_STRVAR(disk_commit_doc, "commit(self) -> boolean\n\n" "Write the in-memory changes to the disk's partition table and inform the\n" "operating system of the changes. This method is equivalent to calling:\n" "\tself.disk_commit_to_dev()\n" "\tself.disk_commit_to_os()\n" "On error, _ped.DiskException is raised."); PyDoc_STRVAR(disk_commit_to_dev_doc, "commit_to_dev(self) -> boolean\n\n" "Write the in-memory changes to the disk's partition table. On error,\n" "_ped.DiskException is raised."); PyDoc_STRVAR(disk_commit_to_os_doc, "commit_to_os(self) -> boolean\n\n" "Inform the operating system that disk's partition table layout has changed.\n" "What exactly this means depends on the operating system. On error, a\n" "_ped.DiskException is raised."); PyDoc_STRVAR(disk_check_doc, "check(self) -> boolean\n\n" "Perform a basic sanity check on the partition table. This check does not\n" "depend on the type of disk. 
If there is an error performing the check,\n" "_ped.DiskException is raised."); PyDoc_STRVAR(disk_print_doc, "print(self) -> None\n\n" "Print a summary of the partitions on self."); PyDoc_STRVAR(disk_get_primary_partition_count_doc, "get_primary_partition_count(self) -> integer\n\n" "Return the number of primary partitions on self."); PyDoc_STRVAR(disk_get_last_partition_num_doc, "get_last_partition_num(self) -> integer\n\n" "Return the highest in-use partition number on self."); PyDoc_STRVAR(disk_get_max_primary_partition_count_doc, "get_max_primary_partition_count(self) -> integer\n\n" "Get the maximum number of primary partitions spported by the disk label."); PyDoc_STRVAR(disk_get_max_supported_partition_count_doc, "get_max_supported_partition_count(self) -> integer\n\n" "Get the highest supported partition number of this disk."); PyDoc_STRVAR(disk_get_partition_alignment_doc, "get_partition_alignment(self) -> Alignment\n\n" "Get the alignment needed for partition boundaries on this disk.\n" "The returned alignment describes the alignment for the start sector\n" "of the partition, for all disklabel types which require alignment,\n" "except Sun disklabels, the end sector must be aligned too.\n" "To get the end sector alignment decrease the PedAlignment offset by 1.\n"); PyDoc_STRVAR(disk_max_partition_length_doc, "max_partition_length(self) -> long\n\n" "This returns the maximum length for a partition the label on this disk\n" "can represent. This does not necessarily mean that there is enough\n" "freespace to create such a partition.\n" "If this information is not available 0 is returned"); PyDoc_STRVAR(disk_max_partition_start_sector_doc, "max_partition_start_sector(self) -> long\n\n" "This returns the maximum partition start sector the label on this disk\n" "can represent.\n" "If this information is not available 0 is returned"); PyDoc_STRVAR(disk_set_flag_doc, "set_flag(self, flag, state) -> boolean\n\n" "Sets the state of the given flag on self .\n" "If provided with an invalid flag for the disk's label,\n" "a PartedException is raised."); PyDoc_STRVAR(disk_get_flag_doc, "get_flag(self, flag) -> boolean\n\n" "Return the state of the given flag on self. There is no check for invalid\n" "flag types, so these will always return 0. It is therefore recommended to\n" "call self.is_flag_available() first to make sure."); PyDoc_STRVAR(disk_is_flag_available_doc, "is_flag_available(self, flag) -> boolean\n\n" "Return whether the given flag is valid for self."); PyDoc_STRVAR(disk_add_partition_doc, "add_partition(self, Partition, Constraint) -> boolean\n\n" "Adds the new partition Partition to self. This operation may modify the\n" "partition's geometry, subject to Constraint. Having a strict Constraint\n" "will likely cause this operation to fail, raising a _ped.PartitionException\n" "in the process."); PyDoc_STRVAR(disk_remove_partition_doc, "remove_partition(self, Partition) -> boolean\n\n" "Remove Partition from self. If Partition is an extended partition, it must\n" "not contain any logical partitions. The Partition object itself is not\n" "destroyed. The caller must use Partition.destroy() or self.delete_partition().\n" "For all error cases, _ped.PartitionException will be raised."); PyDoc_STRVAR(disk_delete_partition_doc, "delete_partition(self, Partition) -> boolean\n\n" "Remove Partition from self and destroy the Partition object afterwards. 
This\n" "is equivalent to calling:\n" "\tself.remove_partition(Partition)\n" "\tPartition.destroy()\n" "For all error cases, _ped.PartitionException will be raised."); PyDoc_STRVAR(disk_delete_all_doc, "delete_all(self) -> boolean\n\n" "Remove and destroy all partitions on self, raising _ped.PartitionException on\n" "any error case."); PyDoc_STRVAR(disk_set_partition_geom_doc, "set_partition_geom(self, Partition, Constraint, start_sector, end_sector) ->\n" " boolean\n\n" "Change the location of Partition by setting a new Geometry on it, subject to\n" "the restrictions of Constraint. This operation can fail for many reasons,\n" "all of which result in a _ped.PartitionException. One of the most likely\n" "failure cases is that the new location overlaps with an existing partition.\n" "On error, Partition will be unchanged. On success, the contents of the\n" "partition will still not be changed - the file system itself will still\n" "need to be resized."); PyDoc_STRVAR(disk_maximize_partition_doc, "maximize_partition(self, Partition, Constraint) -> boolean\n\n" "Grow the Partition to the largest possible size, subject to the restrictions\n" "of Constraint. Raise _ped.PartitionException on error."); PyDoc_STRVAR(disk_get_max_partition_geometry_doc, "get_max_partition_geometry(self, Partition, Constraint) -> Geometry\n\n" "Return the maximum Geometry that Partition can be grown to, subject to the\n" "restrictions of Constraint. Raise _ped.PartitionException on error."); PyDoc_STRVAR(disk_minimize_extended_partition_doc, "minimize_extended_partition(self) -> boolean\n\n" "Reduce the size of an extended partition on self to the minimum while still\n" "including all logical partitions. If there are no logical partitions, the\n" "extended partition will be deleted. If the extended partition cannot be\n" "shrunk, a _ped.PartitionException will be raised."); PyDoc_STRVAR(disk_next_partition_doc, "next_partition(self, Partition) -> Partition\n\n" "Return the next partition on self after Partition. If Partition is None,\n" "return the first partition. If Partition is an extended partition, return\n" "the first logical partition inside it. If Partition is the last partition,\n" "raise IndexError. Repeatedly calling this method has the effect of\n" "performing a depth-first traversal on self."); PyDoc_STRVAR(disk_get_partition_doc, "get_partition(self, num) -> Partition\n\n" "Return the Partition given by num, or raise _ped.PartitionException if no\n" "partition with that index exists."); PyDoc_STRVAR(disk_get_partition_by_sector_doc, "get_partition_by_sector(self, sector) -> Partition\n\n" "Return the Partition containing sector, or raise _ped.PartitionException\n" "otherwise. If sector exists within a logical partition, the logical\n" "partition is returned."); PyDoc_STRVAR(disk_extended_partition_doc, "extended_partition(self) -> Partition\n\n" "If an extended partition exists on self, return it. Otherwise, raise\n" "_ped.PartitionException."); PyDoc_STRVAR(disk_type_check_feature_doc, "check_feature(self, DiskTypeFeature) -> boolean\n\n" "Return whether or not self supports a particular partition table feature.\n" "DiskTypeFeatures are given by the _ped.DISK_TYPE_* constants."); PyDoc_STRVAR(_ped_Partition_doc, "A _ped.Partition object describes a single partition on a disk. Operations\n" "on Partition objects are limited to getting and setting flags, names, and\n" "paths. All other operations you may wish to do involving partitions are\n" "done through a _ped.Disk or _ped.FileSystem object.
These objects all exist\n" "as attributes of a Partition, though.\n\n" "Valid flags for Partitions are given by the _ped.PARTITION_* constants,\n" "though not all flags are valid for every disk label type.\n\n" "For most errors involving a Partition object, _ped.PartitionException will\n" "be raised."); PyDoc_STRVAR(_ped_Disk_doc, "A _ped.Disk object represents a disk label, or partition table, on a single\n" "_ped.Device. Since parted supports a variety of platforms, it must also\n" "support a variety of disk labels, not all of which may support the same set\n" "of features. For instance, DOS disk labels support extended partitions while\n" "other systems do not. The Disk object therefore includes a DiskType\n" "reference to enumerate supported features. However, all other Disk operations\n" "are supported on all disk label types.\n\n" "Operations on Disk objects include creating, deleting, moving, and resizing\n" "partitions in various ways. Creating filesystems within these partitions is\n" "left up to the FileSystem objects.\n\n" "For most errors involving a Disk object, _ped.PartitionException will be\n" "raised. Some operations can also raise _ped.IOException or IndexError."); PyDoc_STRVAR(_ped_DiskType_doc, "A _ped.DiskType object is a simple object that gives a partition table a\n" "name and describes features it supports. A reference to one of these\n" "objects is stored inside a _ped.Disk object."); #endif /* PYDISK_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/docstrings/pyfilesys.h0000664000076400007640000001272511151317256016160 00000000000000/* * pyfilesys.h * pyparted docstrings for pyfilesys.c * * Copyright (C) 2007 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef DOCSTRINGS_PYFILESYS_H_INCLUDED #define DOCSTRINGS_PYFILESYS_H_INCLUDED #include PyDoc_STRVAR(file_system_clobber_doc, "clobber(self) -> boolean\n\n" "This method erases any file system signatures found in the region given by\n" "self.geom, effectively removing the file system from the partition. After\n" "calling this method, _ped.file_system_probe() won't detect any filesystem.\n" "This method is called by self.create() before creating a new filesystem.\n" "Raises _ped.IOException on any internal parted errors or\n" "_ped.FileSystemException if no filesystem exists in self.geom"); PyDoc_STRVAR(file_system_open_doc, "open(self) -> _ped.FileSystem\n\n" "Open and return the file system in the region given by self.geom, if one\n" "exists. If no file system is found, _ped.FileSystemException is raised.\n" "For all other error conditions, _ped.IOException is raised. 
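/*
 * To tie the _ped.Disk methods documented above together, a typical
 * read-modify-write cycle walks the partitions with next_partition() and
 * finishes with commit(). This is only a sketch; the _ped.Disk object in
 * "disk" is assumed to exist already.
 *
 *     part = disk.next_partition(None)          # first partition on the disk
 *     while True:
 *         try:
 *             part = disk.next_partition(part)  # depth-first traversal
 *         except IndexError:
 *             break                             # past the last partition
 *     disk.commit()                             # commit_to_dev() followed by commit_to_os()
 */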
This method is\n" "not necessarily implemented for all filesystem types parted understands."); PyDoc_STRVAR(file_system_create_doc, "create(self, timer=None) -> _ped.FileSystem\n\n" "Initialize a new filesystem of type self.type on the region given by\n" "self.geom and return that new filesystem. If the filesystem cannot be\n" "created, a _ped.FileSystemException is raised. For all other error\n" "conditions, _ped.IOException is raised. This method is not necessarily\n" "implemented for all filesystem types parted understands."); PyDoc_STRVAR(file_system_close_doc, "close(self) -> boolean\n\n" "Close the filesystem, raising _ped.FileSystemException on error."); PyDoc_STRVAR(file_system_check_doc, "check(self, timer=None) -> boolean\n\n" "Check the filesystem for errors, returning False if any are found. This\n" "method is not necessarily implemented for all filesystem types parted\n" "understands."); PyDoc_STRVAR(file_system_copy_doc, "copy(self, Geometry, timer=None) -> _ped.FileSystem\n\n" "Create and return a new filesystem of the same type on the region given by\n" "Geometry, and copy the contents of the existing filesystem into the new\n" "one. If an error occurs creating or copying the new filesystem,\n" "_ped.FileSystemException is raised. This method is not necessarily\n" "implemented for all filesystem types parted understands.\n"); PyDoc_STRVAR(file_system_resize_doc, "resize(self, Geometry, timer=None) -> boolean\n\n" "Resize self to the new region described by Geometry. It is highly\n" "recommended that Geometry satisfy self.get_resize_constraint(), though\n" "parted does not enforce this recommendation. If it does not, the resize\n" "operation will most likely fail. On error, _ped.FileSystemException is\n" "raised. This method is not necessarily implemented for all filesystem\n" "types parted understands."); PyDoc_STRVAR(file_system_get_resize_constraint_doc, "get_resize_constraint(self) -> Constraint\n\n" "Return a constraint that represents all possible ways self can be resized\n" "with self.resize(). This takes into account the amount of space already\n" "in use on the filesystem."); PyDoc_STRVAR(_ped_FileSystemType_doc, "A _ped.FileSystemType object gives a name to a single filesystem that parted\n" "understands. parted maintains a list of these objects which can be\n" "traversed with the self.get_next method or accessed directly via self.get()."); PyDoc_STRVAR(_ped_FileSystem_doc, "A _ped.FileSystem object describes a filesystem that exists in a given\n" "region on a device. The region is given by a _ped.Geometry object, and\n" "the filesystem is further described by a _ped.FileSystemType object.\n\n" "It is recommended that self.check() be called before any of the create,\n" "resize, or copy operations are called.\n\n" "Filesystem operations are especially prone to failures, and pyparted raises\n" "a variety of exceptions when error conditions are encountered. The most\n" "common is _ped.FileSystemException, though _ped.IOException and\n" "_ped.CreateException may also be raised.\n\n" "parted knows about a variety of filesystems, but supports them to varying\n" "degrees. For some filesystems, it supports the full range of reading,\n" "copying, resizing, and checking operations.
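/*
 * The filesystem docstrings above suggest the following order of operations
 * for a resize. This is a sketch only: it assumes an open _ped.FileSystem in
 * "fs", a target _ped.Geometry in "new_geom", and a filesystem type for which
 * check() and resize() are actually implemented.
 *
 *     if fs.check():                            # recommended before resizing
 *         constraint = fs.get_resize_constraint()
 *         if constraint.is_solution(new_geom):  # new_geom should satisfy it
 *             fs.resize(new_geom)
 *     fs.close()
 */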
Other filesystems may only\n" "support reading but no write operations, or all operations but resize.\n" "If an operation is not supported, a NotImplementedError will be raised."); #endif /* DOCSTRINGS_PYFILESYS_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/docstrings/pydevice.h0000664000076400007640000002434411323155321015733 00000000000000/* * pydevice.h * pyparted docstrings for pydevice.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell */ #ifndef DOCSTRINGS_PYDEVICE_H_INCLUDED #define DOCSTRINGS_PYDEVICE_H_INCLUDED #include PyDoc_STRVAR(disk_probe_doc, "disk_probe(self) -> DiskType\n\n" "Return the type of partition table detected, or raise _ped.IOException if\n" "there is an error reading self."); PyDoc_STRVAR(device_is_busy_doc, "is_busy(self) -> bool\n\n" "Return True if this Device is currently in use, False otherwise."); PyDoc_STRVAR(device_open_doc, "open(self) -> bool\n\n" "Attempt to open this Device to allow use of read(), write(), and sync()\n" "methods. The open() call is architecture-dependent. Apart from\n" "requesting access to the device from the operating system, it does things\n" "like flushing caches.\n\n" "This method may allocate internal resources depending on the architecture.\n" "All allocated resources are freed when you call the close() method.\n\n" "Return True if the Device could be opened, False otherwise."); PyDoc_STRVAR(device_close_doc, "close(self) -> bool\n\n" "Close this Device. All allocated resources are freed. If a failure\n" "occurs while closing the Device, this method returns False. The method\n" "returns True on success."); PyDoc_STRVAR(device_destroy_doc, "destroy(self) -> None\n\n" "Destroys the Device, removes it from the device list, destroys all\n" "allocated resources associated with it, and destroys the object."); PyDoc_STRVAR(device_cache_remove_doc, "cache_remove(self) -> None\n\n" "Removes the Device from the device list, but does not destroy it or any\n" "allocated resources associated with it. USE WITH CAUTION."); PyDoc_STRVAR(device_begin_external_access_doc, "begin_external_access(self) -> bool\n\n" "Begins external access mode for this Device. External access mode allows\n" "you to safely do I/O on the device. If a Device is open, then you should\n" "not do any I/O on that Device, e.g. by calling an external program like\n" "e2fsck, unless you put it in external access mode.
You should not use\n" "any commands that do I/O to a Device while it is in external access mode.\n\n" "Also, you should not close a Device while it is in external access mode.\n\n" "Return True if the Device was successfully put in external access mode,\n" "False otherwise."); PyDoc_STRVAR(device_end_external_access_doc, "end_external_access(self) -> bool\n\n" "Ends external access mode for this Device. Returns True on success,\n" "False on failure."); PyDoc_STRVAR(device_read_doc, "read(self, start, count) -> bool\n\n" "Read and return count sectors from this Device, starting at sector start.\n" "Both start and count are long integers and buffer is a Python object large\n" "enough to hold what you want to read."); PyDoc_STRVAR(device_write_doc, "write(self, buffer, start, count) -> bool\n\n" "Write count sectors from buffer to this Device, starting at sector start.\n" "Both start and count are long integers and buffer is a Python object holding\n" "what you want to write to this Device.\n\n" "Return True if the write was successful, False otherwise."); PyDoc_STRVAR(device_sync_doc, "sync(self) -> bool\n\n" "Flushes all write-behind caches that might be holding up writes. It is\n" "slow because it guarantees cache coherency among all relevant caches.\n" "Return True on success, False otherwise."); PyDoc_STRVAR(device_sync_fast_doc, "sync_fast(self) -> bool\n\n" "Flushes all write-behind caches that might be holding writes. WARNING:\n" "Does NOT ensure cache coherency with other caches. If you need cache\n" "coherency, use sync() instead. Return True on success, False otherwise."); PyDoc_STRVAR(device_check_doc, "check(self) -> long int\n\n" "Architecture-dependent function that returns the number of sectors on\n" "this Device that are ok."); PyDoc_STRVAR(disk_clobber_doc, "clobber(self) -> boolean\n\n" "Remove all identifying information from a partition table. If the partition\n" "table cannot be cleared, a _ped.DiskException is raised."); PyDoc_STRVAR(device_get_constraint_doc, "get_constraint(self) -> Constraint\n\n" "Get a constraint that represents hardware requirements on geometry.\n" "This method will return a constraint representing the limits imposed by\n" "the size of the disk; it will *not* provide any alignment constraints.\n" "\n" "Alignment constraints may be desirable when using media that have a\n" "physical sector size that is a multiple of the logical sector size, as\n" "in this case proper partition alignment can benefit disk performance\n" "significantly.\n"); PyDoc_STRVAR(device_get_minimal_aligned_constraint_doc, "get_minimal_aligned_constraint(self) -> Constraint\n\n" "Get a constraint that represents hardware requirements on geometry and\n" "alignment. This method returns a constraint representing the limits\n" "imposed by the size of the disk and the minimal alignment requirements\n" "for proper performance of the disk.\n"); PyDoc_STRVAR(device_get_optimal_aligned_constraint_doc, "get_optimal_aligned_constraint(self) -> Constraint\n\n" "Get a constraint that represents hardware requirements on geometry and\n" "alignment. This method returns a constraint representing the limits\n" "imposed by the size of the disk and the alignment requirements for\n" "optimal performance of the disk.\n"); PyDoc_STRVAR(device_get_minimum_alignment_doc, "get_minimum_alignment(self) -> Alignment\n\n" "Get an alignment that represents minimum hardware requirements on\n" "alignment.
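/*
 * A short sketch of the Device lifecycle implied by the docstrings above.
 * The _ped.Device in "device" is assumed to exist already; how it was
 * obtained is outside the scope of this header.
 *
 *     if not device.is_busy():
 *         device.open()                                 # required before read()/write()/sync()
 *         constraint = device.get_optimal_aligned_constraint()
 *         device.sync()                                 # flush write-behind caches
 *         device.close()                                # frees what open() allocated
 */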
When for example using media that has a physical sector size\n" "that is a multiple of the logical sector size, it is desirable to have\n" "disk accesses (and thus partitions) properly aligned. Having partitions\n" "not aligned to the minimum hardware requirements may lead to a\n" "performance penalty.\n\n" "The returned alignment describes the alignment for the start sector of\n" "the partition. The end sector should be aligned too; to get the end\n" "sector alignment, decrease the returned alignment's offset by 1.\n"); PyDoc_STRVAR(device_get_optimum_alignment_doc, "get_optimum_alignment(self) -> Alignment\n\n" "Get an alignment that represents the hardware requirements for optimal\n" "performance.\n\n" "The returned alignment describes the alignment for the start sector of\n" "the partition. The end sector should be aligned too; to get the end\n" "sector alignment, decrease the returned alignment's offset by 1.\n"); PyDoc_STRVAR(file_system_get_create_constraint_doc, "get_create_constraint(self, Device) -> Constraint\n\n" "Return a constraint that all filesystems of type self.type that are created\n" "on Device must satisfy. This includes restrictions on the minimum or\n" "maximum size of a given filesystem type, or where it must be created."); PyDoc_STRVAR(file_system_get_copy_constraint_doc, "get_copy_constraint(self, Device) -> Constraint\n\n" "Return a constraint on copying self to somewhere on Device using\n" "self.copy()."); PyDoc_STRVAR(unit_get_size_doc, "unit_get_size(self, Unit) -> long\n\n" "Returns the byte size of self in the specified Unit. The Unit\n" "is any of the _ped.UNIT_* constants."); PyDoc_STRVAR(unit_format_custom_byte_doc, "unit_format_custom_byte(Sector, Unit) -> string\n\n" "Return a string that describes the location of the byte Sector on\n" "self, as described by Unit. The Unit is any of the _ped.UNIT_*\n" "constants."); PyDoc_STRVAR(unit_format_byte_doc, "unit_format_byte(Sector) -> string\n\n" "Return a string that describes the location of the byte Sector on\n" "self, as described by the default Unit."); PyDoc_STRVAR(unit_format_custom_doc, "unit_format_custom(Sector, Unit) -> string\n\n" "Return a string that describes the location of Sector on self, as\n" "described by Unit. The Unit is any of the _ped.UNIT_* constants."); PyDoc_STRVAR(unit_format_doc, "unit_format(Device, Sector) -> string\n\n" "Return a string that describes the location of Sector on self, as\n" "described by the default Unit."); PyDoc_STRVAR(unit_parse_doc, "unit_parse(string, Sector, Geometry) -> boolean\n\n" "Given a string providing a valid description of a location on self,\n" "create a Geometry and Sector describing it. Geometry will be two units\n" "large, centered on Sector. If this makes the Geometry exist partially\n" "outside self, the Geometry will be intersected with the whole device\n" "geometry. This uses the default unit."); PyDoc_STRVAR(unit_parse_custom_doc, "unit_parse_custom(string, Unit, Sector, Geometry) -> boolean\n\n" "Follows the same description as unit_parse(), but takes a Unit as\n" "well. The Unit is any of the _ped.UNIT_* constants."); PyDoc_STRVAR(_ped_CHSGeometry_doc, "A _ped.CHSGeometry object describes a disk using the older CHS style\n" "of defining disk geometry. CHS stands for cylinders-heads-sectors.\n\n" "The _ped.CHSGeometry objects are created automatically when devices are\n" "probed by libparted. They are used for reference purposes to get the\n" "number of cylinders, heads, or sectors on a disk.
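/*
 * The unit_* docstrings above can be combined as in the sketch below. The
 * _ped.Device in "device" is assumed to exist already, _ped.UNIT_MEGABYTE
 * stands in for whichever _ped.UNIT_* constant is wanted, and the byte
 * offset 4096 is arbitrary.
 *
 *     size = device.unit_get_size(_ped.UNIT_MEGABYTE)   # byte size of self in that unit
 *     where = device.unit_format_custom_byte(4096, _ped.UNIT_MEGABYTE)
 *     print(where)                                      # human-readable location string
 */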
They cannot be used\n" "to change the CHS values on a device."); PyDoc_STRVAR(_ped_Device_doc, "A _ped.Device object describes a block device accessible via the\n" "operating system. On Linux, an example block device is /dev/sda.\n\n" "It is important to note that _ped.Device objects describe entire\n" "block devices and not just partitions."); #endif /* DOCSTRINGS_PYDEVICE_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/docstrings/Makefile.am0000664000076400007640000000230411151317256016004 00000000000000# # Makefile.am for pyparted include/docstrings subdirectory # # Copyright (C) 2007 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Cantrell # noinst_HEADERS = pyconstraint.h pydevice.h pydisk.h pyfilesys.h pygeom.h \ pynatmath.h MAINTAINERCLEANFILES = Makefile.in pyparted-3.6/include/docstrings/pygeom.h0000664000076400007640000001311711161036567015431 00000000000000/* * pygeom.h * pyparted docstrings for pygeom.c * * Copyright (C) 2007, 2008 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef DOCSTRINGS_PYGEOM_H_INCLUDED #define DOCSTRINGS_PYGEOM_H_INCLUDED #include PyDoc_STRVAR(geometry_duplicate_doc, "duplicate(self) -> _ped.Geometry\n\n" "Create an identical copy of self. Raises _ped.CreateException if the\n" "operation fails"); PyDoc_STRVAR(geometry_intersect_doc, "intersect(self, Geometry) -> _ped.Geometry\n\n" "Create a new Geometry describing the region common to both self and\n" "Geometry. 
Raises ArithmeticError if the two regions do not intersect."); PyDoc_STRVAR(geometry_set_doc, "set(self, start, length) -> boolean\n\n" "Sets a new start Sector and length Sector in the Geometry object,\n" "also implicitly setting the end Sector as well."); PyDoc_STRVAR(geometry_set_start_doc, "set_start(self, start) -> boolean\n\n" "Sets a new start Sector without modifying the end Sector. Length\n" "will be modified to match the new starting position."); PyDoc_STRVAR(geometry_set_end_doc, "set_end(self, end) -> boolean\n\n" "Sets a new ending Sector without modifying the start Sector. Length\n" "will be modified to match the new ending position."); PyDoc_STRVAR(geometry_test_overlap_doc, "test_overlap(self, Geometry) -> boolean\n\n" "Return whether self and Geometry are on the same physical device and\n" "share at least part of the same region."); PyDoc_STRVAR(geometry_test_inside_doc, "test_inside(self, Geometry) -> boolean\n\n" "Return whether Geometry is entirely within self and on the same physical\n" "device."); PyDoc_STRVAR(geometry_test_equal_doc, "test_equal(self, Geometry) -> boolean\n\n" "Return whether self and Geometry are on the same device and have the same\n" "region."); PyDoc_STRVAR(geometry_test_sector_inside_doc, "test_sector_inside(self, Sector) -> boolean\n\n" "Return whether Sector is entirely within the region described by self."); PyDoc_STRVAR(geometry_read_doc, "read(self, buffer, offset, count) -> boolean\n\n" "Read data from the region described by self. This method reads count\n" "Sectors starting at Sector offset (from the start of the region, not\n" "from the start of the disk) into buffer. This method raises\n" "_ped.IOException on error."); PyDoc_STRVAR(geometry_sync_doc, "sync(self) -> boolean\n\n" "Flushes all caches on the device described by self. This operation can be\n" "slow because it must guarantee cache coherency among multiple caches. This\n" "method raises _ped.IOException on error."); PyDoc_STRVAR(geometry_sync_fast_doc, "sync_fast(self) -> boolean\n\n" "Flushes all caches on the device described by self without guaranteeing\n" "cache coherency. This makes it fast but more prone to error. This method\n" "raises _ped.IOException on error."); PyDoc_STRVAR(geometry_write_doc, "write(self, buffer, offset, count) -> boolean\n\n" "Write data into the region described by self. This method writes count\n" "Sectors of buffer into the region starting at Sector offset. The offset is\n" "from the beginning of the region, not of the disk. This method raises\n" "_ped.IOException on error."); PyDoc_STRVAR(geometry_check_doc, "check(self, offset, granularity, count, timer=None) -> Sector\n\n" "This method checks the region described by self for errors on the disk.\n" "The region to check starts at offset Sectors from the beginning of the\n" "region and is count Sectors long. 
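/*
 * A small sketch tying the Geometry predicates above together. The two
 * _ped.Geometry objects "a" and "b" are assumed to describe regions on the
 * same device, and the sector number 2048 is arbitrary.
 *
 *     if a.test_overlap(b):
 *         common = a.intersect(b)      # region shared by both geometries
 *     if a.test_sector_inside(2048):
 *         a.set_start(2048)            # end stays put, length is recalculated
 */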
granularity specifies how Sectors should\n" "be grouped together.\n\n" "This method returns the first bad sector, or 0 if there are no errors."); PyDoc_STRVAR(geometry_map_doc, "map(self, Geometry, Sector) -> integer\n\n" "Given a Geometry that overlaps with self and a Sector inside Geometry,\n" "this method translates the address of Sector into an address inside self.\n" "The new address is returned, or ArithmeticError is raised if Sector does\n" "not exist within self."); PyDoc_STRVAR(_ped_Geometry_doc, "A _ped.Geometry object describes a continuous region on a physical device.\n" "This device is given by the dev attribute when the Geometry is created.\n" "Most methods on this object involve creating new Geometry objects as needed\n" "and can therefore raise _ped.CreateException when an error occurs creating\n" "the new object. Most methods can also raise _ped.IOException when reading\n" "or writing the underlying physical device fails.\n\n" "libparted (and therefore pyparted) attempts to enforce the following\n" "conditions on Geometry objects:\n\n" "\t- start + length - 1 == end\n" "\t- length > 0\n" "\t- start >= 0\n" "\t- end < dev.length"); #endif /* DOCSTRINGS_PYGEOM_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/docstrings/pyconstraint.h0000664000076400007640000000571011151317256016662 00000000000000/* * pyconstraint.h * pyparted docstrings for pyconstraint.c * * Copyright (C) 2007, 2008 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. * * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef DOCSTRINGS_PYCONSTRAINT_H_INCLUDED #define DOCSTRINGS_PYCONSTRAINT_H_INCLUDED #include PyDoc_STRVAR(constraint_duplicate_doc, "duplicate(Constraint) -> Constraint\n\n" "Return a new Constraint that is a copy of the given Constraint."); PyDoc_STRVAR(constraint_intersect_doc, "intersect(Constraint) -> Constraint\n\n" "Return a Constraint that requires a region to satisfy both this\n" "Constraint object and the one passed in to the method. Any\n" "region satisfying both Constraints will also satisfy the returned\n" "Constraint."); PyDoc_STRVAR(constraint_solve_max_doc, "solve_max() -> Constraint\n\n" "Find the largest region that satisfies this Constraint object and\n" "return a new Constraint. There may be more than one solution.\n" "There are no guarantees about which solution will be returned.\n"); PyDoc_STRVAR(constraint_solve_nearest_doc, "solve_nearest(Geometry) -> Constraint\n\n" "Return the nearest region to Geometry that will satisfy this\n" "Constraint object. 
This function does not guarantee what 'nearest'\n" "means."); PyDoc_STRVAR(constraint_is_solution_doc, "is_solution(Geometry) -> bool\n\n" "Return True if Geometry satisfies this Constraint, False otherwise."); PyDoc_STRVAR(_ped_Constraint_doc, "A _ped.Constraint object describes a set of restrictions on other pyparted\n" "operations. Constraints can restrict the location and alignment of the start\n" "and end of a partition, and its minimum and maximum size. Constraint\n" "operations include various methods of creating constraints, intersecting,\n" "and solving sets of constraints.\n\n" "Most constraint operations can raise _ped.CreateException if creating\n" "temporary objects fails, or ArithmeticError if an error occurs during\n" "calculations."); #endif /* DOCSTRINGS_PYCONSTRAINT_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/pyconstraint.h0000664000076400007640000000614611170723402014502 00000000000000/* * pyconstraint.h * pyparted type definitions for pyconstraint.c * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc.
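/*
 * Pulling the Constraint docstrings above together: intersect() combines two
 * sets of restrictions, and is_solution()/solve_nearest() test or satisfy
 * them. The constraints "c1" and "c2" and the _ped.Geometry "geom" are
 * assumed to exist already.
 *
 *     combined = c1.intersect(c2)                 # a region must satisfy both
 *     if not combined.is_solution(geom):
 *         nearest = combined.solve_nearest(geom)  # see solve_nearest() above
 */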
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef PYCONSTRAINT_H_INCLUDED #define PYCONSTRAINT_H_INCLUDED #include #include /* 1:1 function mappings for constraint.h in libparted */ PyObject *py_ped_constraint_new_from_min_max(PyObject *, PyObject *); PyObject *py_ped_constraint_new_from_min(PyObject *, PyObject *); PyObject *py_ped_constraint_new_from_max(PyObject *, PyObject *); PyObject *py_ped_constraint_duplicate(PyObject *, PyObject *); PyObject *py_ped_constraint_intersect(PyObject *, PyObject *); PyObject *py_ped_constraint_solve_max(PyObject *, PyObject *); PyObject *py_ped_constraint_solve_nearest(PyObject *, PyObject *); PyObject *py_ped_constraint_is_solution(PyObject *, PyObject *); PyObject *py_ped_constraint_any(PyObject *, PyObject *); PyObject *py_ped_constraint_exact(PyObject *, PyObject *); /* _ped.Constraint type is the Python equiv of PedConstraint in libparted */ typedef struct { PyObject_HEAD /* PedConstraint members */ PyObject *start_align; /* _ped.Alignment */ PyObject *end_align; /* _ped.Alignment */ PyObject *start_range; /* _ped.Geometry */ PyObject *end_range; /* _ped.Geometry */ long long min_size; /* PedSector */ long long max_size; /* PedSector */ } _ped_Constraint; void _ped_Constraint_dealloc(_ped_Constraint *); int _ped_Constraint_compare(_ped_Constraint *, PyObject *); PyObject *_ped_Constraint_richcompare(_ped_Constraint *, PyObject *, int); PyObject *_ped_Constraint_str(_ped_Constraint *); int _ped_Constraint_traverse(_ped_Constraint *, visitproc, void *); int _ped_Constraint_clear(_ped_Constraint *); int _ped_Constraint_init(_ped_Constraint *, PyObject *, PyObject *); PyObject *_ped_Constraint_get(_ped_Constraint *, void *); int _ped_Constraint_set(_ped_Constraint *, PyObject *, void *); extern PyTypeObject _ped_Constraint_Type_obj; #endif /* PYCONSTRAINT_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/include/convert.h0000664000076400007640000000534011261517554013432 00000000000000/* * convert.h * Functions for converting to/from Python _ped types and C libparted types * * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions of * the GNU General Public License v.2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY expressed or implied, including the implied warranties of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. You should have received a copy of the * GNU General Public License along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. Any Red Hat trademarks that are incorporated in the * source code or documentation are not subject to the GNU General Public * License and may only be used or replicated with the express permission of * Red Hat, Inc. 
* * Red Hat Author(s): David Cantrell * Chris Lumens */ #ifndef CONVERT_H_INCLUDED #define CONVERT_H_INCLUDED #include #include "pyconstraint.h" #include "pydevice.h" #include "pydisk.h" #include "pyfilesys.h" #include "pygeom.h" #include "pynatmath.h" #include "pytimer.h" PedAlignment *_ped_Alignment2PedAlignment(PyObject *); _ped_Alignment *PedAlignment2_ped_Alignment(PedAlignment *); PedConstraint *_ped_Constraint2PedConstraint(PyObject *); _ped_Constraint *PedConstraint2_ped_Constraint(PedConstraint *); PedDevice *_ped_Device2PedDevice(PyObject *); _ped_Device *PedDevice2_ped_Device(PedDevice *); PedDisk *_ped_Disk2PedDisk(PyObject *); _ped_Disk *PedDisk2_ped_Disk(PedDisk *); PedDiskType *_ped_DiskType2PedDiskType(PyObject *); _ped_DiskType *PedDiskType2_ped_DiskType(const PedDiskType *); PedFileSystem *_ped_FileSystem2PedFileSystem(PyObject *); _ped_FileSystem *PedFileSystem2_ped_FileSystem(PedFileSystem *); PedFileSystemType *_ped_FileSystemType2PedFileSystemType(PyObject *); _ped_FileSystemType *PedFileSystemType2_ped_FileSystemType(const PedFileSystemType *); PedGeometry *_ped_Geometry2PedGeometry(PyObject *); _ped_Geometry *PedGeometry2_ped_Geometry(PedGeometry *); PedCHSGeometry *_ped_CHSGeometry2PedCHSGeometry(PyObject *); _ped_CHSGeometry *PedCHSGeometry2_ped_CHSGeometry(PedCHSGeometry *); PedPartition *_ped_Partition2PedPartition(_ped_Partition *); _ped_Partition *PedPartition2_ped_Partition(PedPartition *, _ped_Disk *); PedTimer *_ped_Timer2PedTimer(PyObject *); _ped_Timer *PedTimer2_ped_Timer(PedTimer *); #endif /* CONVERT_H_INCLUDED */ /* vim:tw=78:ts=4:et:sw=4 */ pyparted-3.6/NEWS0000664000076400007640000000100611312767136010650 00000000000000pyparted-2.1.0 -------------- pyparted supports new libparted API functions, but those functions are not yet available in an official release on ftp.gnu.org. On Fedora systems, the parted-1.9.0-23 package and later revisions contain the necessary API support. pyparted-2.0.0 -------------- Complete rewrite of pyparted. There are now two Python modules exposing the pyparted API: _ped Low level module interfacing with libparted. parted Higher level module written in Python building on _ped. pyparted-3.6/aclocal.m40000664000076400007640000120532011542323602012006 00000000000000# generated automatically by aclocal 1.11.1 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.63],, [m4_warning([this file was generated for autoconf 2.63. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically `autoreconf'.])]) # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008 Free Software Foundation, Inc. 
# Written by Gordon Matzigkeit, 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. m4_define([_LT_COPYING], [dnl # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008 Free Software Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ]) # serial 56 LT_INIT # LT_PREREQ(VERSION) # ------------------ # Complain and exit if this libtool version is less that VERSION. m4_defun([LT_PREREQ], [m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, [m4_default([$3], [m4_fatal([Libtool version $1 or higher is required], 63)])], [$2])]) # _LT_CHECK_BUILDDIR # ------------------ # Complain if the absolute build directory name contains unusual characters m4_defun([_LT_CHECK_BUILDDIR], [case `pwd` in *\ * | *\ *) AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; esac ]) # LT_INIT([OPTIONS]) # ------------------ AC_DEFUN([LT_INIT], [AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT AC_BEFORE([$0], [LT_LANG])dnl AC_BEFORE([$0], [LT_OUTPUT])dnl AC_BEFORE([$0], [LTDL_INIT])dnl m4_require([_LT_CHECK_BUILDDIR])dnl dnl Autoconf doesn't catch unexpanded LT_ macros by default: m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 dnl unless we require an AC_DEFUNed macro: AC_REQUIRE([LTOPTIONS_VERSION])dnl AC_REQUIRE([LTSUGAR_VERSION])dnl AC_REQUIRE([LTVERSION_VERSION])dnl AC_REQUIRE([LTOBSOLETE_VERSION])dnl m4_require([_LT_PROG_LTMAIN])dnl dnl Parse OPTIONS _LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl _LT_SETUP # Only expand once: m4_define([LT_INIT]) ])# LT_INIT # Old names: AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PROG_LIBTOOL], []) dnl AC_DEFUN([AM_PROG_LIBTOOL], []) # _LT_CC_BASENAME(CC) # ------------------- # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
m4_defun([_LT_CC_BASENAME], [for cc_temp in $1""; do case $cc_temp in compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` ]) # _LT_FILEUTILS_DEFAULTS # ---------------------- # It is okay to use these file commands and assume they have been set # sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. m4_defun([_LT_FILEUTILS_DEFAULTS], [: ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} ])# _LT_FILEUTILS_DEFAULTS # _LT_SETUP # --------- m4_defun([_LT_SETUP], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl _LT_DECL([], [host_alias], [0], [The host system])dnl _LT_DECL([], [host], [0])dnl _LT_DECL([], [host_os], [0])dnl dnl _LT_DECL([], [build_alias], [0], [The build system])dnl _LT_DECL([], [build], [0])dnl _LT_DECL([], [build_os], [0])dnl dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl dnl AC_REQUIRE([AC_PROG_LN_S])dnl test -z "$LN_S" && LN_S="ln -s" _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl dnl AC_REQUIRE([LT_CMD_MAX_LEN])dnl _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_CMD_RELOAD])dnl m4_require([_LT_CHECK_MAGIC_METHOD])dnl m4_require([_LT_CMD_OLD_ARCHIVE])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl _LT_CONFIG_LIBTOOL_INIT([ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi ]) if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi _LT_CHECK_OBJDIR m4_require([_LT_TAG_COMPILER])dnl _LT_PROG_ECHO_BACKSLASH case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). 
libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then _LT_PATH_MAGIC fi ;; esac # Use C for the default configuration in the libtool script LT_SUPPORTED_TAG([CC]) _LT_LANG_C_CONFIG _LT_LANG_DEFAULT_CONFIG _LT_CONFIG_COMMANDS ])# _LT_SETUP # _LT_PROG_LTMAIN # --------------- # Note that this code is called both from `configure', and `config.status' # now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, # `config.status' has no value for ac_aux_dir unless we are using Automake, # so we pass a copy along to make sure it has a sensible value anyway. m4_defun([_LT_PROG_LTMAIN], [m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl _LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ltmain="$ac_aux_dir/ltmain.sh" ])# _LT_PROG_LTMAIN # So that we can recreate a full libtool script including additional # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS # in macros and then make a single call at the end using the `libtool' # label. # _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) # ---------------------------------------- # Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL_INIT], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_INIT], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_INIT]) # _LT_CONFIG_LIBTOOL([COMMANDS]) # ------------------------------ # Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) # _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) # ----------------------------------------------------- m4_defun([_LT_CONFIG_SAVE_COMMANDS], [_LT_CONFIG_LIBTOOL([$1]) _LT_CONFIG_LIBTOOL_INIT([$2]) ]) # _LT_FORMAT_COMMENT([COMMENT]) # ----------------------------- # Add leading comment marks to the start of each line, and a trailing # full-stop to the whole comment if one is not present already. m4_define([_LT_FORMAT_COMMENT], [m4_ifval([$1], [ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) )]) # _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) # ------------------------------------------------------------------- # CONFIGNAME is the name given to the value in the libtool script. # VARNAME is the (base) name used in the configure script. # VALUE may be 0, 1 or 2 for a computed quote escaped value based on # VARNAME. Any other value will be used directly. 
m4_define([_LT_DECL], [lt_if_append_uniq([lt_decl_varnames], [$2], [, ], [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], [m4_ifval([$1], [$1], [$2])]) lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) m4_ifval([$4], [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) lt_dict_add_subkey([lt_decl_dict], [$2], [tagged?], [m4_ifval([$5], [yes], [no])])]) ]) # _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) # -------------------------------------------------------- m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) # lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_tag_varnames], [_lt_decl_filter([tagged?], [yes], $@)]) # _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) # --------------------------------------------------------- m4_define([_lt_decl_filter], [m4_case([$#], [0], [m4_fatal([$0: too few arguments: $#])], [1], [m4_fatal([$0: too few arguments: $#: $1])], [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], [lt_dict_filter([lt_decl_dict], $@)])[]dnl ]) # lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) # -------------------------------------------------- m4_define([lt_decl_quote_varnames], [_lt_decl_filter([value], [1], $@)]) # lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_dquote_varnames], [_lt_decl_filter([value], [2], $@)]) # lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_varnames_tagged], [m4_assert([$# <= 2])dnl _$0(m4_quote(m4_default([$1], [[, ]])), m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) m4_define([_lt_decl_varnames_tagged], [m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) # lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_all_varnames], [_$0(m4_quote(m4_default([$1], [[, ]])), m4_if([$2], [], m4_quote(lt_decl_varnames), m4_quote(m4_shift($@))))[]dnl ]) m4_define([_lt_decl_all_varnames], [lt_join($@, lt_decl_varnames_tagged([$1], lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ]) # _LT_CONFIG_STATUS_DECLARE([VARNAME]) # ------------------------------------ # Quote a variable value, and forward it to `config.status' so that its # declaration there will have the same value as in `configure'. VARNAME # must have a single quote delimited value for this to work. m4_define([_LT_CONFIG_STATUS_DECLARE], [$1='`$ECHO "X$][$1" | $Xsed -e "$delay_single_quote_subst"`']) # _LT_CONFIG_STATUS_DECLARATIONS # ------------------------------ # We delimit libtool config variables with single quotes, so when # we write them to config.status, we have to be sure to quote all # embedded single quotes properly. 
In configure, this macro expands # each variable declared with _LT_DECL (and _LT_TAGDECL) into: # # ='`$ECHO "X$" | $Xsed -e "$delay_single_quote_subst"`' m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], [m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAGS # ---------------- # Output comment and list of tags supported by the script m4_defun([_LT_LIBTOOL_TAGS], [_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl available_tags="_LT_TAGS"dnl ]) # _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) # ----------------------------------- # Extract the dictionary values for VARNAME (optionally with TAG) and # expand to a commented shell variable setting: # # # Some comment about what VAR is for. # visible_name=$lt_internal_name m4_define([_LT_LIBTOOL_DECLARE], [_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [description])))[]dnl m4_pushdef([_libtool_name], m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), [0], [_libtool_name=[$]$1], [1], [_libtool_name=$lt_[]$1], [2], [_libtool_name=$lt_[]$1], [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ]) # _LT_LIBTOOL_CONFIG_VARS # ----------------------- # Produce commented declarations of non-tagged libtool config variables # suitable for insertion in the LIBTOOL CONFIG section of the `libtool' # script. Tagged libtool config variables (even for the LIBTOOL CONFIG # section) are produced by _LT_LIBTOOL_TAG_VARS. m4_defun([_LT_LIBTOOL_CONFIG_VARS], [m4_foreach([_lt_var], m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAG_VARS(TAG) # ------------------------- m4_define([_LT_LIBTOOL_TAG_VARS], [m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) # _LT_TAGVAR(VARNAME, [TAGNAME]) # ------------------------------ m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) # _LT_CONFIG_COMMANDS # ------------------- # Send accumulated output to $CONFIG_STATUS. Thanks to the lists of # variables for single and double quote escaping we saved from calls # to _LT_DECL, we can put quote escaped variables declarations # into `config.status', and then the shell code to quote escape them in # for loops in `config.status'. Finally, any additional code accumulated # from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. m4_defun([_LT_CONFIG_COMMANDS], [AC_PROVIDE_IFELSE([LT_OUTPUT], dnl If the libtool generation code has been placed in $CONFIG_LT, dnl instead of duplicating it all over again into config.status, dnl then we will have config.status run $CONFIG_LT later, so it dnl needs to know what name is stored there: [AC_CONFIG_COMMANDS([libtool], [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], dnl If the libtool generation code is destined for config.status, dnl expand the accumulated commands and init code now: [AC_CONFIG_COMMANDS([libtool], [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ])#_LT_CONFIG_COMMANDS # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], [ # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. 
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' _LT_CONFIG_STATUS_DECLARATIONS LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # Quote evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_quote_varnames); do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_dquote_varnames); do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Fix-up fallback echo if it was mangled by the above quoting rules. case \$lt_ECHO in *'\\\[$]0 --fallback-echo"')dnl " lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\` ;; esac _LT_OUTPUT_LIBTOOL_INIT ]) # LT_OUTPUT # --------- # This macro allows early generation of the libtool script (before # AC_OUTPUT is called), incase it is used in configure for compilation # tests. AC_DEFUN([LT_OUTPUT], [: ${CONFIG_LT=./config.lt} AC_MSG_NOTICE([creating $CONFIG_LT]) cat >"$CONFIG_LT" <<_LTEOF #! $SHELL # Generated by $as_me. # Run this file to recreate a libtool stub with the current configuration. lt_cl_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AS_SHELL_SANITIZE _AS_PREPARE exec AS_MESSAGE_FD>&1 exec AS_MESSAGE_LOG_FD>>config.log { echo AS_BOX([Running $as_me.]) } >&AS_MESSAGE_LOG_FD lt_cl_help="\ \`$as_me' creates a local libtool stub from the current configuration, for use in further configure time tests before the real libtool is generated. Usage: $[0] [[OPTIONS]] -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files Report bugs to ." lt_cl_version="\ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) configured by $[0], generated by m4_PACKAGE_STRING. Copyright (C) 2008 Free Software Foundation, Inc. This config.lt script is free software; the Free Software Foundation gives unlimited permision to copy, distribute and modify it." while test $[#] != 0 do case $[1] in --version | --v* | -V ) echo "$lt_cl_version"; exit 0 ;; --help | --h* | -h ) echo "$lt_cl_help"; exit 0 ;; --debug | --d* | -d ) debug=: ;; --quiet | --q* | --silent | --s* | -q ) lt_cl_silent=: ;; -*) AC_MSG_ERROR([unrecognized option: $[1] Try \`$[0] --help' for more information.]) ;; *) AC_MSG_ERROR([unrecognized argument: $[1] Try \`$[0] --help' for more information.]) ;; esac shift done if $lt_cl_silent; then exec AS_MESSAGE_FD>/dev/null fi _LTEOF cat >>"$CONFIG_LT" <<_LTEOF _LT_OUTPUT_LIBTOOL_COMMANDS_INIT _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AC_MSG_NOTICE([creating $ofile]) _LT_OUTPUT_LIBTOOL_COMMANDS AS_EXIT(0) _LTEOF chmod +x "$CONFIG_LT" # configure is writing to config.log, but config.lt does its own redirection, # appending to config.log, which fails on DOS, as config.log is still kept # open by configure. 
Here we exec the FD to /dev/null, effectively closing # config.log, so it can be properly (re)opened and appended to by config.lt. if test "$no_create" != yes; then lt_cl_success=: test "$silent" = yes && lt_config_lt_args="$lt_config_lt_args --quiet" exec AS_MESSAGE_LOG_FD>/dev/null $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false exec AS_MESSAGE_LOG_FD>>config.log $lt_cl_success || AS_EXIT(1) fi ])# LT_OUTPUT # _LT_CONFIG(TAG) # --------------- # If TAG is the built-in tag, create an initial libtool script with a # default configuration from the untagged config vars. Otherwise add code # to config.status for appending the configuration named by TAG from the # matching tagged config vars. m4_defun([_LT_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_CONFIG_SAVE_COMMANDS([ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl m4_if(_LT_TAG, [C], [ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # _LT_COPYING _LT_LIBTOOL_TAGS # ### BEGIN LIBTOOL CONFIG _LT_LIBTOOL_CONFIG_VARS _LT_LIBTOOL_TAG_VARS # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac _LT_PROG_LTMAIN # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) _LT_PROG_XSI_SHELLFNS sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ], [cat <<_LT_EOF >> "$ofile" dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded dnl in a comment (ie after a #). # ### BEGIN LIBTOOL TAG CONFIG: $1 _LT_LIBTOOL_TAG_VARS(_LT_TAG) # ### END LIBTOOL TAG CONFIG: $1 _LT_EOF ])dnl /m4_if ], [m4_if([$1], [], [ PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile'], []) ])dnl /_LT_CONFIG_SAVE_COMMANDS ])# _LT_CONFIG # LT_SUPPORTED_TAG(TAG) # --------------------- # Trace this macro to discover what tags are supported by the libtool # --tag option, using: # autoconf --trace 'LT_SUPPORTED_TAG:$1' AC_DEFUN([LT_SUPPORTED_TAG], []) # C support is built-in for now m4_define([_LT_LANG_C_enabled], []) m4_define([_LT_TAGS], []) # LT_LANG(LANG) # ------------- # Enable libtool support for the given language if not already enabled. 
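# For example, a configure.ac that also builds C++ sources would normally
# request the extra tag with something like
#
#   AC_PROG_CXX
#   LT_LANG([C++])
#
# (illustrative usage only; the language names accepted here are exactly
# the ones matched by the m4_case below).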
AC_DEFUN([LT_LANG], [AC_BEFORE([$0], [LT_OUTPUT])dnl m4_case([$1], [C], [_LT_LANG(C)], [C++], [_LT_LANG(CXX)], [Java], [_LT_LANG(GCJ)], [Fortran 77], [_LT_LANG(F77)], [Fortran], [_LT_LANG(FC)], [Windows Resource], [_LT_LANG(RC)], [m4_ifdef([_LT_LANG_]$1[_CONFIG], [_LT_LANG($1)], [m4_fatal([$0: unsupported language: "$1"])])])dnl ])# LT_LANG # _LT_LANG(LANGNAME) # ------------------ m4_defun([_LT_LANG], [m4_ifdef([_LT_LANG_]$1[_enabled], [], [LT_SUPPORTED_TAG([$1])dnl m4_append([_LT_TAGS], [$1 ])dnl m4_define([_LT_LANG_]$1[_enabled], [])dnl _LT_LANG_$1_CONFIG($1)])dnl ])# _LT_LANG # _LT_LANG_DEFAULT_CONFIG # ----------------------- m4_defun([_LT_LANG_DEFAULT_CONFIG], [AC_PROVIDE_IFELSE([AC_PROG_CXX], [LT_LANG(CXX)], [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) AC_PROVIDE_IFELSE([AC_PROG_F77], [LT_LANG(F77)], [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) AC_PROVIDE_IFELSE([AC_PROG_FC], [LT_LANG(FC)], [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal dnl pulling things in needlessly. AC_PROVIDE_IFELSE([AC_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([LT_PROG_GCJ], [LT_LANG(GCJ)], [m4_ifdef([AC_PROG_GCJ], [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([A][M_PROG_GCJ], [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([LT_PROG_GCJ], [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) AC_PROVIDE_IFELSE([LT_PROG_RC], [LT_LANG(RC)], [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ])# _LT_LANG_DEFAULT_CONFIG # Obsolete macros: AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_CXX], []) dnl AC_DEFUN([AC_LIBTOOL_F77], []) dnl AC_DEFUN([AC_LIBTOOL_FC], []) dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) # _LT_TAG_COMPILER # ---------------- m4_defun([_LT_TAG_COMPILER], [AC_REQUIRE([AC_PROG_CC])dnl _LT_DECL([LTCC], [CC], [1], [A C compiler])dnl _LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl _LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl _LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC ])# _LT_TAG_COMPILER # _LT_COMPILER_BOILERPLATE # ------------------------ # Check for compiler boilerplate output or warnings with # the simple compiler test code. m4_defun([_LT_COMPILER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ])# _LT_COMPILER_BOILERPLATE # _LT_LINKER_BOILERPLATE # ---------------------- # Check for linker boilerplate output or warnings with # the simple link test code. 
m4_defun([_LT_LINKER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ])# _LT_LINKER_BOILERPLATE # _LT_REQUIRED_DARWIN_CHECKS # ------------------------- m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ case $host_os in rhapsody* | darwin*) AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) AC_CHECK_TOOL([LIPO], [lipo], [:]) AC_CHECK_TOOL([OTOOL], [otool], [:]) AC_CHECK_TOOL([OTOOL64], [otool64], [:]) _LT_DECL([], [DSYMUTIL], [1], [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) _LT_DECL([], [NMEDIT], [1], [Tool to change global to local symbols on Mac OS X]) _LT_DECL([], [LIPO], [1], [Tool to manipulate fat objects and archives on Mac OS X]) _LT_DECL([], [OTOOL], [1], [ldd/readelf like tool for Mach-O binaries on Mac OS X]) _LT_DECL([], [OTOOL64], [1], [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], [lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -rf libconftest.dylib* rm -f conftest.* fi]) AC_CACHE_CHECK([for -exported_symbols_list linker flag], [lt_cv_ld_exported_symbols_list], [lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [lt_cv_ld_exported_symbols_list=yes], [lt_cv_ld_exported_symbols_list=no]) LDFLAGS="$save_LDFLAGS" ]) case $host_os in rhapsody* | darwin1.[[012]]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? 
case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[[012]]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac ]) # _LT_DARWIN_LINKER_FEATURES # -------------------------- # Checks for linker and compiler features on darwin m4_defun([_LT_DARWIN_LINKER_FEATURES], [ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(whole_archive_flag_spec, $1)='' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=echo _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" m4_if([$1], [CXX], [ if test "$lt_cv_apple_cc_single_mod" != "yes"; then _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi ],[]) else _LT_TAGVAR(ld_shlibs, $1)=no fi ]) # _LT_SYS_MODULE_PATH_AIX # ----------------------- # Links a minimal program and checks the executable # for the system default hardcoded library path. In most cases, # this is /usr/lib:/lib, but when the MPI compilers are used # the location of the communication and MPI libs are included too. # If we don't find anything, use the default library path according # to the aix ld manual. 
m4_defun([_LT_SYS_MODULE_PATH_AIX], [m4_require([_LT_DECL_SED])dnl AC_LINK_IFELSE(AC_LANG_PROGRAM,[ lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi],[]) if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ])# _LT_SYS_MODULE_PATH_AIX # _LT_SHELL_INIT(ARG) # ------------------- m4_define([_LT_SHELL_INIT], [ifdef([AC_DIVERSION_NOTICE], [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], [AC_DIVERT_PUSH(NOTICE)]) $1 AC_DIVERT_POP ])# _LT_SHELL_INIT # _LT_PROG_ECHO_BACKSLASH # ----------------------- # Add some code to the start of the generated configure script which # will find an echo command which doesn't interpret backslashes. m4_defun([_LT_PROG_ECHO_BACKSLASH], [_LT_SHELL_INIT([ # Check that we are running under the correct shell. SHELL=${CONFIG_SHELL-/bin/sh} case X$lt_ECHO in X*--fallback-echo) # Remove one level of quotation (which was required for Make). ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` ;; esac ECHO=${lt_ECHO-echo} if test "X[$]1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X[$]1" = X--fallback-echo; then # Avoid inline document here, it may be left over : elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then # Yippee, $ECHO works! : else # Restart under the correct shell. exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} fi if test "X[$]1" = X--fallback-echo; then # used as fallback echo shift cat <<_LT_EOF [$]* _LT_EOF exit 0 fi # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test -z "$lt_ECHO"; then if test "X${echo_test_string+set}" != Xset; then # find a string as large as possible, as long as the shell can cope with it for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... if { echo_test_string=`eval $cmd`; } 2>/dev/null && { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null then break fi done fi if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then : else # The Solaris, AIX, and Digital Unix default echo programs unquote # backslashes. This makes it impossible to quote backslashes using # echo "$something" | sed 's/\\/\\\\/g' # # So, first we look for a working echo in the user's PATH. lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for dir in $PATH /usr/ucb; do IFS="$lt_save_ifs" if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$dir/echo" break fi done IFS="$lt_save_ifs" if test "X$ECHO" = Xecho; then # We didn't find a better echo, so look for alternatives. if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # This shell has a builtin print -r that does the trick. 
ECHO='print -r' elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && test "X$CONFIG_SHELL" != X/bin/ksh; then # If we have ksh, try running configure again with it. ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} export ORIGINAL_CONFIG_SHELL CONFIG_SHELL=/bin/ksh export CONFIG_SHELL exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} else # Try using printf. ECHO='printf %s\n' if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # Cool, printf works : elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL export CONFIG_SHELL SHELL="$CONFIG_SHELL" export SHELL ECHO="$CONFIG_SHELL [$]0 --fallback-echo" elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$CONFIG_SHELL [$]0 --fallback-echo" else # maybe with a smaller string... prev=: for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null then break fi prev="$cmd" done if test "$prev" != 'sed 50q "[$]0"'; then echo_test_string=`eval $prev` export echo_test_string exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} else # Oops. We lost completely, so just stick with echo. ECHO=echo fi fi fi fi fi fi # Copy echo and quote the copy suitably for passing to libtool from # the Makefile, instead of quoting the original, which is used later. lt_ECHO=$ECHO if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" fi AC_SUBST(lt_ECHO) ]) _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) _LT_DECL([], [ECHO], [1], [An echo program that does not interpret backslashes]) ])# _LT_PROG_ECHO_BACKSLASH # _LT_ENABLE_LOCK # --------------- m4_defun([_LT_ENABLE_LOCK], [AC_ARG_ENABLE([libtool-lock], [AS_HELP_STRING([--disable-libtool-lock], [avoid locking (might break parallel builds)])]) test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. 
echo '[#]line __oline__ "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, [AC_LANG_PUSH(C) AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) AC_LANG_POP]) if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; sparc*-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" ])# _LT_ENABLE_LOCK # _LT_CMD_OLD_ARCHIVE # ------------------- m4_defun([_LT_CMD_OLD_ARCHIVE], [AC_CHECK_TOOL(AR, ar, false) test -z "$AR" && AR=ar test -z "$AR_FLAGS" && AR_FLAGS=cru _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1]) AC_CHECK_TOOL(STRIP, strip, :) test -z "$STRIP" && STRIP=: _LT_DECL([], [STRIP], [1], [A symbol stripping program]) AC_CHECK_TOOL(RANLIB, ranlib, :) test -z "$RANLIB" && RANLIB=: _LT_DECL([], [RANLIB], [1], [Commands used to install an old-style archive]) # Determine commands to create old-style static archives. 
old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi _LT_DECL([], [old_postinstall_cmds], [2]) _LT_DECL([], [old_postuninstall_cmds], [2]) _LT_TAGDECL([], [old_archive_cmds], [2], [Commands used to build an old-style archive]) ])# _LT_CMD_OLD_ARCHIVE # _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------------------- # Check whether the given compiler option works AC_DEFUN([_LT_COMPILER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$3" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi fi $RM conftest* ]) if test x"[$]$2" = xyes; then m4_if([$5], , :, [$5]) else m4_if([$6], , :, [$6]) fi ])# _LT_COMPILER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) # _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------- # Check whether the given linker option works AC_DEFUN([_LT_LINKER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $3" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. 
cat conftest.err 1>&AS_MESSAGE_LOG_FD $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi else $2=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" ]) if test x"[$]$2" = xyes; then m4_if([$4], , :, [$4]) else m4_if([$5], , :, [$5]) fi ])# _LT_LINKER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) # LT_CMD_MAX_LEN #--------------- AC_DEFUN([LT_CMD_MAX_LEN], [AC_REQUIRE([AC_CANONICAL_HOST])dnl # find the maximum length of command line arguments AC_MSG_CHECKING([the maximum length of command line arguments]) AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. 
# a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ = "XX$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac ]) if test -n $lt_cv_sys_max_cmd_len ; then AC_MSG_RESULT($lt_cv_sys_max_cmd_len) else AC_MSG_RESULT(none) fi max_cmd_len=$lt_cv_sys_max_cmd_len _LT_DECL([], [max_cmd_len], [0], [What is the maximum length of a command?]) ])# LT_CMD_MAX_LEN # Old name: AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) # _LT_HEADER_DLFCN # ---------------- m4_defun([_LT_HEADER_DLFCN], [AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl ])# _LT_HEADER_DLFCN # _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, # ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) # ---------------------------------------------------------------- m4_defun([_LT_TRY_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test "$cross_compiling" = yes; then : [$4] else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF [#line __oline__ "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; }] _LT_EOF if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null lt_status=$? 
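    # At this point lt_status holds the exit code of the test program:
    # lt_dlno_uscore (1) if dlsym found the plain "fnord" symbol,
    # lt_dlneed_uscore (2) if only the underscore-prefixed "_fnord" was
    # found, and lt_dlunknown (0) if dlopen or dlsym failed outright.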
case x$lt_status in x$lt_dlno_uscore) $1 ;; x$lt_dlneed_uscore) $2 ;; x$lt_dlunknown|x*) $3 ;; esac else : # compilation failed $3 fi fi rm -fr conftest* ])# _LT_TRY_DLOPEN_SELF # LT_SYS_DLOPEN_SELF # ------------------ AC_DEFUN([LT_SYS_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ]) ;; *) AC_CHECK_FUNC([shl_load], [lt_cv_dlopen="shl_load"], [AC_CHECK_LIB([dld], [shl_load], [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], [AC_CHECK_FUNC([dlopen], [lt_cv_dlopen="dlopen"], [AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], [AC_CHECK_LIB([svld], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], [AC_CHECK_LIB([dld], [dld_link], [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) ]) ]) ]) ]) ]) ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" AC_CACHE_CHECK([whether a program can dlopen itself], lt_cv_dlopen_self, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) ]) if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" AC_CACHE_CHECK([whether a statically linked program can dlopen itself], lt_cv_dlopen_self_static, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) ]) fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi _LT_DECL([dlopen_support], [enable_dlopen], [0], [Whether dlopen is supported]) _LT_DECL([dlopen_self], [enable_dlopen_self], [0], [Whether dlopen of programs is supported]) _LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], [Whether dlopen of statically linked programs is supported]) ])# LT_SYS_DLOPEN_SELF # Old name: AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) # _LT_COMPILER_C_O([TAGNAME]) # --------------------------- # Check to see if options -c and -o are simultaneously supported by compiler. # This macro does not hard code the compiler like AC_PROG_CC_C_O. 
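# Concretely, the probe amounts to running something like
#
#   $CC $CFLAGS -c conftest.c -o out/conftest2.o
#
# and accepting the option pair only if the output file appears and the
# compiler produced no diagnostics beyond its usual boilerplate
# (illustrative command line; the real invocation is built from
# $ac_compile below).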
m4_defun([_LT_COMPILER_C_O], [m4_require([_LT_DECL_SED])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes fi fi chmod u+w . 2>&AS_MESSAGE_LOG_FD $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* ]) _LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], [Does compiler simultaneously support -c and -o options?]) ])# _LT_COMPILER_C_O # _LT_COMPILER_FILE_LOCKS([TAGNAME]) # ---------------------------------- # Check to see if we can do hard links to lock some files if needed m4_defun([_LT_COMPILER_FILE_LOCKS], [m4_require([_LT_ENABLE_LOCK])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_COMPILER_C_O([$1]) hard_links="nottested" if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user AC_MSG_CHECKING([if we can lock with hard links]) hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no AC_MSG_RESULT([$hard_links]) if test "$hard_links" = no; then AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) need_locks=warn fi else need_locks=no fi _LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) ])# _LT_COMPILER_FILE_LOCKS # _LT_CHECK_OBJDIR # ---------------- m4_defun([_LT_CHECK_OBJDIR], [AC_CACHE_CHECK([for objdir], [lt_cv_objdir], [rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. 
lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null]) objdir=$lt_cv_objdir _LT_DECL([], [objdir], [0], [The name of the directory that contains temporary libtool files])dnl m4_pattern_allow([LT_OBJDIR])dnl AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", [Define to the sub-directory in which libtool stores uninstalled libraries.]) ])# _LT_CHECK_OBJDIR # _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) # -------------------------------------- # Check hardcoding attributes. m4_defun([_LT_LINKER_HARDCODE_LIBPATH], [AC_MSG_CHECKING([how to hardcode library paths into programs]) _LT_TAGVAR(hardcode_action, $1)= if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || test -n "$_LT_TAGVAR(runpath_var, $1)" || test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then # We can hardcode non-existent directories. if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then # Linking always hardcodes the temporary library directory. _LT_TAGVAR(hardcode_action, $1)=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. _LT_TAGVAR(hardcode_action, $1)=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. _LT_TAGVAR(hardcode_action, $1)=unsupported fi AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi _LT_TAGDECL([], [hardcode_action], [0], [How to hardcode a shared library path into an executable]) ])# _LT_LINKER_HARDCODE_LIBPATH # _LT_CMD_STRIPLIB # ---------------- m4_defun([_LT_CMD_STRIPLIB], [m4_require([_LT_DECL_EGREP]) striplib= old_striplib= AC_MSG_CHECKING([whether stripping libraries is possible]) if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" AC_MSG_RESULT([yes]) else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ;; *) AC_MSG_RESULT([no]) ;; esac fi _LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) _LT_DECL([], [striplib], [1]) ])# _LT_CMD_STRIPLIB # _LT_SYS_DYNAMIC_LINKER([TAG]) # ----------------------------- # PORTME Fill in your ld.so characteristics m4_defun([_LT_SYS_DYNAMIC_LINKER], [AC_REQUIRE([AC_CANONICAL_HOST])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_OBJDUMP])dnl m4_require([_LT_DECL_SED])dnl AC_MSG_CHECKING([dynamic linker characteristics]) m4_if([$1], [], [ if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then # if the path contains ";" then we assume it to be the separator # otherwise default to the 
standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` else lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[[lt_foo]]++; } if (lt_freq[[lt_foo]] == 1) { print lt_foo; } }'` sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi]) library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[[4-9]]*) version_type=linux need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[[01]] | aix4.[[01]].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[[45]]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$host_os in yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then # It is most probably a Windows format PATH printed by # mingw gcc, but we are running on Cygwin. Gcc prints its search # path with ; separators, and with drive letters. 
We can handle the # drive letters (cygwin fileutils understands them), so leave them, # especially as we might pass files found there to a mingw objdump, # which wouldn't understand a cygwinified path. Ahh. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ;; esac ;; *) library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' ;; esac dynamic_linker='Win32 ld.exe' # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd1*) dynamic_linker=no ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[[123]]*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2*) shlibpath_overrides_runpath=yes ;; freebsd3.[[01]]* | freebsdelf3.[[01]]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555. postinstall_cmds='chmod 555 $lib' ;; interix[[3-9]]*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be Linux ELF. 
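# For a library "foo" the specs in this branch typically expand to names
# along the lines of libfoo.so.1.2.3, libfoo.so.1 and libfoo.so, with the
# middle, major-versioned name recorded as the soname (an illustrative
# expansion, not an exhaustive description of libtool versioning).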
linux* | k*bsd*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], [shlibpath_overrides_runpath=yes])]) LDFLAGS=$save_LDFLAGS libdir=$save_libdir # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[[89]] | openbsd2.[[89]].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac AC_MSG_RESULT([$dynamic_linker]) test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi _LT_DECL([], [variables_saved_for_relink], [1], [Variables whose values should be saved in libtool wrapper scripts and restored at link time]) _LT_DECL([], [need_lib_prefix], [0], [Do we need the "lib" prefix for modules?]) _LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) _LT_DECL([], [version_type], [0], [Library versioning type]) _LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) _LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) _LT_DECL([], [shlibpath_overrides_runpath], [0], [Is shlibpath searched before the hard-coded library search path?]) _LT_DECL([], [libname_spec], [1], [Format of library name prefix]) _LT_DECL([], [library_names_spec], [1], [[List of archive names. First name is the real one, the rest are links. The last name is the one that the linker finds with -lNAME]]) _LT_DECL([], [soname_spec], [1], [[The coded name of the library, if different from the real name]]) _LT_DECL([], [postinstall_cmds], [2], [Command to use after installation of a shared archive]) _LT_DECL([], [postuninstall_cmds], [2], [Command to use after uninstallation of a shared archive]) _LT_DECL([], [finish_cmds], [2], [Commands used to finish a libtool library installation in a directory]) _LT_DECL([], [finish_eval], [1], [[As "finish_cmds", except a single script fragment to be evaled but not shown]]) _LT_DECL([], [hardcode_into_libs], [0], [Whether we should hardcode library paths into libraries]) _LT_DECL([], [sys_lib_search_path_spec], [2], [Compile-time system search path for libraries]) _LT_DECL([], [sys_lib_dlsearch_path_spec], [2], [Run-time system search path for libraries]) ])# _LT_SYS_DYNAMIC_LINKER # _LT_PATH_TOOL_PREFIX(TOOL) # -------------------------- # find a file program which can recognize shared library AC_DEFUN([_LT_PATH_TOOL_PREFIX], [m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in [[\\/*] | ?:[\\/]*]) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. 
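      # Rough illustration: with PATH_SEPARATOR=':' and a search list such as
      # "/usr/bin:/usr/local/bin", the loop above visits /usr/bin and then
      # /usr/local/bin looking for the requested tool; any component that
      # splits out empty is mapped to the current directory by the
      # "ac_dir=." fallback just above.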
if test -f $ac_dir/$1; then lt_cv_path_MAGIC_CMD="$ac_dir/$1" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac]) MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then AC_MSG_RESULT($MAGIC_CMD) else AC_MSG_RESULT(no) fi _LT_DECL([], [MAGIC_CMD], [0], [Used to examine libraries when file_magic_cmd begins with "file"])dnl ])# _LT_PATH_TOOL_PREFIX # Old name: AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) # _LT_PATH_MAGIC # -------------- # find a file program which can recognize a shared library m4_defun([_LT_PATH_MAGIC], [_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi ])# _LT_PATH_MAGIC # LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker AC_DEFUN([LT_PATH_LD], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl AC_ARG_WITH([gnu-ld], [AS_HELP_STRING([--with-gnu-ld], [assume the C compiler uses GNU ld @<:@default=no@:>@])], [test "$withval" = no || with_gnu_ld=yes], [with_gnu_ld=no])dnl ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(lt_cv_path_LD, [if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. 
# Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; gnu*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[[3-9]]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be Linux ELF. 
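# For orientation, the lt_cv_deplibs_check_method values set in this case
# statement take a few forms: 'pass_all' trusts every dependency without
# inspection, 'file_magic REGEX' runs $file_magic_cmd (typically /usr/bin/file
# or $OBJDUMP) on a candidate library and accepts it if the output matches
# REGEX, and 'match_pattern REGEX' only checks the library's path name.
# On ELF hosts such as the linux* case below, pass_all is used because the
# link editor itself resolves inter-library dependencies.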
linux* | k*bsd*-gnu) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac ]) file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown _LT_DECL([], [deplibs_check_method], [1], [Method to check whether dependent libraries are shared objects]) _LT_DECL([], [file_magic_cmd], [1], [Command to use when deplibs_check_method == "file_magic"]) ])# _LT_CHECK_MAGIC_METHOD # LT_PATH_NM # ---------- # find the pathname to a BSD- or MS-compatible name lister AC_DEFUN([LT_PATH_NM], [AC_REQUIRE([AC_PROG_CC])dnl AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. 
# Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi]) if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :) AC_SUBST([DUMPBIN]) if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm AC_SUBST([NM]) _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], [lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD) cat conftest.out >&AS_MESSAGE_LOG_FD if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest*]) ])# LT_PATH_NM # Old names: AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_PROG_NM], []) dnl AC_DEFUN([AC_PROG_NM], []) # LT_LIB_M # -------- # check for math library AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in *-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) # These system don't have libm, or don't need it ;; *-ncr-sysv4.3*) AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ;; *) AC_CHECK_LIB(m, cos, LIBM="-lm") ;; esac AC_SUBST([LIBM]) ])# LT_LIB_M # Old name: AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_CHECK_LIBM], []) # _LT_COMPILER_NO_RTTI([TAGNAME]) # ------------------------------- m4_defun([_LT_COMPILER_NO_RTTI], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= if test "$GCC" = yes; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], lt_cv_prog_compiler_rtti_exceptions, [-fno-rtti -fno-exceptions], [], [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], [Compiler flag to turn off builtin functions]) ])# _LT_COMPILER_NO_RTTI # _LT_CMD_GLOBAL_SYMBOLS # ---------------------- m4_defun([_LT_CMD_GLOBAL_SYMBOLS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_NM])dnl AC_REQUIRE([LT_PATH_LD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_TAG_COMPILER])dnl # Check for command to grab the raw symbol name followed by C symbol from nm. 
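# As a reminder (illustrative): the single-letter codes gathered into $symcode
# below are the usual nm symbol types, e.g. T = text (code), D = initialized
# data, B = BSS, C = common, R = read-only data, W = weak; G and S cover
# small-data sections on some targets. Lower-case letters would denote local
# symbols and are not matched here.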
AC_MSG_CHECKING([command to parse $NM output from $compiler object]) AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [ # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[[BCDEGRST]]' # Regexp to match symbols that can be accessed directly from C. sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[[BCDT]]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[[ABCDGISTW]]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[[ABCDEGRST]]' fi ;; irix* | nonstopux*) symcode='[[BCDEGRST]]' ;; osf*) symcode='[[BCDEGQRST]]' ;; solaris*) symcode='[[BDRT]]' ;; sco3.2v5*) symcode='[[DT]]' ;; sysv4.2uw2*) symcode='[[DT]]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[[ABDT]]' ;; sysv4) symcode='[[DFNSTU]]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[[ABCDGIRSTW]]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK ['"\ " {last_section=section; section=\$ 3};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx]" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. 
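  # Rough illustration of what the symbol pipe should produce for the test
  # program above: given nm output lines such as
  #   0000000000000000 D nm_test_var
  #   0000000000000010 T nm_test_func
  # the pipe emits "D nm_test_var nm_test_var" and "T nm_test_func nm_test_func",
  # which global_symbol_to_cdecl later rewrites as
  #   extern char nm_test_var;
  #   extern int nm_test_func();
  # Addresses and column widths vary by platform; the names come from conftest.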
nlist=conftest.nm if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ const struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[[]] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" lt_save_CFLAGS="$CFLAGS" LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS="$lt_save_LIBS" CFLAGS="$lt_save_CFLAGS" else echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done ]) if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then AC_MSG_RESULT(failed) else AC_MSG_RESULT(ok) fi _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], [Take the output of nm and produce a listing of raw symbols and C names]) _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], [Transform the output of nm in a proper C declaration]) _LT_DECL([global_symbol_to_c_name_address], [lt_cv_sys_global_symbol_to_c_name_address], [1], [Transform the output of nm in a C name address pair]) _LT_DECL([global_symbol_to_c_name_address_lib_prefix], [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], [Transform the output of nm in a C name address pair when lib prefix is needed]) ]) # _LT_CMD_GLOBAL_SYMBOLS # _LT_COMPILER_PIC([TAGNAME]) # --------------------------- m4_defun([_LT_COMPILER_PIC], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_wl, $1)= _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)= AC_MSG_CHECKING([for $compiler option to produce PIC]) m4_if([$1], [CXX], [ # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else case $host_os in aix[[4-9]]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; dgux*) case $cc_basename in ec++*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; ghcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' fi ;; aCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' # CC pic flag -KPIC is the default. 
;; *) ;; esac ;; linux* | k*bsd*-gnu) case $cc_basename in KCC*) # KAI C++ Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xlc* | xlC*) # IBM XL 8.0 on PPC _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; cxx*) # Digital/Compaq C++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; lcc*) # Lucid _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ], [ if test "$GCC" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; hpux9* | hpux10* | hpux11*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC (with -KPIC) is the default. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; linux* | k*bsd*-gnu) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # icc used to be incompatible with GCC. 
# ICC 10 doesn't accept -KPIC any more. icc* | ifort*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # Lahey Fortran 8.1. lf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ;; pgcc* | pgf77* | pgf90* | pgf95*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; ccc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xl*) # IBM XL C 8.0/Fortran 10.1 on PPC _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ;; *Sun\ F*) # Sun Fortran 8.3 passes all unrecognized flags to the linker _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ;; esac ;; esac ;; newsos6) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All OSF/1 code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; rdos*) _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; solaris*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' case $cc_basename in f77* | f90* | f95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; *) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; esac ;; sunos4*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; unicos*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; uts4*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ]) case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ;; esac AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) _LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], [How to pass a linker flag through the compiler]) # # Check to make sure the PIC flag actually 
works. # if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; esac], [_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) fi _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], [Additional compiler flags for building library objects]) # # Check to make sure the static flag actually works. # wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" _LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], [Compiler flag to prevent dynamic linking]) ])# _LT_COMPILER_PIC # _LT_LINKER_SHLIBS([TAGNAME]) # ---------------------------- # See if the linker supports building shared libraries. m4_defun([_LT_LINKER_SHLIBS], [AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) m4_if([$1], [CXX], [ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' case $host_os in aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. 
# -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] ], [ runpath_var= _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_cmds, $1)= _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(old_archive_from_new_cmds, $1)= _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= _LT_TAGVAR(thread_safe_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list _LT_TAGVAR(include_expsyms, $1)= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. 
if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac _LT_TAGVAR(ld_shlibs, $1)=yes if test "$with_gnu_ld" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi supports_anon_versioning=no case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.9.1, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to modify your PATH *** so that a non-GNU linker is found, and then restart. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
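      # Worked example of the image-base expression used below (illustrative):
      # ${RANDOM-$$} % 4096 yields 0..4095, / 2 gives 0..2047, * 262144
      # (256 KiB) gives 0x00000000..0x1FFC0000, and adding 1342177280
      # (0x50000000) shifts the result into the 0x50000000..0x6FFC0000 window
      # described above, always on a 256 KiB boundary.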
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag= tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 _LT_TAGVAR(whole_archive_flag_spec, $1)= tmp_sharedflag='--shared' ;; xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs 
$deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
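      # Note (illustrative): the doubled square brackets in patterns such as
      # 2.1[[0-5]] above are m4 quoting; after aclocal/autoconf process this
      # file, the generated configure script contains ordinary single-bracket
      # character classes.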
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; sunos4*) _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then runpath_var= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. _LT_TAGVAR(hardcode_minus_L, $1)=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. _LT_TAGVAR(hardcode_direct, $1)=unsupported fi ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
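      # For example (illustrative), a build that hits "error TOC overflow"
      # here can usually be retried along the lines of:
      #   ./configure CFLAGS="-mminimal-toc" LDFLAGS="-Wl,-bbigtoc"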
_LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared libraries. 
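      # The shared object is linked with -bnoentry and an explicit -bE:
      # export list, then added to $libname$release.a with $AR, so programs
      # keep linking against the archive just as on classic AIX.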
_LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; bsdi[[45]]*) _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' # FIXME: Should let the user specify the lib program. _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; freebsd1*) _LT_TAGVAR(ld_shlibs, $1)=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
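  # The hardcode_* settings recorded throughout this case statement note
  # which linking styles hardcode the library directory into the result:
  # naming DIR/libNAME directly (hardcode_direct), passing -LDIR
  # (hardcode_minus_L), or setting the shlibpath variable during the link
  # (hardcode_shlibpath_var); hardcode_libdir_flag_spec holds the
  # platform's -R/-rpath spelling.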
freebsd* | dragonfly*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; hpux9*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; hpux10*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes fi ;; hpux11*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
_LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" AC_LINK_IFELSE(int foo(void) {}, _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ) LDFLAGS="$save_LDFLAGS" else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes _LT_TAGVAR(link_all_deplibs, $1)=yes ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; newsos6) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' else case $host_os in openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' ;; esac fi else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes 
_LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; solaris*) _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' 
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' fi ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4) case $host_vendor in sni) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. 
_LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' _LT_TAGVAR(hardcode_direct, $1)=no ;; motorola) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4.3*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes _LT_TAGVAR(ld_shlibs, $1)=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
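      # (-z defs would make every unresolved reference, including the libc
      #  symbols deliberately left dangling here, a hard link-time error;
      #  -z text only rejects relocations against the text segment, which is
      #  why it is the gentler check used for no_undefined_flag below.)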
_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(ld_shlibs, $1)=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' ;; esac fi fi ]) AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld _LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl _LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl _LT_DECL([], [extract_expsyms_cmds], [2], [The commands to extract the exported symbol list from a shared archive]) # # Do we need to explicitly link libc? # case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in x|xyes) # Assume -lc should be added _LT_TAGVAR(archive_cmds_need_lc, $1)=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $_LT_TAGVAR(archive_cmds, $1) in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. AC_MSG_CHECKING([whether -lc should be explicitly linked in]) $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) _LT_TAGVAR(allow_undefined_flag, $1)= if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) then _LT_TAGVAR(archive_cmds_need_lc, $1)=no else _LT_TAGVAR(archive_cmds_need_lc, $1)=yes fi _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)]) ;; esac fi ;; esac _LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], [Whether or not to add -lc for building shared libraries]) _LT_TAGDECL([allow_libtool_libs_with_static_runtimes], [enable_shared_with_static_runtimes], [0], [Whether or not to disallow shared libs when runtime libs are static]) _LT_TAGDECL([], [export_dynamic_flag_spec], [1], [Compiler flag to allow reflexive dlopens]) _LT_TAGDECL([], [whole_archive_flag_spec], [1], [Compiler flag to generate shared objects directly from archives]) _LT_TAGDECL([], [compiler_needs_object], [1], [Whether the compiler copes with passing no objects directly]) _LT_TAGDECL([], [old_archive_from_new_cmds], [2], [Create an old-style archive from a shared archive]) _LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], [Create a temporary old-style archive to link instead of a shared archive]) _LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) _LT_TAGDECL([], [archive_expsym_cmds], [2]) _LT_TAGDECL([], [module_cmds], [2], [Commands used to build a loadable module if different from building a shared archive.]) _LT_TAGDECL([], [module_expsym_cmds], [2]) _LT_TAGDECL([], [with_gnu_ld], [1], [Whether we are building with GNU ld or not]) _LT_TAGDECL([], [allow_undefined_flag], [1], [Flag that allows shared libraries with undefined symbols to be built]) _LT_TAGDECL([], [no_undefined_flag], [1], [Flag that enforces no undefined symbols]) _LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], [Flag to hardcode $libdir into a binary during linking. This must work even if $libdir does not exist]) _LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1], [[If ld is used when linking, flag to hardcode $libdir into a binary during linking. 
This must work even if $libdir does not exist]]) _LT_TAGDECL([], [hardcode_libdir_separator], [1], [Whether we need a single "-rpath" flag with a separated argument]) _LT_TAGDECL([], [hardcode_direct], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_direct_absolute], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary and the resulting library dependency is "absolute", i.e impossible to change by setting ${shlibpath_var} if the library is relocated]) _LT_TAGDECL([], [hardcode_minus_L], [0], [Set to "yes" if using the -LDIR flag during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_shlibpath_var], [0], [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_automatic], [0], [Set to "yes" if building a shared library automatically hardcodes DIR into the library and all subsequent libraries and executables linked against it]) _LT_TAGDECL([], [inherit_rpath], [0], [Set to yes if linker adds runtime paths of dependent libraries to runtime path list]) _LT_TAGDECL([], [link_all_deplibs], [0], [Whether libtool must link a program against all its dependency libraries]) _LT_TAGDECL([], [fix_srcfile_path], [1], [Fix the shell variable $srcfile for the compiler]) _LT_TAGDECL([], [always_export_symbols], [0], [Set to "yes" if exported symbols are required]) _LT_TAGDECL([], [export_symbols_cmds], [2], [The commands to list exported symbols]) _LT_TAGDECL([], [exclude_expsyms], [1], [Symbols that should not be listed in the preloaded symbols]) _LT_TAGDECL([], [include_expsyms], [1], [Symbols that must always be exported]) _LT_TAGDECL([], [prelink_cmds], [2], [Commands necessary for linking programs (against libraries) with templates]) _LT_TAGDECL([], [file_list_spec], [1], [Specify filename containing input files]) dnl FIXME: Not yet implemented dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], dnl [Compiler flag to generate thread safe objects]) ])# _LT_LINKER_SHLIBS # _LT_LANG_C_CONFIG([TAG]) # ------------------------ # Ensure that the configuration variables for a C compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to `libtool'. m4_defun([_LT_LANG_C_CONFIG], [m4_require([_LT_DECL_EGREP])dnl lt_save_CC="$CC" AC_LANG_PUSH(C) # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' _LT_TAG_COMPILER # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
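# (With an empty tag argument, _LT_TAGVAR(compiler, []) expands to plain
# "compiler" -- see the _LT_TAGVAR definition in this file -- so the
# untagged C compiler must be remembered in the separate compiler_DEFAULT.)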
compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) LT_SYS_DLOPEN_SELF _LT_CMD_STRIPLIB # Report which library types will actually be built AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_CONFIG($1) fi AC_LANG_POP CC="$lt_save_CC" ])# _LT_LANG_C_CONFIG # _LT_PROG_CXX # ------------ # Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++ # compiler, we have our own version here. m4_defun([_LT_PROG_CXX], [ pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes]) AC_PROG_CXX if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then AC_PROG_CXXCPP else _lt_caught_CXX_error=yes fi popdef([AC_MSG_ERROR]) ])# _LT_PROG_CXX dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([_LT_PROG_CXX], []) # _LT_LANG_CXX_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a C++ compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to `libtool'. m4_defun([_LT_LANG_CXX_CONFIG], [AC_REQUIRE([_LT_PROG_CXX])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl AC_LANG_PUSH(C++) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. 
Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' else _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration LT_PATH_LD # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test "$with_gnu_ld" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
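      # (The verbose output of this trial link is what libtool mines for the
      #  extra -L directories the C++ driver slips in; the compiler-specific
      #  cases further down filter the same output differently.)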
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) _LT_TAGVAR(ld_shlibs, $1)=yes case $host_os in aix3*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an empty # executable. 
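          # (_LT_SYS_MODULE_PATH_AIX, defined elsewhere in this file, links a
          #  minimal test program and reads the default libpath back out of
          #  its loader section, leaving the result in $aix_libpath for use
          #  just below.)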
_LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared # libraries. _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... 
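      # ...an EXPORTS line: the command below reuses a ready-made .def file
      # as-is, or else writes "EXPORTS" plus the raw symbol list into
      # $output_objdir/$soname.def and links the DLL against that file.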
_LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; freebsd[[12]]*) # C++ shared libraries reported to be fairly broken before # switch to ELF _LT_TAGVAR(ld_shlibs, $1)=no ;; freebsd-elf*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_TAGVAR(ld_shlibs, $1)=yes ;; gnu*) ;; hpux9*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) ;; *) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. 
This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib' fi fi _LT_TAGVAR(link_all_deplibs, $1)=yes ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes ;; linux* | k*bsd*-gnu) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
case `$CC -V 2>&1` in *"Version 7."*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*) _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ $RANLIB $oldlib' _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 will use weak symbols _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' 
_LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; xl*) # IBM XL 8.0 on PPC, with GNU ld _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='echo' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
_LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; m88k*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) _LT_TAGVAR(ld_shlibs, $1)=yes ;; openbsd2*) # C++ shared libraries are fairly broken _LT_TAGVAR(ld_shlibs, $1)=no ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=echo else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; cxx*) case $host in osf3*) _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' ;; *) _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~ $RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ;; esac _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; solaris*) case $cc_basename in CC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes output_verbose_link_cmd='echo' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. 
_LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(GCC, $1)="$GXX" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" CC=$lt_save_CC LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes AC_LANG_POP ])# _LT_LANG_CXX_CONFIG # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) # --------------------------------- # Figure out "hidden" library dependencies from verbose # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. m4_defun([_LT_SYS_HIDDEN_LIBDEPS], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl # Dependencies to place before and after the object being linked: _LT_TAGVAR(predep_objects, $1)= _LT_TAGVAR(postdep_objects, $1)= _LT_TAGVAR(predeps, $1)= _LT_TAGVAR(postdeps, $1)= _LT_TAGVAR(compiler_lib_search_path, $1)= dnl we can't use the lt_simple_compile_test_code here, dnl because it contains code intended for an executable, dnl not a library. It's possible we should let each dnl tag define a new lt_????_link_test_code variable, dnl but it's only used here... 
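dnl Illustrative sketch only (the sample link line below is hypothetical, not
dnl produced by this macro): the parsing loop further down walks the verbose
dnl link output token by token and classifies everything relative to the
dnl conftest object.  Given, say,
dnl     /usr/lib/crtbegin.o -L/usr/lib -L/opt/lib conftest.o -lstdc++ -lm /usr/lib/crtend.o
dnl the tokens before conftest.o land in predep_objects ("/usr/lib/crtbegin.o")
dnl and compiler_lib_search_path ("-L/usr/lib -L/opt/lib"), while the tokens
dnl after it land in postdeps ("-lstdc++ -lm") and postdep_objects
dnl ("/usr/lib/crtend.o").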
m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF int a; void foo (void) { a = 0; } _LT_EOF ], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF ], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer*4 a a=0 return end _LT_EOF ], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer a a=0 return end _LT_EOF ], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF public class foo { private int a; public void bar (void) { a = 0; } }; _LT_EOF ]) dnl Parse the compiler output and extract the necessary dnl objects, libraries and library flags. if AC_TRY_EVAL(ac_compile); then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test $p = "-L" || test $p = "-R"; then prev=$p continue else prev= fi if test "$pre_test_object_deps_done" = no; then case $p in -L* | -R*) # Internal compiler library paths should come after those # provided by the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" else _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$_LT_TAGVAR(postdeps, $1)"; then _LT_TAGVAR(postdeps, $1)="${prev}${p}" else _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" fi fi ;; *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test "$pre_test_object_deps_done" = no; then if test -z "$_LT_TAGVAR(predep_objects, $1)"; then _LT_TAGVAR(predep_objects, $1)="$p" else _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" fi else if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then _LT_TAGVAR(postdep_objects, $1)="$p" else _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling $1 test program" fi $RM -f conftest.$objext # PORTME: override above test on systems where it is broken m4_if([$1], [CXX], [case $host_os in interix[[3-9]]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. _LT_TAGVAR(predep_objects,$1)= _LT_TAGVAR(postdep_objects,$1)= _LT_TAGVAR(postdeps,$1)= ;; linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac if test "$solaris_use_stlport4" != yes; then _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' fi ;; esac ;; solaris*) case $cc_basename in CC*) # The more standards-conforming stlport4 library is # incompatible with the Cstd library. 
Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac # Adding this requires a known-good setup of shared libraries for # Sun compiler versions before 5.6, else PIC objects from an old # archive will be linked into the output, leading to subtle bugs. if test "$solaris_use_stlport4" != yes; then _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' fi ;; esac ;; esac ]) case " $_LT_TAGVAR(postdeps, $1) " in *" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; esac _LT_TAGVAR(compiler_lib_search_dirs, $1)= if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` fi _LT_TAGDECL([], [compiler_lib_search_dirs], [1], [The directories searched by this compiler when creating a shared library]) _LT_TAGDECL([], [predep_objects], [1], [Dependencies to place before and after the objects being linked to create a shared library]) _LT_TAGDECL([], [postdep_objects], [1]) _LT_TAGDECL([], [predeps], [1]) _LT_TAGDECL([], [postdeps], [1]) _LT_TAGDECL([], [compiler_lib_search_path], [1], [The library search path used internally by the compiler when linking a shared library]) ])# _LT_SYS_HIDDEN_LIBDEPS # _LT_PROG_F77 # ------------ # Since AC_PROG_F77 is broken, in that it returns the empty string # if there is no fortran compiler, we have our own version here. m4_defun([_LT_PROG_F77], [ pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes]) AC_PROG_F77 if test -z "$F77" || test "X$F77" = "Xno"; then _lt_disable_F77=yes fi popdef([AC_MSG_ERROR]) ])# _LT_PROG_F77 dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([_LT_PROG_F77], []) # _LT_LANG_F77_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a Fortran 77 compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_F77_CONFIG], [AC_REQUIRE([_LT_PROG_F77])dnl AC_LANG_PUSH(Fortran 77) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for f77 test sources. ac_ext=f # Object file extension for compiled f77 test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the F77 compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test "$_lt_disable_F77" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC CC=${F77-"f77"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) GCC=$G77 if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$G77" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC="$lt_save_CC" fi # test "$_lt_disable_F77" != yes AC_LANG_POP ])# _LT_LANG_F77_CONFIG # _LT_PROG_FC # ----------- # Since AC_PROG_FC is broken, in that it returns the empty string # if there is no fortran compiler, we have our own version here. m4_defun([_LT_PROG_FC], [ pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes]) AC_PROG_FC if test -z "$FC" || test "X$FC" = "Xno"; then _lt_disable_FC=yes fi popdef([AC_MSG_ERROR]) ])# _LT_PROG_FC dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([_LT_PROG_FC], []) # _LT_LANG_FC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for a Fortran compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. 
m4_defun([_LT_LANG_FC_CONFIG], [AC_REQUIRE([_LT_PROG_FC])dnl AC_LANG_PUSH(Fortran) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for fc test sources. ac_ext=${ac_fc_srcext-f} # Object file extension for compiled fc test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the FC compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_disable_FC" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC CC=${FC-"f95"} compiler=$CC GCC=$ac_cv_fc_compiler_gnu _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC="$lt_save_CC" fi # test "$_lt_disable_FC" != yes AC_LANG_POP ])# _LT_LANG_FC_CONFIG # _LT_LANG_GCJ_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Java Compiler compiler # are suitably defined. 
These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_GCJ_CONFIG], [AC_REQUIRE([LT_PROG_GCJ])dnl AC_LANG_SAVE # Source file extension for Java test sources. ac_ext=java # Object file extension for compiled Java test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="class foo {}" # Code to be used in simple link tests lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC GCC=yes CC=${GCJ-"gcj"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)="$LD" _LT_CC_BASENAME([$compiler]) # GCJ did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC="$lt_save_CC" ])# _LT_LANG_GCJ_CONFIG # _LT_LANG_RC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for the Windows resource compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_RC_CONFIG], [AC_REQUIRE([LT_PROG_RC])dnl AC_LANG_SAVE # Source file extension for RC test sources. ac_ext=rc # Object file extension for compiled RC test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' # Code to be used in simple link tests lt_simple_link_test_code="$lt_simple_compile_test_code" # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC GCC= CC=${RC-"windres"} compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes if test -n "$compiler"; then : _LT_CONFIG($1) fi GCC=$lt_save_GCC AC_LANG_RESTORE CC="$lt_save_CC" ])# _LT_LANG_RC_CONFIG # LT_PROG_GCJ # ----------- AC_DEFUN([LT_PROG_GCJ], [m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], [AC_CHECK_TOOL(GCJ, gcj,) test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" AC_SUBST(GCJFLAGS)])])[]dnl ]) # Old name: AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_GCJ], []) # LT_PROG_RC # ---------- AC_DEFUN([LT_PROG_RC], [AC_CHECK_TOOL(RC, windres,) ]) # Old name: AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_RC], []) # _LT_DECL_EGREP # -------------- # If we don't have a new enough Autoconf to choose the best grep # available, choose the one first in the user's PATH. 
m4_defun([_LT_DECL_EGREP], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_REQUIRE([AC_PROG_FGREP])dnl test -z "$GREP" && GREP=grep _LT_DECL([], [GREP], [1], [A grep program that handles long lines]) _LT_DECL([], [EGREP], [1], [An ERE matcher]) _LT_DECL([], [FGREP], [1], [A literal string matcher]) dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too AC_SUBST([GREP]) ]) # _LT_DECL_OBJDUMP # -------------- # If we don't have a new enough Autoconf to choose the best objdump # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_OBJDUMP], [AC_CHECK_TOOL(OBJDUMP, objdump, false) test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) AC_SUBST([OBJDUMP]) ]) # _LT_DECL_SED # ------------ # Check for a fully-functional sed program, that truncates # as few characters as possible. Prefer GNU sed if found. m4_defun([_LT_DECL_SED], [AC_PROG_SED test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" _LT_DECL([], [SED], [1], [A sed program that does not truncate output]) _LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ])# _LT_DECL_SED m4_ifndef([AC_PROG_SED], [ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_SED. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # m4_defun([AC_PROG_SED], [AC_MSG_CHECKING([for a sed that does not truncate output]) AC_CACHE_VAL(lt_cv_path_SED, [# Loop through the user's path and test for sed and gsed. # Then use that list of sed's as ones to test for truncation. as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for lt_ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" fi done done done IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris # along with /bin/sed that truncates output. for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do test ! -f $lt_ac_sed && continue cat /dev/null > conftest.in lt_ac_count=0 echo $ECHO_N "0123456789$ECHO_C" >conftest.in # Check for GNU sed and select it if it is found. if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then lt_cv_path_SED=$lt_ac_sed break fi while true; do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo >>conftest.nl $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break cmp -s conftest.out conftest.nl || break # 10000 chars as input seems more than enough test $lt_ac_count -gt 10 && break lt_ac_count=`expr $lt_ac_count + 1` if test $lt_ac_count -gt $lt_ac_max; then lt_ac_max=$lt_ac_count lt_cv_path_SED=$lt_ac_sed fi done done ]) SED=$lt_cv_path_SED AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ])#AC_PROG_SED ])#m4_ifndef # Old name: AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_SED], []) # _LT_CHECK_SHELL_FEATURES # ------------------------ # Find out whether the shell is Bourne or XSI compatible, # or has some other useful features. 
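# Illustrative note (not part of the upstream macro): the XSI probes below
# boil down to checking parameter expansions and built-in arithmetic such as
#     f=a/b/c
#     ${f##*/}      # -> "c"   (strip longest */ prefix, basename-style)
#     ${f%/*}       # -> "a/b" (strip shortest /* suffix, dirname-style)
#     ${#f}         # -> 5     (string length)
#     $(( 1 + 1 ))  # -> 2     (shell arithmetic)
# Shells that fail these tests get the slower sed/expr based fallbacks
# emitted by _LT_PROG_XSI_SHELLFNS further below.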
m4_defun([_LT_CHECK_SHELL_FEATURES], [AC_MSG_CHECKING([whether the shell understands some XSI constructs]) # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes AC_MSG_RESULT([$xsi_shell]) _LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) AC_MSG_CHECKING([whether the shell understands "+="]) lt_shell_append=no ( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes AC_MSG_RESULT([$lt_shell_append]) _LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi _LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac _LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ])# _LT_CHECK_SHELL_FEATURES # _LT_PROG_XSI_SHELLFNS # --------------------- # Bourne and XSI compatible variants of some useful shell functions. m4_defun([_LT_PROG_XSI_SHELLFNS], [case $xsi_shell in yes) cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac } # func_basename file func_basename () { func_basename_result="${1##*/}" } # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value returned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac func_basename_result="${1##*/}" } # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). func_stripname () { # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary parameter first. func_stripname_result=${3} func_stripname_result=${func_stripname_result#"${1}"} func_stripname_result=${func_stripname_result%"${2}"} } # func_opt_split func_opt_split () { func_opt_split_opt=${1%%=*} func_opt_split_arg=${1#*=} } # func_lo2o object func_lo2o () { case ${1} in *.lo) func_lo2o_result=${1%.lo}.${objext} ;; *) func_lo2o_result=${1} ;; esac } # func_xform libobj-or-source func_xform () { func_xform_result=${1%.*}.lo } # func_arith arithmetic-term... 
func_arith () { func_arith_result=$(( $[*] )) } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=${#1} } _LT_EOF ;; *) # Bourne compatible functions. cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { # Extract subdirectory from the argument. func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi } # func_basename file func_basename () { func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` } dnl func_dirname_and_basename dnl A portable version of this function is already defined in general.m4sh dnl so there is no need for it here. # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # func_strip_suffix prefix name func_stripname () { case ${2} in .*) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; esac } # sed scripts: my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' my_sed_long_arg='1s/^-[[^=]]*=//' # func_opt_split func_opt_split () { func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` } # func_lo2o object func_lo2o () { func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` } # func_xform libobj-or-source func_xform () { func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'` } # func_arith arithmetic-term... func_arith () { func_arith_result=`expr "$[@]"` } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` } _LT_EOF esac case $lt_shell_append in yes) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$[1]+=\$[2]" } _LT_EOF ;; *) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$[1]=\$$[1]\$[2]" } _LT_EOF ;; esac ]) # Helper functions for option handling. -*- Autoconf -*- # # Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltoptions.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) # _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) # ------------------------------------------ m4_define([_LT_MANGLE_OPTION], [[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) # _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) # --------------------------------------- # Set option OPTION-NAME for macro MACRO-NAME, and if there is a # matching handler defined, dispatch to it. Other OPTION-NAMEs are # saved as a flag. 
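# Illustrative example (hypothetical configure.ac fragment, not part of this
# file): a call such as
#     LT_INIT([dlopen win32-dll])
# makes _LT_SET_OPTIONS run _LT_SET_OPTION([LT_INIT], [dlopen]) and
# _LT_SET_OPTION([LT_INIT], [win32-dll]), dispatching to the handlers
# declared below with LT_OPTION_DEFINE.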
m4_define([_LT_SET_OPTION], [m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), _LT_MANGLE_DEFUN([$1], [$2]), [m4_warning([Unknown $1 option `$2'])])[]dnl ]) # _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) # ------------------------------------------------------------ # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. m4_define([_LT_IF_OPTION], [m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) # _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) # ------------------------------------------------------- # Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME # are set. m4_define([_LT_UNLESS_OPTIONS], [m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), [m4_define([$0_found])])])[]dnl m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ])[]dnl ]) # _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) # ---------------------------------------- # OPTION-LIST is a space-separated list of Libtool options associated # with MACRO-NAME. If any OPTION has a matching handler declared with # LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about # the unknown option and exit. m4_defun([_LT_SET_OPTIONS], [# Set options m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [_LT_SET_OPTION([$1], _LT_Option)]) m4_if([$1],[LT_INIT],[ dnl dnl Simply set some default values (i.e off) if boolean options were not dnl specified: _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ]) _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ]) dnl dnl If no reference was made to various pairs of opposing options, then dnl we run the default mode handler for the pair. For example, if neither dnl `shared' nor `disable-shared' was passed, we enable building of shared dnl archives by default: _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], [_LT_ENABLE_FAST_INSTALL]) ]) ])# _LT_SET_OPTIONS # _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) # ----------------------------------------- m4_define([_LT_MANGLE_DEFUN], [[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) # LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) # ----------------------------------------------- m4_define([LT_OPTION_DEFINE], [m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ])# LT_OPTION_DEFINE # dlopen # ------ LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ]) AU_DEFUN([AC_LIBTOOL_DLOPEN], [_LT_SET_OPTION([LT_INIT], [dlopen]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `dlopen' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) # win32-dll # --------- # Declare package support for building win32 dll's. 
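# For illustration (hypothetical usage, not part of this file): a package
# that builds DLLs on Windows hosts would request this support with
#     LT_INIT([win32-dll])
# in configure.ac, which causes the handler below to probe for the AS,
# DLLTOOL and OBJDUMP tools on cygwin/mingw/pw32/cegcc hosts.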
LT_OPTION_DEFINE([LT_INIT], [win32-dll], [enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*) AC_CHECK_TOOL(AS, as, false) AC_CHECK_TOOL(DLLTOOL, dlltool, false) AC_CHECK_TOOL(OBJDUMP, objdump, false) ;; esac test -z "$AS" && AS=as _LT_DECL([], [AS], [0], [Assembler program])dnl test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl ])# win32-dll AU_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_REQUIRE([AC_CANONICAL_HOST])dnl _LT_SET_OPTION([LT_INIT], [win32-dll]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `win32-dll' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) # _LT_ENABLE_SHARED([DEFAULT]) # ---------------------------- # implement the --enable-shared flag, and supports the `shared' and # `disable-shared' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_SHARED], [m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) _LT_DECL([build_libtool_libs], [enable_shared], [0], [Whether or not to build shared libraries]) ])# _LT_ENABLE_SHARED LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) # Old names: AC_DEFUN([AC_ENABLE_SHARED], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ]) AC_DEFUN([AC_DISABLE_SHARED], [_LT_SET_OPTION([LT_INIT], [disable-shared]) ]) AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_SHARED], []) dnl AC_DEFUN([AM_DISABLE_SHARED], []) # _LT_ENABLE_STATIC([DEFAULT]) # ---------------------------- # implement the --enable-static flag, and support the `static' and # `disable-static' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_STATIC], [m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_static=]_LT_ENABLE_STATIC_DEFAULT) _LT_DECL([build_old_libs], [enable_static], [0], [Whether or not to build static libraries]) ])# _LT_ENABLE_STATIC LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) # Old names: AC_DEFUN([AC_ENABLE_STATIC], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ]) AC_DEFUN([AC_DISABLE_STATIC], [_LT_SET_OPTION([LT_INIT], [disable-static]) ]) AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_STATIC], []) dnl AC_DEFUN([AM_DISABLE_STATIC], []) # _LT_ENABLE_FAST_INSTALL([DEFAULT]) # ---------------------------------- # implement the --enable-fast-install flag, and support the `fast-install' # and `disable-fast-install' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_FAST_INSTALL], [m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([fast-install], [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) _LT_DECL([fast_install], [enable_fast_install], [0], [Whether or not to optimize for fast installation])dnl ])# _LT_ENABLE_FAST_INSTALL LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) # Old names: AU_DEFUN([AC_ENABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `fast-install' option into LT_INIT's first parameter.]) ]) AU_DEFUN([AC_DISABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], [disable-fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `disable-fast-install' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) # _LT_WITH_PIC([MODE]) # -------------------- # implement the --with-pic flag, and support the `pic-only' and `no-pic' # LT_INIT options. # MODE is either `yes' or `no'. If omitted, it defaults to `both'. 
m4_define([_LT_WITH_PIC], [AC_ARG_WITH([pic], [AS_HELP_STRING([--with-pic], [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], [pic_mode="$withval"], [pic_mode=default]) test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) _LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ])# _LT_WITH_PIC LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) # Old name: AU_DEFUN([AC_LIBTOOL_PICMODE], [_LT_SET_OPTION([LT_INIT], [pic-only]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `pic-only' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) m4_define([_LTDL_MODE], []) LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], [m4_define([_LTDL_MODE], [nonrecursive])]) LT_OPTION_DEFINE([LTDL_INIT], [recursive], [m4_define([_LTDL_MODE], [recursive])]) LT_OPTION_DEFINE([LTDL_INIT], [subproject], [m4_define([_LTDL_MODE], [subproject])]) m4_define([_LTDL_TYPE], []) LT_OPTION_DEFINE([LTDL_INIT], [installable], [m4_define([_LTDL_TYPE], [installable])]) LT_OPTION_DEFINE([LTDL_INIT], [convenience], [m4_define([_LTDL_TYPE], [convenience])]) # ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltsugar.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) # lt_join(SEP, ARG1, [ARG2...]) # ----------------------------- # Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their # associated separator. # Needed until we can rely on m4_join from Autoconf 2.62, since all earlier # versions in m4sugar had bugs. m4_define([lt_join], [m4_if([$#], [1], [], [$#], [2], [[$2]], [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) m4_define([_lt_join], [m4_if([$#$2], [2], [], [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) # lt_car(LIST) # lt_cdr(LIST) # ------------ # Manipulate m4 lists. # These macros are necessary as long as will still need to support # Autoconf-2.59 which quotes differently. m4_define([lt_car], [[$1]]) m4_define([lt_cdr], [m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], [$#], 1, [], [m4_dquote(m4_shift($@))])]) m4_define([lt_unquote], $1) # lt_append(MACRO-NAME, STRING, [SEPARATOR]) # ------------------------------------------ # Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. # Note that neither SEPARATOR nor STRING are expanded; they are appended # to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). # No SEPARATOR is output if MACRO-NAME was previously undefined (different # than defined and empty). # # This macro is needed until we can rely on Autoconf 2.62, since earlier # versions of m4sugar mistakenly expanded SEPARATOR but not STRING. m4_define([lt_append], [m4_define([$1], m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) # ---------------------------------------------------------- # Produce a SEP delimited list of all paired combinations of elements of # PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list # has the form PREFIXmINFIXSUFFIXn. 
# Needed until we can rely on m4_combine added in Autoconf 2.62. m4_define([lt_combine], [m4_if(m4_eval([$# > 3]), [1], [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl [[m4_foreach([_Lt_prefix], [$2], [m4_foreach([_Lt_suffix], ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) # lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) # ----------------------------------------------------------------------- # Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. m4_define([lt_if_append_uniq], [m4_ifdef([$1], [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], [lt_append([$1], [$2], [$3])$4], [$5])], [lt_append([$1], [$2], [$3])$4])]) # lt_dict_add(DICT, KEY, VALUE) # ----------------------------- m4_define([lt_dict_add], [m4_define([$1($2)], [$3])]) # lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) # -------------------------------------------- m4_define([lt_dict_add_subkey], [m4_define([$1($2:$3)], [$4])]) # lt_dict_fetch(DICT, KEY, [SUBKEY]) # ---------------------------------- m4_define([lt_dict_fetch], [m4_ifval([$3], m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) # lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) # ----------------------------------------------------------------- m4_define([lt_if_dict_fetch], [m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], [$5], [$6])]) # lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) # -------------------------------------------------------------- m4_define([lt_dict_filter], [m4_if([$5], [], [], [lt_join(m4_quote(m4_default([$4], [[, ]])), lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) # ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # Generated from ltversion.in. # serial 3017 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.2.6b]) m4_define([LT_PACKAGE_REVISION], [1.3017]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.2.6b' macro_revision='1.3017' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ]) # lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004. # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 4 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. # # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. 
It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. # Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], 
[AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # # Copyright © 2004 Scott James Remnant . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # PKG_PROG_PKG_CONFIG([MIN-VERSION]) # ---------------------------------- AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_PATH)?$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])dnl if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])# PKG_PROG_PKG_CONFIG # PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # # Check to see whether a particular set of modules exists. Similar # to PKG_CHECK_MODULES(), but does not set variables or print errors. 
# # # Similar to PKG_CHECK_MODULES, make sure that the first instance of # this or PKG_CHECK_MODULES is called, or make sure to call # PKG_CHECK_EXISTS manually # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_ifval([$2], [$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) # _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) # --------------------------------------------- m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])# _PKG_CONFIG # _PKG_SHORT_ERRORS_SUPPORTED # ----------------------------- AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])# _PKG_SHORT_ERRORS_SUPPORTED # PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], # [ACTION-IF-NOT-FOUND]) # # # Note that if there is a possibility the first call to # PKG_CHECK_MODULES might not happen, you should be sure to include an # explicit call to PKG_PROG_PKG_CONFIG in your configure.ac # # # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD ifelse([$4], , [AC_MSG_ERROR(dnl [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT ])], [AC_MSG_RESULT([no]) $4]) elif test $pkg_failed = untried; then ifelse([$4], , [AC_MSG_FAILURE(dnl [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])], [$4]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) ifelse([$3], , :, [$3]) fi[]dnl ])# PKG_CHECK_MODULES # Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) 
AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.11' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.11.1], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.11.1])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to # `$srcdir', `$srcdir/..', or `$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is `.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [dnl Rely on autoconf to set up CDPATH properly. AC_PREREQ([2.50])dnl # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008 # Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 9 # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ(2.52)dnl ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 10 # There are a few dirty hacks below to avoid letting `AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "GCJ", or "OBJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl ifelse([$1], CC, [depcc="$CC" am_compiler_list=], [$1], CXX, [depcc="$CXX" am_compiler_list=], [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], UPC, [depcc="$UPC" am_compiler_list=], [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. 
mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. 
# This macro is AC_REQUIREd in _AM_DEPENDENCIES AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE(dependency-tracking, [ --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. #serial 5 # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Autoconf 2.62 quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named `Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running `make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # When using ansi2knr, U may be empty or an underscore; expand it U=`sed -n 's/^U = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each `.P' file that we will # need in order to bootstrap the dependency handling code. 
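# In short, the command registered below makes config.status scan every
# generated Makefile for include lines naming $(DEPDIR) and seed each such
# file with a `# dummy' stub, so the first make run finds them all present.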
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006, 2008, 2009 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 16 # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.62])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) AM_MISSING_PROG(AUTOCONF, autoconf) AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) AM_MISSING_PROG(AUTOHEADER, autoheader) AM_MISSING_PROG(MAKEINFO, makeinfo) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AM_PROG_MKDIR_P])dnl # We need awk for the "check" target. The system "awk" is bad on # some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES(CC)], [define([AC_PROG_CC], defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES(CXX)], [define([AC_PROG_CXX], defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES(OBJC)], [define([AC_PROG_OBJC], defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl ]) _AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl dnl The `parallel-tests' driver may need to know about EXEEXT, so add the dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro dnl is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl ]) dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001, 2003, 2005, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST(install_sh)]) # Copyright (C) 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 4 # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from `make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 6 # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it supports --run. # If it does, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= AC_MSG_WARN([`missing' script is too old or missing]) fi ]) # Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_MKDIR_P # --------------- # Check for `mkdir -p'. AC_DEFUN([AM_PROG_MKDIR_P], [AC_PREREQ([2.60])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, dnl while keeping a definition of mkdir_p for backward compatibility. dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of dnl Makefile.ins that do not define MKDIR_P, so we do our own dnl adjustment using top_builddir (which is defined more often than dnl MKDIR_P). AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl case $mkdir_p in [[\\/$]]* | ?:[[\\/]]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005, 2008 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 4 # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # ------------------------------ # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), 1)]) # _AM_SET_OPTIONS(OPTIONS) # ---------------------------------- # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PATH_PYTHON([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # --------------------------------------------------------------------------- # Adds support for distributing Python modules and packages. To # install modules, copy them to $(pythondir), using the python_PYTHON # automake variable. To install a package with the same name as the # automake package, install to $(pkgpythondir), or use the # pkgpython_PYTHON automake variable. # # The variables $(pyexecdir) and $(pkgpyexecdir) are provided as # locations to install python extension modules (shared libraries). # Another macro is required to find the appropriate flags to compile # extension modules. # # If your package is configured with a different prefix to python, # users will have to add the install directory to the PYTHONPATH # environment variable, or create a .pth file (see the python # documentation for details). # # If the MINIMUM-VERSION argument is passed, AM_PATH_PYTHON will # cause an error if the version of python installed on the system # doesn't meet the requirement. MINIMUM-VERSION should consist of # numbers and dots only. AC_DEFUN([AM_PATH_PYTHON], [ dnl Find a Python interpreter. Python versions prior to 2.0 are not dnl supported. (2.0 was released on October 16, 2000). m4_define_default([_AM_PYTHON_INTERPRETER_LIST], [python python2 python3 python3.0 python2.5 python2.4 python2.3 python2.2 dnl python2.1 python2.0]) m4_if([$1],[],[ dnl No version check is needed. # Find any Python interpreter. if test -z "$PYTHON"; then AC_PATH_PROGS([PYTHON], _AM_PYTHON_INTERPRETER_LIST, :) fi am_display_PYTHON=python ], [ dnl A version check is needed. if test -n "$PYTHON"; then # If the user set $PYTHON, use it and don't search something else. AC_MSG_CHECKING([whether $PYTHON version >= $1]) AM_PYTHON_CHECK_VERSION([$PYTHON], [$1], [AC_MSG_RESULT(yes)], [AC_MSG_ERROR(too old)]) am_display_PYTHON=$PYTHON else # Otherwise, try each interpreter until we find one that satisfies # VERSION. 
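# Each candidate from the interpreter list above is tried in turn; the
# sentinel value none is reached only when no interpreter is new enough.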
AC_CACHE_CHECK([for a Python interpreter with version >= $1], [am_cv_pathless_PYTHON],[ for am_cv_pathless_PYTHON in _AM_PYTHON_INTERPRETER_LIST none; do test "$am_cv_pathless_PYTHON" = none && break AM_PYTHON_CHECK_VERSION([$am_cv_pathless_PYTHON], [$1], [break]) done]) # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. if test "$am_cv_pathless_PYTHON" = none; then PYTHON=: else AC_PATH_PROG([PYTHON], [$am_cv_pathless_PYTHON]) fi am_display_PYTHON=$am_cv_pathless_PYTHON fi ]) if test "$PYTHON" = :; then dnl Run any user-specified action, or abort. m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])]) else dnl Query Python for its version number. Getting [:3] seems to be dnl the best way to do this; it's what "site.py" does in the standard dnl library. AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version], [am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`]) AC_SUBST([PYTHON_VERSION], [$am_cv_python_version]) dnl Use the values of $prefix and $exec_prefix for the corresponding dnl values of PYTHON_PREFIX and PYTHON_EXEC_PREFIX. These are made dnl distinct variables so they can be overridden if need be. However, dnl general consensus is that you shouldn't need this ability. AC_SUBST([PYTHON_PREFIX], ['${prefix}']) AC_SUBST([PYTHON_EXEC_PREFIX], ['${exec_prefix}']) dnl At times (like when building shared libraries) you may want dnl to know which OS platform Python thinks this is. AC_CACHE_CHECK([for $am_display_PYTHON platform], [am_cv_python_platform], [am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`]) AC_SUBST([PYTHON_PLATFORM], [$am_cv_python_platform]) dnl Set up 4 directories: dnl pythondir -- where to install python scripts. This is the dnl site-packages directory, not the python standard library dnl directory like in previous automake betas. This behavior dnl is more consistent with lispdir.m4 for example. dnl Query distutils for this directory. distutils does not exist in dnl Python 1.5, so we fall back to the hardcoded directory if it dnl doesn't work. AC_CACHE_CHECK([for $am_display_PYTHON script directory], [am_cv_python_pythondir], [if test "x$prefix" = xNONE then am_py_prefix=$ac_default_prefix else am_py_prefix=$prefix fi am_cv_python_pythondir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(0,0,prefix='$am_py_prefix'))" 2>/dev/null || echo "$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages"` case $am_cv_python_pythondir in $am_py_prefix*) am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` ;; *) case $am_py_prefix in /usr|/System*) ;; *) am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac ]) AC_SUBST([pythondir], [$am_cv_python_pythondir]) dnl pkgpythondir -- $PACKAGE directory under pythondir. Was dnl PYTHON_SITE_PACKAGE in previous betas, but this naming is dnl more consistent with the rest of automake. AC_SUBST([pkgpythondir], [\${pythondir}/$PACKAGE]) dnl pyexecdir -- directory for installing python extension modules dnl (shared libraries) dnl Query distutils for this directory. distutils does not exist in dnl Python 1.5, so we fall back to the hardcoded directory if it dnl doesn't work. 
AC_CACHE_CHECK([for $am_display_PYTHON extension module directory], [am_cv_python_pyexecdir], [if test "x$exec_prefix" = xNONE then am_py_exec_prefix=$am_py_prefix else am_py_exec_prefix=$exec_prefix fi am_cv_python_pyexecdir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(1,0,prefix='$am_py_exec_prefix'))" 2>/dev/null || echo "$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages"` case $am_cv_python_pyexecdir in $am_py_exec_prefix*) am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` ;; *) case $am_py_exec_prefix in /usr|/System*) ;; *) am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac ]) AC_SUBST([pyexecdir], [$am_cv_python_pyexecdir]) dnl pkgpyexecdir -- $(pyexecdir)/$(PACKAGE) AC_SUBST([pkgpyexecdir], [\${pyexecdir}/$PACKAGE]) dnl Run any user-specified action. $2 fi ]) # AM_PYTHON_CHECK_VERSION(PROG, VERSION, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) # --------------------------------------------------------------------------- # Run ACTION-IF-TRUE if the Python interpreter PROG has version >= VERSION. # Run ACTION-IF-FALSE otherwise. # This test uses sys.hexversion instead of the string equivalent (first # word of sys.version), in order to cope with versions such as 2.2c1. # This supports Python 2.0 or higher. (2.0 was released on October 16, 2000). AC_DEFUN([AM_PYTHON_CHECK_VERSION], [prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. # map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '$2'.split('.'))) + [[0, 0, 0]] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]] sys.exit(sys.hexversion < minverhex)" AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 5 # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Just in case sleep 1 echo timestamp > conftest.file # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. 
am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);; esac # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi rm -f conftest.file if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT(yes)]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor `install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in `make install-strip', and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be `maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# serial 2 # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of `v7', `ustar', or `pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. AM_MISSING_PROG([AMTAR], [tar]) m4_if([$1], [v7], [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'], [m4_case([$1], [ustar],, [pax],, [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' _am_tools=${am_cv_prog_tar_$1-$_am_tools} # Do not fold the above two line into one, because Tru64 sh and # Solaris sh will not grok spaces in the rhs of `-'. for _am_tool in $_am_tools do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([m4/libparted.m4]) m4_include([m4/python.m4]) pyparted-3.6/config.guess0000755000076400007640000012761511542323606012501 00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 # Free Software Foundation, Inc. timestamp='2009-11-20' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA # 02110-1301, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Per Bothner. Please send patches (context # diff format) to and include a ChangeLog # entry. # # This script attempts to guess a canonical system name similar to # config.sub. If it succeeds, it prints the system name on stdout, and # exits with 0. Otherwise, it exits with 1. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. 
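# The set_cc_for_build fragment below is eval'ed on demand by the branches
# that need a compiler: it creates a scratch directory and, unless
# CC_FOR_BUILD, HOST_CC or CC is already set, picks the first of cc, gcc,
# c89 or c99 that can compile a trivial test file.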
set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently, or will in the future. case "${UNAME_MACHINE_ARCH}" in arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
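# As an illustration, an ELF-capable i386 NetBSD box would come out roughly
# as i386-unknown-netbsdelf5.0: machine, then object format, then release.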
echo "${machine}-${os}${release}" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE="alpha" ;; "EV4.5 (21064)") UNAME_MACHINE="alpha" ;; "LCA4 (21066/21068)") UNAME_MACHINE="alpha" ;; "EV5 (21164)") UNAME_MACHINE="alphaev5" ;; "EV5.6 (21164A)") UNAME_MACHINE="alphaev56" ;; "EV5.6 (21164PC)") UNAME_MACHINE="alphapca56" ;; "EV5.7 (21164PC)") UNAME_MACHINE="alphapca57" ;; "EV6 (21264)") UNAME_MACHINE="alphaev6" ;; "EV6.7 (21264A)") UNAME_MACHINE="alphaev67" ;; "EV6.8CB (21264C)") UNAME_MACHINE="alphaev68" ;; "EV6.8AL (21264B)") UNAME_MACHINE="alphaev68" ;; "EV6.8CX (21264D)") UNAME_MACHINE="alphaev68" ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE="alphaev69" ;; "EV7 (21364)") UNAME_MACHINE="alphaev7" ;; "EV7.9 (21364A)") UNAME_MACHINE="alphaev79" ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` exit ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm:riscos:*:*|arm:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH="i386" # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH="x86_64" fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. 
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
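	# (The IBM RT/PC used the ROMP processor, hence the "romp" CPU field
	# in the triplet echoed below.)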
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[456]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? ) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH="hppa2.0n" ;; 64) HP_ARCH="hppa2.0w" ;; '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = "hppa2.0w" ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. 
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH="hppa2.0w" else HP_ARCH="hppa64" fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo 
${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) case ${UNAME_MACHINE} in pc98) echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-gnu else echo ${UNAME_MACHINE}-unknown-linux-gnueabi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; cris:Linux:*:*) echo cris-axis-linux-gnu exit ;; crisv32:Linux:*:*) echo crisv32-axis-linux-gnu exit ;; frv:Linux:*:*) echo frv-unknown-linux-gnu exit ;; i*86:Linux:*:*) LIBC=gnu eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __dietlibc__ LIBC=dietlibc #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` echo "${UNAME_MACHINE}-pc-linux-${LIBC}" exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } ;; or32:Linux:*:*) echo or32-unknown-linux-gnu exit ;; padre:Linux:*:*) echo sparc-unknown-linux-gnu exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-gnu exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-gnu ;; PA8*) echo hppa2.0-unknown-linux-gnu ;; *) echo hppa-unknown-linux-gnu ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-gnu exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-gnu exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-gnu exit ;; x86_64:Linux:*:*) echo x86_64-unknown-linux-gnu exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-gnu exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. 
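	# (EMX provides a POSIX-like runtime on top of OS/2, so the triplet
	# below names that emulation layer rather than bare OS/2.)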
echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configury will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
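	# (No known vendor marker was found in the system headers, so fall
	# back to an "unknown" vendor field for this i860 SVR4 system.)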
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
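	# (Haiku is the BeOS-compatible successor, so it gets the same
	# i586-pc prefix as the BeOS-on-PC entry above.)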
echo i586-pc-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown case $UNAME_PROCESSOR in i386) eval $set_cc_for_build if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then UNAME_PROCESSOR="x86_64" fi fi ;; unknown) UNAME_PROCESSOR=powerpc ;; esac echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = "x86"; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NSE-?:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. if test "$cputype" = "386"; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; esac #echo '(No uname command or uname output not recognized.)' 1>&2 #echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 eval $set_cc_for_build cat >$dummy.c < # include #endif main () { #if defined (sony) #if defined (MIPSEB) /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, I don't know.... 
*/ printf ("mips-sony-bsd\n"); exit (0); #else #include printf ("m68k-sony-newsos%s\n", #ifdef NEWSOS4 "4" #else "" #endif ); exit (0); #endif #endif #if defined (__arm) && defined (__acorn) && defined (__unix) printf ("arm-acorn-riscix\n"); exit (0); #endif #if defined (hp300) && !defined (hpux) printf ("m68k-hp-bsd\n"); exit (0); #endif #if defined (NeXT) #if !defined (__ARCHITECTURE__) #define __ARCHITECTURE__ "m68k" #endif int version; version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; if (version < 4) printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); else printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); exit (0); #endif #if defined (MULTIMAX) || defined (n16) #if defined (UMAXV) printf ("ns32k-encore-sysv\n"); exit (0); #else #if defined (CMU) printf ("ns32k-encore-mach\n"); exit (0); #else printf ("ns32k-encore-bsd\n"); exit (0); #endif #endif #endif #if defined (__386BSD__) printf ("i386-pc-bsd\n"); exit (0); #endif #if defined (sequent) #if defined (i386) printf ("i386-sequent-dynix\n"); exit (0); #endif #if defined (ns32000) printf ("ns32k-sequent-dynix\n"); exit (0); #endif #endif #if defined (_SEQUENT_) struct utsname un; uname(&un); if (strncmp(un.version, "V2", 2) == 0) { printf ("i386-sequent-ptx2\n"); exit (0); } if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ printf ("i386-sequent-ptx1\n"); exit (0); } printf ("i386-sequent-ptx\n"); exit (0); #endif #if defined (vax) # if !defined (ultrix) # include # if defined (BSD) # if BSD == 43 printf ("vax-dec-bsd4.3\n"); exit (0); # else # if BSD == 199006 printf ("vax-dec-bsd4.3reno\n"); exit (0); # else printf ("vax-dec-bsd\n"); exit (0); # endif # endif # else printf ("vax-dec-bsd\n"); exit (0); # endif # else printf ("vax-dec-ultrix\n"); exit (0); # endif #endif #if defined (alliant) && defined (i860) printf ("i860-alliant-bsd\n"); exit (0); #endif exit (1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } # Convex versions that predate uname can use getsysinfo(1) if [ -x /usr/convex/getsysinfo ] then case `getsysinfo -f cpu_type` in c1*) echo c1-convex-bsd exit ;; c2*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; c34*) echo c34-convex-bsd exit ;; c38*) echo c38-convex-bsd exit ;; c4*) echo c4-convex-bsd exit ;; esac fi cat >&2 < in order to provide the needed information to handle your system. 
config.guess timestamp = $timestamp uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: pyparted-3.6/configure0000775000076400007640000154260111542323604012065 00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.63 for pyparted 3.6. # # Report bugs to . # # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, # 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # PATH needs CR # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. 
Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH if test "x$CONFIG_SHELL" = x; then if (eval ":") 2>/dev/null; then as_have_required=yes else as_have_required=no fi if test $as_have_required = yes && (eval ": (as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=\$LINENO as_lineno_2=\$LINENO test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } ") 2> /dev/null; then : else as_candidate_shells= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. case $as_dir in /*) for as_base in sh bash ksh sh5; do as_candidate_shells="$as_candidate_shells $as_dir/$as_base" done;; esac done IFS=$as_save_IFS for as_shell in $as_candidate_shells $SHELL; do # Try only shells that exist, to save several forks. if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { ("$as_shell") 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
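  # The global alias below rewrites literal occurrences of ${1+"$@"} back to
  # a plain "$@", so argument lists are forwarded unsplit; for example, a
  # single argument containing spaces stays one word instead of being
  # re-split by the shell.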
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : _ASEOF }; then CONFIG_SHELL=$as_shell as_have_required=yes if { "$as_shell" 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : (as_func_return () { (exit $1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = "$1" ); then : else exitcode=1 echo positional parameters were not saved. fi test $exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } _ASEOF }; then break fi fi done if test "x$CONFIG_SHELL" != x; then for as_var in BASH_ENV ENV do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done export CONFIG_SHELL exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} fi if test $as_have_required = no; then echo This script requires a shell more modern than all the echo shells that I found on your system. Please install a echo modern shell, or manually run the script under such a echo shell if you do have one. { (exit 1); exit 1; } fi fi fi (eval "as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0") || { echo No shell found that supports shell functions. echo Please tell bug-autoconf@gnu.org about your system, echo including any error possibly output before this message. echo This can help us improve future autoconf versions. echo Configuration will now proceed without shell functions. } as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. 
:-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" # Check that we are running under the correct shell. SHELL=${CONFIG_SHELL-/bin/sh} case X$lt_ECHO in X*--fallback-echo) # Remove one level of quotation (which was required for Make). ECHO=`echo "$lt_ECHO" | sed 's,\\\\\$\\$0,'$0','` ;; esac ECHO=${lt_ECHO-echo} if test "X$1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X$1" = X--fallback-echo; then # Avoid inline document here, it may be left over : elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then # Yippee, $ECHO works! : else # Restart under the correct shell. exec $SHELL "$0" --no-reexec ${1+"$@"} fi if test "X$1" = X--fallback-echo; then # used as fallback echo shift cat <<_LT_EOF $* _LT_EOF exit 0 fi # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. 
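# If CDPATH were left set, a plain `cd dir` could also print the resolved
# directory on stdout and pollute command substitutions such as
# `abs=\`cd "$dir" && pwd\`` with an extra line, so it is unset here.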
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH if test -z "$lt_ECHO"; then if test "X${echo_test_string+set}" != Xset; then # find a string as large as possible, as long as the shell can cope with it for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... if { echo_test_string=`eval $cmd`; } 2>/dev/null && { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null then break fi done fi if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then : else # The Solaris, AIX, and Digital Unix default echo programs unquote # backslashes. This makes it impossible to quote backslashes using # echo "$something" | sed 's/\\/\\\\/g' # # So, first we look for a working echo in the user's PATH. lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for dir in $PATH /usr/ucb; do IFS="$lt_save_ifs" if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$dir/echo" break fi done IFS="$lt_save_ifs" if test "X$ECHO" = Xecho; then # We didn't find a better echo, so look for alternatives. if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # This shell has a builtin print -r that does the trick. ECHO='print -r' elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && test "X$CONFIG_SHELL" != X/bin/ksh; then # If we have ksh, try running configure again with it. ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} export ORIGINAL_CONFIG_SHELL CONFIG_SHELL=/bin/ksh export CONFIG_SHELL exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} else # Try using printf. ECHO='printf %s\n' if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then # Cool, printf works : elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL export CONFIG_SHELL SHELL="$CONFIG_SHELL" export SHELL ECHO="$CONFIG_SHELL $0 --fallback-echo" elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && test "X$echo_testing_string" = 'X\t' && echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && test "X$echo_testing_string" = "X$echo_test_string"; then ECHO="$CONFIG_SHELL $0 --fallback-echo" else # maybe with a smaller string... prev=: for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null then break fi prev="$cmd" done if test "$prev" != 'sed 50q "$0"'; then echo_test_string=`eval $prev` export echo_test_string exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} else # Oops. We lost completely, so just stick with echo. ECHO=echo fi fi fi fi fi fi # Copy echo and quote the copy suitably for passing to libtool from # the Makefile, instead of quoting the original, which is used later. 
lt_ECHO=$ECHO if test "X$lt_ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then lt_ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" fi exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='pyparted' PACKAGE_TARNAME='pyparted' PACKAGE_VERSION='3.6' PACKAGE_STRING='pyparted 3.6' PACKAGE_BUGREPORT='pyparted-devel@redhat.com' ac_unique_file="src/_pedmodule.c" # Factoring default headers for most tests. ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS LIBOBJS LIBPARTED_LIBS libparted_LIBS libparted_CFLAGS PKG_CONFIG PYTHON_LDFLAGS PYTHON_EMBED_LIBS PYTHON_LIBS PYTHON_INCLUDES pkgpyexecdir pyexecdir pkgpythondir pythondir PYTHON_PLATFORM PYTHON_EXEC_PREFIX PYTHON_PREFIX PYTHON_VERSION PYTHON OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL lt_ECHO RANLIB AR OBJDUMP LN_S NM ac_ct_DUMPBIN DUMPBIN LD FGREP SED host_os host_vendor host_cpu host build_os build_vendor build_cpu build LIBTOOL EGREP GREP CPP am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__quote am__include DEPDIR OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_static enable_dependency_tracking enable_shared with_pic enable_fast_install with_gnu_ld enable_libtool_lock ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP PKG_CONFIG libparted_CFLAGS libparted_LIBS' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. 
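# For example, bindir defaults to the literal string '${exec_prefix}/bin',
# so overriding exec_prefix at install time still relocates bindir with it.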
# (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. 
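    # For example, --enable-foo-bar is accepted and later stored as
    # enable_foo_bar, while an option name containing a character outside
    # [-+._A-Za-z0-9] is rejected by the check below (names illustrative).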
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
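  # (As the patterns above show, shortened spellings are accepted too;
  # e.g. --srcd=DIR is handled the same as --srcdir=DIR.)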
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) { $as_echo "$as_me: error: unrecognized option: $ac_option Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2 { (exit 1); exit 1; }; } eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. 
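# Backward compatibility: a bare, non-option argument is treated as a host
# type and seeds build_alias, host_alias and target_alias below if they are
# not already set.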
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` { $as_echo "$as_me: error: missing argument to $ac_option" >&2 { (exit 1); exit 1; }; } fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2 { (exit 1); exit 1; }; } ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; } done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. If a cross compiler is detected then cross compile mode will be used." >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || { $as_echo "$as_me: error: working directory cannot be determined" >&2 { (exit 1); exit 1; }; } test "X$ac_ls_di" = "X$ac_pwd_ls_di" || { $as_echo "$as_me: error: pwd does not report name of working directory" >&2 { (exit 1); exit 1; }; } # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." 
{ $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 { (exit 1); exit 1; }; } fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2 { (exit 1); exit 1; }; } pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures pyparted 3.6 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. 
Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/pyparted] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on installed program names System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of pyparted 3.6:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-static[=PKGS] build static libraries [default=no] --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors --enable-shared[=PKGS] build shared libraries [default=yes] --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --disable-libtool-lock avoid locking (might break parallel builds) Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-pic try to use only PIC/non-PIC objects [default=use both] --with-gnu-ld assume the C compiler uses GNU ld [default=no] Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a nonstandard directory <lib dir> LIBS libraries to pass to the linker, e.g. -l<library> CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I<include dir> if you have headers in a nonstandard directory <include dir> CPP C preprocessor PKG_CONFIG path to pkg-config utility libparted_CFLAGS C compiler flags for libparted, overriding pkg-config libparted_LIBS linker flags for libparted, overriding pkg-config Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to <pyparted-devel@redhat.com>. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=.
ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF pyparted configure 3.6 generated by GNU Autoconf 2.63 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by pyparted $as_me 3.6, which was generated by GNU Autoconf 2.63. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. 
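# Roughly: pass 1 records every argument in ac_configure_args0; pass 2
# re-quotes each argument and appends it to ac_configure_args, dropping
# duplicated value-carrying options so the logged command line stays compact.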
ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; 2) ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac done done $as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } $as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo cat <<\_ASBOX ## ---------------- ## ## Cache variables. ## ## ---------------- ## _ASBOX echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo cat <<\_ASBOX ## ----------------- ## ## Output variables. ## ## ----------------- ## _ASBOX echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then cat <<\_ASBOX ## ------------------- ## ## File substitutions. ## ## ------------------- ## _ASBOX echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then cat <<\_ASBOX ## ----------- ## ## confdefs.h. 
## ## ----------- ## _ASBOX echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then ac_site_file1=$CONFIG_SITE elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test -r "$ac_site_file"; then { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special # files actually), so we avoid doing that. if test -f "$cache_file"; then { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. 
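# The unquoted `echo x $val` collapses runs of whitespace, so the comparison
# below ignores whitespace-only differences; the leading "x" keeps values
# that start with a dash from being taken as echo options.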
ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 $as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} { (exit 1); exit 1; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu am__api_version='1.11' ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5 $as_echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;} { (exit 1); exit 1; }; } fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Find a good install program. We prefer a C program (faster), # so one script is as good as another. 
But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if test "${ac_cv_path_install+set}" = set; then $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:$LINENO: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:$LINENO: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Just in case sleep 1 echo timestamp > conftest.file # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. 
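# am_lf below holds a literal newline so that the case patterns can also
# reject directory names containing one.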
am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) { { $as_echo "$as_me:$LINENO: error: unsafe absolute working directory name" >&5 $as_echo "$as_me: error: unsafe absolute working directory name" >&2;} { (exit 1); exit 1; }; };; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) { { $as_echo "$as_me:$LINENO: error: unsafe srcdir value: \`$srcdir'" >&5 $as_echo "$as_me: error: unsafe srcdir value: \`$srcdir'" >&2;} { (exit 1); exit 1; }; };; esac # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi rm -f conftest.file if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". { { $as_echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken alias in your environment" >&5 $as_echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken alias in your environment" >&2;} { (exit 1); exit 1; }; } fi test "$2" = conftest.file ) then # Ok. : else { { $as_echo "$as_me:$LINENO: error: newly created file is older than distributed files! Check your system clock" >&5 $as_echo "$as_me: error: newly created file is older than distributed files! Check your system clock" >&2;} { (exit 1); exit 1; }; } fi { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= { $as_echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} fi if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
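# The `set dummy ...; ac_word=$2` idiom splits a possibly multi-word program
# name on whitespace and picks out its first word for the PATH search.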
set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:$LINENO: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:$LINENO: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if test "${ac_cv_path_mkdir+set}" = set; then $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. 
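# The --version probe above may have created a directory literally named
# "--version" on systems whose mkdir does not recognize that option; clean
# it up before falling back to the install-sh script.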
test -d ./--version && rmdir ./--version MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:$LINENO: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } mkdir_p="$MKDIR_P" case $mkdir_p in [\\/$]* | ?:[\\/]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AWK+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:$LINENO: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then { { $as_echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5 $as_echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;} { (exit 1); exit 1; }; } fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='pyparted' VERSION='3.6' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. 
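# Each maintainer tool is wrapped in the `missing' script (via
# $am_missing_run) so that building from a released tarball does not
# hard-require aclocal, automake, autoconf, autoheader or makeinfo.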
ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # We need awk for the "check" target. The system "awk" is bad on # some platforms. # Always define AMTAR for backward compatibility. AMTAR=${AMTAR-"${am_missing_run}tar"} am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -' ac_config_headers="$ac_config_headers config.h" ac_config_files="$ac_config_files Makefile include/Makefile include/docstrings/Makefile include/typeobjects/Makefile src/Makefile src/parted/Makefile tests/Makefile tests/_ped/Makefile tests/parted/Makefile" cat >>confdefs.h <<_ACEOF #define BUILD_DATE "`date +%m%d%Y`" _ACEOF # Check whether --enable-static was given. if test "${enable_static+set}" = set; then enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac else enable_static=no fi DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. { $as_echo "$as_me:$LINENO: checking for style of include used by $am_make" >&5 $as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from `make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi { $as_echo "$as_me:$LINENO: result: $_am_result" >&5 $as_echo "$_am_result" >&6; } rm -f confinc confmf # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&5 $as_echo "$as_me: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } # Provide some information about the compiler. $as_echo "$as_me:$LINENO: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 { (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -v >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -v >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -V >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -V >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... 
" >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { (ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi { $as_echo "$as_me:$LINENO: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } if test -z "$ac_file"; then $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: C compiler cannot create executables See \`config.log' for more details." >&5 $as_echo "$as_me: error: C compiler cannot create executables See \`config.log' for more details." >&2;} { (exit 77); exit 77; }; }; } fi ac_exeext=$ac_cv_exeext # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:$LINENO: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } # FIXME: These cross compiler hacks should be removed for Autoconf 3.0 # If not cross compiling, check that we can run a simple program. if test "$cross_compiling" != yes; then if { ac_try='./$ac_file' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&5 $as_echo "$as_me: error: cannot run C compiled programs. 
If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi fi fi { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } { $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } { $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&5 $as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi rm -f conftest$ac_cv_exeext { $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT { $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if test "${ac_cv_objext+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." 
>&5 $as_echo "$as_me: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if test "${ac_cv_c_compiler_gnu+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if test "${ac_cv_prog_cc_g+set}" = set; then $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 CFLAGS="" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if test "${ac_cv_prog_cc_c89+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c89=$ac_arg else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. 
# This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if test "${ac_cv_prog_CPP+set}" = set; then $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. 
# Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then # Broken: success on invalid input. continue else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:$LINENO: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test !
-s conftest.err }; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Broken: fails on valid input. continue fi rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then # Broken: success on invalid input. continue else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." >&5 $as_echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. 
shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&5 $as_echo "$as_me: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } # Provide some information about the compiler. $as_echo "$as_me:$LINENO: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 { (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? 
$as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -v >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -v >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -V >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -V >&5") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if test "${ac_cv_c_compiler_gnu+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if test "${ac_cv_prog_cc_g+set}" = set; then $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 CFLAGS="" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if test "${ac_cv_prog_cc_c89+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include #include #include /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c89=$ac_arg else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. 
# This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi { $as_echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if test "${ac_cv_path_GREP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue # Check for GNU ac_path_GREP and select it if it is found. 
# Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then { { $as_echo "$as_me:$LINENO: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 $as_echo "$as_me: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:$LINENO: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if test "${ac_cv_path_EGREP+set}" = set; then $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue # Check for GNU ac_path_EGREP and select it if it is found. 
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then { { $as_echo "$as_me:$LINENO: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 $as_echo "$as_me: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if test "${ac_cv_header_stdc+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_header_stdc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h.
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF rm -f conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { ac_try='./conftest$ac_exeext' { (case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then : else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ( exit $ac_status ) ac_cv_header_stdc=no fi rm -rf conftest.dSYM rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then cat >>confdefs.h <<\_ACEOF #define STDC_HEADERS 1 _ACEOF fi case `pwd` in *\ * | *\ *) { $as_echo "$as_me:$LINENO: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.2.6b' macro_revision='1.3017' ltmain="$ac_aux_dir/ltmain.sh" # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || { { $as_echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 $as_echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} { (exit 1); exit 1; }; } { $as_echo "$as_me:$LINENO: checking build system type" >&5 $as_echo_n "checking build system type...
" >&6; } if test "${ac_cv_build+set}" = set; then $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && { { $as_echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 $as_echo "$as_me: error: cannot guess build type; you must specify one" >&2;} { (exit 1); exit 1; }; } ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 $as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} { (exit 1); exit 1; }; } fi { $as_echo "$as_me:$LINENO: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 $as_echo "$as_me: error: invalid value of canonical build" >&2;} { (exit 1); exit 1; }; };; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:$LINENO: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if test "${ac_cv_host+set}" = set; then $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 $as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} { (exit 1); exit 1; }; } fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 $as_echo "$as_me: error: invalid value of canonical host" >&2;} { (exit 1); exit 1; }; };; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if test "${ac_cv_path_SED+set}" = set; then $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed $as_unset ac_script || ac_script= if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue # Check for GNU ac_path_SED and select it if it is found. 
# Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then { { $as_echo "$as_me:$LINENO: error: no acceptable sed could be found in \$PATH" >&5 $as_echo "$as_me: error: no acceptable sed could be found in \$PATH" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:$LINENO: checking for fgrep" >&5 $as_echo_n "checking for fgrep... " >&6; } if test "${ac_cv_path_FGREP+set}" = set; then $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue # Check for GNU ac_path_FGREP and select it if it is found. # Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then { { $as_echo "$as_me:$LINENO: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 $as_echo "$as_me: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:$LINENO: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. 
if test "${with_gnu_ld+set}" = set; then withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:$LINENO: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if test "${lt_cv_path_LD+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 $as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} { (exit 1); exit 1; }; } { $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if test "${lt_cv_prog_gnu_ld+set}" = set; then $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:$LINENO: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if test "${lt_cv_path_NM+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. 
# Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$ac_tool_prefix"; then for ac_prog in "dumpbin -symbols" "link -dump -symbols" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DUMPBIN+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:$LINENO: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in "dumpbin -symbols" "link -dump -symbols" do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:$LINENO: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if test "${lt_cv_nm_interface+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:5663: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:5666: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:5669: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } { $as_echo "$as_me:$LINENO: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:$LINENO: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi # find the maximum length of command line arguments { $as_echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if test "${lt_cv_sys_max_cmd_len+set}" = set; then $as_echo_n "(cached) " >&6 else i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. 
# So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`$SHELL $0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ = "XX$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n $lt_cv_sys_max_cmd_len ; then { $as_echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:$LINENO: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} { $as_echo "$as_me:$LINENO: checking whether the shell understands some XSI constructs" >&5 $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes { $as_echo "$as_me:$LINENO: result: $xsi_shell" >&5 $as_echo "$xsi_shell" >&6; } { $as_echo "$as_me:$LINENO: checking whether the shell understands \"+=\"" >&5 $as_echo_n "checking whether the shell understands \"+=\"... 
" >&6; } lt_shell_append=no ( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes { $as_echo "$as_me:$LINENO: result: $lt_shell_append" >&5 $as_echo "$lt_shell_append" >&6; } if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... " >&6; } if test "${lt_cv_ld_reload_flag+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in darwin*) if test "$GCC" = yes; then reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OBJDUMP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:$LINENO: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:$LINENO: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... " >&6; } if test "${lt_cv_deplibs_check_method+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # `unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # which responds to the $file_magic_cmd with a given extended regex. # If you have `file' or equivalent on your system and you're not sure # whether `pass_all' will *always* work, you probably want this one. case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. if ( file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. 
lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; gnu*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be Linux ELF. linux* | k*bsd*-gnu) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. 
set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_AR+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_AR="${ac_tool_prefix}ar" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:$LINENO: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AR"; then ac_ct_AR=$AR # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_AR+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi test -z "$AR" && AR=ar test -z "$AR_FLAGS" && AR_FLAGS=cru if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:$LINENO: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. 
set dummy strip; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_RANLIB+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:$LINENO: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. 
lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Now try to grab the symbols. nlist=conftest.nm if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5 (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. 
*/ const struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" lt_save_CFLAGS="$CFLAGS" LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS="$lt_save_LIBS" CFLAGS="$lt_save_CFLAGS" else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:$LINENO: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:$LINENO: result: ok" >&5 $as_echo "ok" >&6; } fi # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then enableval=$enable_libtool_lock; fi test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '#line 6874 "configure"' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if test "${lt_cv_cc_needs_belf+set}" = set; then $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_cv_cc_needs_belf=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 lt_cv_cc_needs_belf=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; sparc*-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. 
set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_DSYMUTIL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:$LINENO: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_NMEDIT+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:$LINENO: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_LIPO+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:$LINENO: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OTOOL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:$LINENO: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. 
set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_OTOOL64+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:$LINENO: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:$LINENO: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if test "${lt_cv_apple_cc_single_mod+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? if test -f libconftest.dylib && test ! 
-s conftest.err && test $_lt_result = 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:$LINENO: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:$LINENO: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if test "${lt_cv_ld_exported_symbols_list+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_cv_ld_exported_symbols_list=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 lt_cv_ld_exported_symbols_list=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:$LINENO: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[012]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then eval "$as_ac_Header=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then eval "$as_ac_Header=yes" else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done # Set options enable_dlopen=no enable_win32_dll=no # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac else enable_shared=yes fi # Check whether --with-pic was given. if test "${with_pic+set}" = set; then withval=$with_pic; pic_mode="$withval" else pic_mode=default fi test -z "$pic_mode" && pic_mode=default # Check whether --enable-fast-install was given. 
if test "${enable_fast_install+set}" = set; then enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac else enable_fast_install=yes fi # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' test -z "$LN_S" && LN_S="ln -s" if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:$LINENO: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if test "${lt_cv_objdir+set}" = set; then $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir cat >>confdefs.h <<_ACEOF #define LT_OBJDIR "$lt_cv_objdir/" _ACEOF case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if test "${lt_cv_path_MAGIC_CMD+set}" = set; then $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. 
;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/${ac_tool_prefix}file; then lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:$LINENO: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if test "${lt_cv_path_MAGIC_CMD+set}" = set; then $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/file; then lt_cv_path_MAGIC_CMD="$ac_dir/file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. 
Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC="$CC" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then lt_prog_compiler_no_builtin_flag=' -fno-builtin' { $as_echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:8282: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:8286: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= { $as_echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if test "$GCC" = yes; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic='-DDLL_EXPORT' ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? 
lt_prog_compiler_static='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; pgcc* | pgf77* | pgf90* | pgf95*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. lt_prog_compiler_static='-non_shared' ;; xl*) # IBM XL C 8.0/Fortran 10.1 on PPC lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Sun\ F*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 $as_echo "$lt_prog_compiler_pic" >&6; } # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } if test "${lt_cv_prog_compiler_pic_works+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:8621: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:8625: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test x"$lt_cv_prog_compiler_pic_works" = xyes; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if test "${lt_cv_prog_compiler_static_works+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test x"$lt_cv_prog_compiler_static_works" = xyes; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } if test "${lt_cv_prog_compiler_c_o+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:8726: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:8730: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if test "${lt_cv_prog_compiler_c_o+set}" = set; then $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:8781: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:8785: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:$LINENO: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_flag_spec_ld= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. 
runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.9.1, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to modify your PATH *** so that a non-GNU linker is found, and then restart. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec='-L$libdir' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... 
archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag= tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; xl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ 
$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec= hardcode_libdir_flag_spec_ld='-rpath $libdir' archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/ p } }' aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' ${wl}-bernotok' allow_undefined_flag=' ${wl}-berok' # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' archive_cmds_need_lc=yes # This is similar to how AIX traditionally builds its shared libraries. 
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' fix_srcfile_path='`cygpath -w "$srcfile"`' enable_shared_with_static_runtimes=yes ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported whole_archive_flag_spec='' link_all_deplibs=yes allow_undefined_flag="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=echo archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; freebsd1*) ld_shlibs=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). 
freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test "$GCC" = yes; then archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes export_dynamic_flag_spec='${wl}-E' ;; hpux10*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_flag_spec_ld='+b $libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test "$GCC" = yes -a "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" cat >conftest.$ac_ext <<_ACEOF int foo(void) {} _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' else case $host_os in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) archive_cmds='$LD -Bshareable -o $lib $libobjs 
$deplibs $linker_flags' hardcode_libdir_flag_spec='-R$libdir' ;; *) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e 
"s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='${wl}-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
no_undefined_flag='${wl}-z,text' allow_undefined_flag='${wl}-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='${wl}-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:$LINENO: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test "$ld_shlibs" = no && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\"") >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } then archive_cmds_need_lc=no else archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* { $as_echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 $as_echo "$archive_cmds_need_lc" >&6; } ;; esac fi ;; esac { $as_echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. 
":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` else lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$host_os in yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH printed by # mingw gcc, but we are running on Cygwin. Gcc prints its search # path with ; separators, and with drive letters. 
We can handle the # drive letters (cygwin fileutils understands them), so leave them, # especially as we might pass files found there to a mingw objdump, # which wouldn't understand a cygwinified path. Ahh. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac ;; *) library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ;; esac dynamic_linker='Win32 ld.exe' # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd1*) dynamic_linker=no ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[123]*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555. postinstall_cmds='chmod 555 $lib' ;; interix[3-9]*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be Linux ELF. 
linux* | k*bsd*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then shlibpath_overrides_runpath=yes fi else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. 
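# Editor's note (illustrative walk-through only; not part of the libtool-
# generated script).  A worked example of the /etc/ld.so.conf expansion above,
# with hypothetical file contents.  Given an /etc/ld.so.conf such as:
#   include /etc/ld.so.conf.d/*.conf
#   /usr/local/lib
# the awk command inlines each included file in place of its `include' line,
# the sed expression strips comments and hwcap entries, drops any '=' suffixes,
# and turns ':' and ',' separators into spaces, and tr joins the result onto a
# single line, so lt_ld_extra could end up as, say,
#   "/usr/lib/mysql /usr/local/lib"
# which is then appended to sys_lib_dlsearch_path_spec after the ABI directories.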
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' 
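# Editor's note (illustrative expansion only, with hypothetical values; not
# part of the libtool-generated script):  with libname=libfoo, release empty,
# shared_ext=.so, versuffix=.1.2.3 and major=.1, the library_names_spec above
# expands to the file names created for the library:
#   libfoo.so.1.2.3 libfoo.so.1 libfoo.so
# and the soname_spec on the next line yields the embedded soname libfoo.so.1.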
soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:$LINENO: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test "X$hardcode_automatic" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && test "$hardcode_minus_L" != no; then # Linking always hardcodes the temporary library directory. hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
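# Editor's note (illustrative summary only; not produced by libtool):
#   hardcode_action=relink      -> relink at install time so the installed
#                                  directory, not the build tree, is recorded
#   hardcode_action=immediate   -> the ordinary build-tree link already
#                                  records usable run-time paths
#   hardcode_action=unsupported -> run-time paths cannot be hardcoded at all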
hardcode_action=unsupported fi { $as_echo "$as_me:$LINENO: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test "$hardcode_action" = relink || test "$inherit_rpath" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dl_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; *) { $as_echo "$as_me:$LINENO: checking for shl_load" >&5 $as_echo_n "checking for shl_load... " >&6; } if test "${ac_cv_func_shl_load+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define shl_load to an innocuous variant, in case declares shl_load. For example, HP-UX 11i declares gettimeofday. */ #define shl_load innocuous_shl_load /* System header to define __stub macros and hopefully few prototypes, which can conflict with char shl_load (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef shl_load /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_shl_load || defined __stub___shl_load choke me #endif int main () { return shl_load (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_func_shl_load=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_shl_load=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 $as_echo "$ac_cv_func_shl_load" >&6; } if test "x$ac_cv_func_shl_load" = x""yes; then lt_cv_dlopen="shl_load" else { $as_echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if test "${ac_cv_lib_dld_shl_load+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dld_shl_load=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_shl_load=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = x""yes; then lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" else { $as_echo "$as_me:$LINENO: checking for dlopen" >&5 $as_echo_n "checking for dlopen... 
" >&6; } if test "${ac_cv_func_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Define dlopen to an innocuous variant, in case declares dlopen. For example, HP-UX 11i declares gettimeofday. */ #define dlopen innocuous_dlopen /* System header to define __stub macros and hopefully few prototypes, which can conflict with char dlopen (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef dlopen /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_dlopen || defined __stub___dlopen choke me #endif int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_func_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_func_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 $as_echo "$ac_cv_func_dlopen" >&6; } if test "x$ac_cv_func_dlopen" = x""yes; then lt_cv_dlopen="dlopen" else { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if test "${ac_cv_lib_dl_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dl_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dl_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = x""yes; then lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else { $as_echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if test "${ac_cv_lib_svld_dlopen+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_svld_dlopen=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_svld_dlopen=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = x""yes; then lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" else { $as_echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if test "${ac_cv_lib_dld_dld_link+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_dld_dld_link=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_dld_dld_link=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = x""yes; then lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" fi fi fi fi fi fi ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... " >&6; } if test "${lt_cv_dlopen_self+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line 11584 "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } if test "${lt_cv_dlopen_self_static+set}" = set; then $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line 11680 "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif void fnord() { int i=42;} int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report which library types will actually be built { $as_echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:$LINENO: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... 
" >&6; } test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac { $as_echo "$as_me:$LINENO: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:$LINENO: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes { $as_echo "$as_me:$LINENO: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC="$lt_save_CC" ac_config_commands="$ac_config_commands libtool" # Only expand once: if test -n "$PYTHON"; then # If the user set $PYTHON, use it and don't search something else. { $as_echo "$as_me:$LINENO: checking whether $PYTHON version >= 2.7" >&5 $as_echo_n "checking whether $PYTHON version >= 2.7... " >&6; } prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. # map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '2.7'.split('.'))) + [0, 0, 0] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] sys.exit(sys.hexversion < minverhex)" if { echo "$as_me:$LINENO: $PYTHON -c "$prog"" >&5 ($PYTHON -c "$prog") >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { { $as_echo "$as_me:$LINENO: error: too old" >&5 $as_echo "$as_me: error: too old" >&2;} { (exit 1); exit 1; }; } fi am_display_PYTHON=$PYTHON else # Otherwise, try each interpreter until we find one that satisfies # VERSION. { $as_echo "$as_me:$LINENO: checking for a Python interpreter with version >= 2.7" >&5 $as_echo_n "checking for a Python interpreter with version >= 2.7... " >&6; } if test "${am_cv_pathless_PYTHON+set}" = set; then $as_echo_n "(cached) " >&6 else for am_cv_pathless_PYTHON in python python2 python3 python3.0 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do test "$am_cv_pathless_PYTHON" = none && break prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. # map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '2.7'.split('.'))) + [0, 0, 0] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] sys.exit(sys.hexversion < minverhex)" if { echo "$as_me:$LINENO: $am_cv_pathless_PYTHON -c "$prog"" >&5 ($am_cv_pathless_PYTHON -c "$prog") >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then break fi done fi { $as_echo "$as_me:$LINENO: result: $am_cv_pathless_PYTHON" >&5 $as_echo "$am_cv_pathless_PYTHON" >&6; } # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. if test "$am_cv_pathless_PYTHON" = none; then PYTHON=: else # Extract the first word of "$am_cv_pathless_PYTHON", so it can be a program name with args. set dummy $am_cv_pathless_PYTHON; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_PYTHON+set}" = set; then $as_echo_n "(cached) " >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:$LINENO: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi am_display_PYTHON=$am_cv_pathless_PYTHON fi if test "$PYTHON" = :; then { { $as_echo "$as_me:$LINENO: error: no suitable Python interpreter found" >&5 $as_echo "$as_me: error: no suitable Python interpreter found" >&2;} { (exit 1); exit 1; }; } else { $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON version" >&5 $as_echo_n "checking for $am_display_PYTHON version... " >&6; } if test "${am_cv_python_version+set}" = set; then $as_echo_n "(cached) " >&6 else am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[:3])"` fi { $as_echo "$as_me:$LINENO: result: $am_cv_python_version" >&5 $as_echo "$am_cv_python_version" >&6; } PYTHON_VERSION=$am_cv_python_version PYTHON_PREFIX='${prefix}' PYTHON_EXEC_PREFIX='${exec_prefix}' { $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON platform" >&5 $as_echo_n "checking for $am_display_PYTHON platform... " >&6; } if test "${am_cv_python_platform+set}" = set; then $as_echo_n "(cached) " >&6 else am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"` fi { $as_echo "$as_me:$LINENO: result: $am_cv_python_platform" >&5 $as_echo "$am_cv_python_platform" >&6; } PYTHON_PLATFORM=$am_cv_python_platform { $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON script directory" >&5 $as_echo_n "checking for $am_display_PYTHON script directory... 
" >&6; } if test "${am_cv_python_pythondir+set}" = set; then $as_echo_n "(cached) " >&6 else if test "x$prefix" = xNONE then am_py_prefix=$ac_default_prefix else am_py_prefix=$prefix fi am_cv_python_pythondir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(0,0,prefix='$am_py_prefix'))" 2>/dev/null || echo "$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages"` case $am_cv_python_pythondir in $am_py_prefix*) am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` ;; *) case $am_py_prefix in /usr|/System*) ;; *) am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac fi { $as_echo "$as_me:$LINENO: result: $am_cv_python_pythondir" >&5 $as_echo "$am_cv_python_pythondir" >&6; } pythondir=$am_cv_python_pythondir pkgpythondir=\${pythondir}/$PACKAGE { $as_echo "$as_me:$LINENO: checking for $am_display_PYTHON extension module directory" >&5 $as_echo_n "checking for $am_display_PYTHON extension module directory... " >&6; } if test "${am_cv_python_pyexecdir+set}" = set; then $as_echo_n "(cached) " >&6 else if test "x$exec_prefix" = xNONE then am_py_exec_prefix=$am_py_prefix else am_py_exec_prefix=$exec_prefix fi am_cv_python_pyexecdir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(1,0,prefix='$am_py_exec_prefix'))" 2>/dev/null || echo "$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages"` case $am_cv_python_pyexecdir in $am_py_exec_prefix*) am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` ;; *) case $am_py_exec_prefix in /usr|/System*) ;; *) am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac fi { $as_echo "$as_me:$LINENO: result: $am_cv_python_pyexecdir" >&5 $as_echo "$am_cv_python_pyexecdir" >&6; } pyexecdir=$am_cv_python_pyexecdir pkgpyexecdir=\${pyexecdir}/$PACKAGE fi { $as_echo "$as_me:$LINENO: checking for headers required to compile python extensions" >&5 $as_echo_n "checking for headers required to compile python extensions... " >&6; } py_prefix=`$PYTHON -c "import sys; print sys.prefix"` py_exec_prefix=`$PYTHON -c "import sys; print sys.exec_prefix"` if test -x "$PYTHON-config"; then PYTHON_INCLUDES=`$PYTHON-config --includes 2>/dev/null` else PYTHON_INCLUDES="-I${py_prefix}/include/python${PYTHON_VERSION}" if test "$py_prefix" != "$py_exec_prefix"; then PYTHON_INCLUDES="$PYTHON_INCLUDES -I${py_exec_prefix}/include/python${PYTHON_VERSION}" fi fi save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $PYTHON_INCLUDES" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then { $as_echo "$as_me:$LINENO: result: found" >&5 $as_echo "found" >&6; } { $as_echo "$as_me:$LINENO: checking for python libraries" >&5 $as_echo_n "checking for python libraries... " >&6; } link_pymodules_libpython=false; if egrep "^#define Py_ENABLE_SHARED" "${py_exec_prefix}/include/python${PYTHON_VERSION}/pyconfig.h" > /dev/null ; then if test x`uname -s` != xDarwin; then PYTHON_LDFLAGS="-no-undefined" link_pymodules_libpython=true; fi fi PYTHON_LIB_DEPS=`$PYTHON -c "from distutils import sysconfig; print sysconfig.get_config_var('SYSLIBS'), sysconfig.get_config_var('SHLIBS')"` PYTHON_LIBDIR=`$PYTHON -c "from distutils import sysconfig; print sysconfig.get_config_var('LIBDIR')"` PYTHON_LIBPL=`$PYTHON -c "from distutils import sysconfig; print sysconfig.get_config_var('LIBPL')"` save_LIBS="$LIBS" PYTHON_EMBED_LIBS="-L${PYTHON_LIBDIR} ${PYTHON_LIB_DEPS} -lpython${PYTHON_VERSION}" LIBS="$LIBS $PYTHON_EMBED_LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then LIBS="$save_LIBS"; if $link_pymodules_libpython; then PYTHON_LIBS="$PYTHON_EMBED_LIBS"; fi { $as_echo "$as_me:$LINENO: result: $PYTHON_EMBED_LIBS" >&5 $as_echo "$PYTHON_EMBED_LIBS" >&6; }; else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 PYTHON_EMBED_LIBS="-L${PYTHON_LIBPL} ${PYTHON_LIB_DEPS} -lpython${PYTHON_VERSION}" LIBS="$save_LIBS $PYTHON_EMBED_LIBS"; cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then LIBS="$save_LIBS"; if $link_pymodules_libpython; then PYTHON_LIBS="$PYTHON_EMBED_LIBS"; fi { $as_echo "$as_me:$LINENO: result: $PYTHON_EMBED_LIBS" >&5 $as_echo "$PYTHON_EMBED_LIBS" >&6; }; else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: result: not found" >&5 $as_echo "not found" >&6; }; { { $as_echo "$as_me:$LINENO: error: could not find Python headers or library" >&5 $as_echo "$as_me: error: could not find Python headers or library" >&2;} { (exit 1); exit 1; }; } fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { $as_echo "$as_me:$LINENO: result: not found" >&5 $as_echo "not found" >&6; } { { $as_echo "$as_me:$LINENO: error: could not find Python headers or library" >&5 $as_echo "$as_me: error: could not find Python headers or library" >&2;} { (exit 1); exit 1; }; } fi rm -f conftest.err conftest.$ac_ext CPPFLAGS="$save_CPPFLAGS" if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_PKG_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_path_ac_pt_PKG_CONFIG+set}" = set; then $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:$LINENO: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:$LINENO: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi pkg_failed=no { $as_echo "$as_me:$LINENO: checking for libparted" >&5 $as_echo_n "checking for libparted... " >&6; } if test -n "$libparted_CFLAGS"; then pkg_cv_libparted_CFLAGS="$libparted_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"libparted >= 2.3\"") >&5 ($PKG_CONFIG --exists --print-errors "libparted >= 2.3") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_libparted_CFLAGS=`$PKG_CONFIG --cflags "libparted >= 2.3" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$libparted_LIBS"; then pkg_cv_libparted_LIBS="$libparted_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { ($as_echo "$as_me:$LINENO: \$PKG_CONFIG --exists --print-errors \"libparted >= 2.3\"") >&5 ($PKG_CONFIG --exists --print-errors "libparted >= 2.3") 2>&5 ac_status=$? $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then pkg_cv_libparted_LIBS=`$PKG_CONFIG --libs "libparted >= 2.3" 2>/dev/null` else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then libparted_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "libparted >= 2.3" 2>&1` else libparted_PKG_ERRORS=`$PKG_CONFIG --print-errors "libparted >= 2.3" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$libparted_PKG_ERRORS" >&5 { { $as_echo "$as_me:$LINENO: error: Package requirements (libparted >= 2.3) were not met: $libparted_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables libparted_CFLAGS and libparted_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. 
" >&5 $as_echo "$as_me: error: Package requirements (libparted >= 2.3) were not met: $libparted_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables libparted_CFLAGS and libparted_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. " >&2;} { (exit 1); exit 1; }; } elif test $pkg_failed = untried; then { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables libparted_CFLAGS and libparted_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see <http://pkg-config.freedesktop.org/>. See \`config.log' for more details." >&5 $as_echo "$as_me: error: The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables libparted_CFLAGS and libparted_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see <http://pkg-config.freedesktop.org/>. See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } else libparted_CFLAGS=$pkg_cv_libparted_CFLAGS libparted_LIBS=$pkg_cv_libparted_LIBS { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } : fi { $as_echo "$as_me:$LINENO: checking for ped_get_version in -lparted" >&5 $as_echo_n "checking for ped_get_version in -lparted... " >&6; } if test "${ac_cv_lib_parted_ped_get_version+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lparted $LIBS" cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ped_get_version (); int main () { return ped_get_version (); ; return 0; } _ACEOF rm -f conftest.$ac_objext conftest$ac_exeext if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || $as_test_x conftest$ac_exeext }; then ac_cv_lib_parted_ped_get_version=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_cv_lib_parted_ped_get_version=no fi rm -rf conftest.dSYM rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:$LINENO: result: $ac_cv_lib_parted_ped_get_version" >&5 $as_echo "$ac_cv_lib_parted_ped_get_version" >&6; } if test "x$ac_cv_lib_parted_ped_get_version" = x""yes; then : else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: *** Unable to find requested library libparted See \`config.log' for more details." >&5 $as_echo "$as_me: error: *** Unable to find requested library libparted See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi for ac_header in parted/parted.h do as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 $as_echo_n "checking $ac_header usability... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ $ac_includes_default #include <$ac_header> _ACEOF rm -f conftest.$ac_objext if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_header_compiler=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 $as_echo_n "checking $ac_header presence... " >&6; } cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include <$ac_header> _ACEOF if { (ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" $as_echo "$ac_try_echo") >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } >/dev/null && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then ac_header_preproc=yes else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_header_preproc=no fi rm -f conftest.err conftest.$ac_ext { $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in yes:no: ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} ac_header_preproc=yes ;; no:yes:* ) { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 $as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 $as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} ( cat <<\_ASBOX ## ---------------------------------------- ## ## Report this to pyparted-devel@redhat.com ## ## ---------------------------------------- ## _ASBOX ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 $as_echo_n "checking for $ac_header... " >&6; } if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else eval "$as_ac_Header=\$ac_header_preproc" fi ac_res=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi as_val=`eval 'as_val=${'$as_ac_Header'} $as_echo "$as_val"'` if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF else { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { { $as_echo "$as_me:$LINENO: error: *** Header file $ac_header not found. See \`config.log' for more details." >&5 $as_echo "$as_me: error: *** Header file $ac_header not found. See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; }; } fi done LIBPARTED_LIBS="$(pkg-config --libs libparted)" LIBPARTED_VERSION=2.3 cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ _ACEOF cat confdefs.h >>conftest.$ac_ext cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include &5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then cat >>confdefs.h <<\_ACEOF #define HAVE_PED_PARTITION_LEGACY_BOOT 1 _ACEOF else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes (double-quote # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} cat confcache >$cache_file else { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. 
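  # Illustrative example, assuming a hypothetical entry `foo.o' in $LIBOBJS
  # (pyparted's configure.ac does not register any itself): the sed script
  # above would have reduced it to `foo', and the two lines below would record
  # it as `${LIBOBJDIR}foo$U.o' in LIBOBJS and `${LIBOBJDIR}foo$U.lo' in
  # LTLIBOBJS for make to expand later.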
ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." >&5 $as_echo "$as_me: error: conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." >&2;} { (exit 1); exit 1; }; } fi : ${CONFIG_STATUS=./config.status} ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # PATH needs CR # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. 
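# Rough sketch of the probe below: a very long string of backslashes is built
# and handed to `printf %s'; if it survives unchanged, printf is used for
# as_echo and as_echo_n, otherwise the script falls back to `/usr/ucb/echo -n'
# or, as a last resort, to an expr-based substitute.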
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. 
# (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 # Save the log message, to keep $[0] and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by pyparted $as_me 3.6, which was generated by GNU Autoconf 2.63. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. 
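# These three lists are frozen copies of the targets registered at configure
# time; when config.status is later run without explicit FILE or --header
# arguments it falls back to regenerating everything recorded here.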
config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files from templates according to the current configuration. Usage: $0 [OPTION]... [FILE]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_version="\\ pyparted config.status 3.6 configured by $0, generated by GNU Autoconf 2.63, with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" Copyright (C) 2008 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac CONFIG_FILES="$CONFIG_FILES '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header { $as_echo "$as_me: error: ambiguous option: $1 Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; };; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) { $as_echo "$as_me: error: unrecognized option: $1 Try \`$0 --help' for more information." 
>&2 { (exit 1); exit 1; }; } ;; *) ac_config_targets="$ac_config_targets $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' enable_static='`$ECHO "X$enable_static" | $Xsed -e "$delay_single_quote_subst"`' macro_version='`$ECHO "X$macro_version" | $Xsed -e "$delay_single_quote_subst"`' macro_revision='`$ECHO "X$macro_revision" | $Xsed -e "$delay_single_quote_subst"`' enable_shared='`$ECHO "X$enable_shared" | $Xsed -e "$delay_single_quote_subst"`' pic_mode='`$ECHO "X$pic_mode" | $Xsed -e "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "X$enable_fast_install" | $Xsed -e "$delay_single_quote_subst"`' host_alias='`$ECHO "X$host_alias" | $Xsed -e "$delay_single_quote_subst"`' host='`$ECHO "X$host" | $Xsed -e "$delay_single_quote_subst"`' host_os='`$ECHO "X$host_os" | $Xsed -e "$delay_single_quote_subst"`' build_alias='`$ECHO "X$build_alias" | $Xsed -e "$delay_single_quote_subst"`' build='`$ECHO "X$build" | $Xsed -e "$delay_single_quote_subst"`' build_os='`$ECHO "X$build_os" | $Xsed -e "$delay_single_quote_subst"`' SED='`$ECHO "X$SED" | $Xsed -e "$delay_single_quote_subst"`' Xsed='`$ECHO "X$Xsed" | $Xsed -e "$delay_single_quote_subst"`' GREP='`$ECHO "X$GREP" | $Xsed -e "$delay_single_quote_subst"`' EGREP='`$ECHO "X$EGREP" | $Xsed -e "$delay_single_quote_subst"`' FGREP='`$ECHO "X$FGREP" | $Xsed -e "$delay_single_quote_subst"`' LD='`$ECHO "X$LD" | $Xsed -e "$delay_single_quote_subst"`' NM='`$ECHO "X$NM" | $Xsed -e "$delay_single_quote_subst"`' LN_S='`$ECHO "X$LN_S" | $Xsed -e "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "X$max_cmd_len" | $Xsed -e "$delay_single_quote_subst"`' ac_objext='`$ECHO "X$ac_objext" | $Xsed -e "$delay_single_quote_subst"`' exeext='`$ECHO "X$exeext" | $Xsed -e "$delay_single_quote_subst"`' lt_unset='`$ECHO "X$lt_unset" | $Xsed -e "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "X$lt_SP2NL" | $Xsed -e "$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "X$lt_NL2SP" | $Xsed -e "$delay_single_quote_subst"`' reload_flag='`$ECHO "X$reload_flag" | $Xsed -e "$delay_single_quote_subst"`' reload_cmds='`$ECHO "X$reload_cmds" | $Xsed -e "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "X$OBJDUMP" | $Xsed -e "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "X$deplibs_check_method" | $Xsed -e "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "X$file_magic_cmd" | $Xsed -e "$delay_single_quote_subst"`' AR='`$ECHO "X$AR" | $Xsed -e "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "X$AR_FLAGS" | $Xsed -e "$delay_single_quote_subst"`' STRIP='`$ECHO "X$STRIP" | $Xsed -e "$delay_single_quote_subst"`' 
RANLIB='`$ECHO "X$RANLIB" | $Xsed -e "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "X$old_postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "X$old_postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "X$old_archive_cmds" | $Xsed -e "$delay_single_quote_subst"`' CC='`$ECHO "X$CC" | $Xsed -e "$delay_single_quote_subst"`' CFLAGS='`$ECHO "X$CFLAGS" | $Xsed -e "$delay_single_quote_subst"`' compiler='`$ECHO "X$compiler" | $Xsed -e "$delay_single_quote_subst"`' GCC='`$ECHO "X$GCC" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "X$lt_cv_sys_global_symbol_pipe" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "X$lt_cv_sys_global_symbol_to_cdecl" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`' objdir='`$ECHO "X$objdir" | $Xsed -e "$delay_single_quote_subst"`' SHELL='`$ECHO "X$SHELL" | $Xsed -e "$delay_single_quote_subst"`' ECHO='`$ECHO "X$ECHO" | $Xsed -e "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "X$MAGIC_CMD" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "X$lt_prog_compiler_no_builtin_flag" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "X$lt_prog_compiler_wl" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "X$lt_prog_compiler_pic" | $Xsed -e "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "X$lt_prog_compiler_static" | $Xsed -e "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o='`$ECHO "X$lt_cv_prog_compiler_c_o" | $Xsed -e "$delay_single_quote_subst"`' need_locks='`$ECHO "X$need_locks" | $Xsed -e "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "X$DSYMUTIL" | $Xsed -e "$delay_single_quote_subst"`' NMEDIT='`$ECHO "X$NMEDIT" | $Xsed -e "$delay_single_quote_subst"`' LIPO='`$ECHO "X$LIPO" | $Xsed -e "$delay_single_quote_subst"`' OTOOL='`$ECHO "X$OTOOL" | $Xsed -e "$delay_single_quote_subst"`' OTOOL64='`$ECHO "X$OTOOL64" | $Xsed -e "$delay_single_quote_subst"`' libext='`$ECHO "X$libext" | $Xsed -e "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "X$shrext_cmds" | $Xsed -e "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "X$extract_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO "X$archive_cmds_need_lc" | $Xsed -e "$delay_single_quote_subst"`' enable_shared_with_static_runtimes='`$ECHO "X$enable_shared_with_static_runtimes" | $Xsed -e "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "X$export_dynamic_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "X$whole_archive_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "X$compiler_needs_object" | $Xsed -e "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "X$old_archive_from_new_cmds" | $Xsed -e "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds='`$ECHO "X$old_archive_from_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`' archive_cmds='`$ECHO "X$archive_cmds" | $Xsed -e "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "X$archive_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`' module_cmds='`$ECHO "X$module_cmds" | $Xsed -e "$delay_single_quote_subst"`' 
module_expsym_cmds='`$ECHO "X$module_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "X$with_gnu_ld" | $Xsed -e "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "X$allow_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "X$no_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "X$hardcode_libdir_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_ld='`$ECHO "X$hardcode_libdir_flag_spec_ld" | $Xsed -e "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "X$hardcode_libdir_separator" | $Xsed -e "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "X$hardcode_direct" | $Xsed -e "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO "X$hardcode_direct_absolute" | $Xsed -e "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "X$hardcode_minus_L" | $Xsed -e "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "X$hardcode_shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "X$hardcode_automatic" | $Xsed -e "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "X$inherit_rpath" | $Xsed -e "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "X$link_all_deplibs" | $Xsed -e "$delay_single_quote_subst"`' fix_srcfile_path='`$ECHO "X$fix_srcfile_path" | $Xsed -e "$delay_single_quote_subst"`' always_export_symbols='`$ECHO "X$always_export_symbols" | $Xsed -e "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "X$export_symbols_cmds" | $Xsed -e "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "X$exclude_expsyms" | $Xsed -e "$delay_single_quote_subst"`' include_expsyms='`$ECHO "X$include_expsyms" | $Xsed -e "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "X$prelink_cmds" | $Xsed -e "$delay_single_quote_subst"`' file_list_spec='`$ECHO "X$file_list_spec" | $Xsed -e "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "X$variables_saved_for_relink" | $Xsed -e "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "X$need_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`' need_version='`$ECHO "X$need_version" | $Xsed -e "$delay_single_quote_subst"`' version_type='`$ECHO "X$version_type" | $Xsed -e "$delay_single_quote_subst"`' runpath_var='`$ECHO "X$runpath_var" | $Xsed -e "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "X$shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`' shlibpath_overrides_runpath='`$ECHO "X$shlibpath_overrides_runpath" | $Xsed -e "$delay_single_quote_subst"`' libname_spec='`$ECHO "X$libname_spec" | $Xsed -e "$delay_single_quote_subst"`' library_names_spec='`$ECHO "X$library_names_spec" | $Xsed -e "$delay_single_quote_subst"`' soname_spec='`$ECHO "X$soname_spec" | $Xsed -e "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO "X$postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "X$postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' finish_cmds='`$ECHO "X$finish_cmds" | $Xsed -e "$delay_single_quote_subst"`' finish_eval='`$ECHO "X$finish_eval" | $Xsed -e "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "X$hardcode_into_libs" | $Xsed -e "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "X$sys_lib_search_path_spec" | $Xsed -e "$delay_single_quote_subst"`' sys_lib_dlsearch_path_spec='`$ECHO "X$sys_lib_dlsearch_path_spec" | $Xsed -e "$delay_single_quote_subst"`' hardcode_action='`$ECHO "X$hardcode_action" | $Xsed -e "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "X$enable_dlopen" | $Xsed -e 
"$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "X$enable_dlopen_self" | $Xsed -e "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "X$enable_dlopen_self_static" | $Xsed -e "$delay_single_quote_subst"`' old_striplib='`$ECHO "X$old_striplib" | $Xsed -e "$delay_single_quote_subst"`' striplib='`$ECHO "X$striplib" | $Xsed -e "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # Quote evaled strings. for var in SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ OBJDUMP \ deplibs_check_method \ file_magic_cmd \ AR \ AR_FLAGS \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ SHELL \ ECHO \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_wl \ lt_prog_compiler_pic \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_flag_spec_ld \ hardcode_libdir_separator \ fix_srcfile_path \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ finish_eval \ old_striplib \ striplib; do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ sys_lib_dlsearch_path_spec; do case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Fix-up fallback echo if it was mangled by the above quoting rules. case \$lt_ECHO in *'\\\$0 --fallback-echo"') lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\$0 --fallback-echo"\$/\$0 --fallback-echo"/'\` ;; esac ac_aux_dir='$ac_aux_dir' xsi_shell='$xsi_shell' lt_shell_append='$lt_shell_append' # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile' _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. 
for ac_config_target in $ac_config_targets do case $ac_config_target in "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; "include/docstrings/Makefile") CONFIG_FILES="$CONFIG_FILES include/docstrings/Makefile" ;; "include/typeobjects/Makefile") CONFIG_FILES="$CONFIG_FILES include/typeobjects/Makefile" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "src/parted/Makefile") CONFIG_FILES="$CONFIG_FILES src/parted/Makefile" ;; "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; "tests/_ped/Makefile") CONFIG_FILES="$CONFIG_FILES tests/_ped/Makefile" ;; "tests/parted/Makefile") CONFIG_FILES="$CONFIG_FILES tests/parted/Makefile" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 $as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;} { (exit 1); exit 1; }; };; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 trap '{ (exit 1); exit 1; }' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || { $as_echo "$as_me: cannot create a temporary directory in ." >&2 { (exit 1); exit 1; } } # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=' ' ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 $as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . 
./conf$$subs.sh || { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 $as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 $as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\).*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\).*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5 $as_echo "$as_me: error: could not setup config files machinery" >&2;} { (exit 1); exit 1; }; } _ACEOF # VPATH may cause trouble with some makes, so we remove $(srcdir), # ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=/{ s/:*\$(srcdir):*/:/ s/:*\${srcdir}:*/:/ s/:*@srcdir@:*/:/ s/^\([^=]*=[ ]*\):*/\1/ s/:*$// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_t=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_t"; then break elif $ac_last_try; then { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_HEADERS" >&5 $as_echo "$as_me: error: could not make $CONFIG_HEADERS" >&2;} { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
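# In outline: the sed program below turns each `#define NAME VALUE' line of
# confdefs.h into awk assignments of the form D["NAME"]=" VALUE" (plus
# P["NAME"] for any parameter list), splitting overlong lines at the delimiter
# chosen above; the awk script appended afterwards uses those arrays to
# rewrite config.h.in into the final config.h.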
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5 $as_echo "$as_me: error: could not setup config headers machinery" >&2;} { (exit 1); exit 1; }; } fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5 $as_echo "$as_me: error: invalid tag $ac_tag" >&2;} { (exit 1); exit 1; }; };; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 $as_echo "$as_me: error: cannot find input file: $ac_f" >&2;} { (exit 1); exit 1; }; };; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac ac_file_inputs="$ac_file_inputs '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:$LINENO: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. 
case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$tmp/stdin" \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` { as_dir="$ac_dir" case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 $as_echo "$as_me: error: cannot create directory $as_dir" >&2;} { (exit 1); exit 1; }; }; } ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
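# (Concretely: newer autoconf defines datadir, docdir, mandir and friends
#  in terms of datarootdir, so a template generated from older input files
#  may still contain @datadir@ without ever mentioning @datarootdir@.  The
#  sed probe below detects that situation, warns that such a template
#  ignores --datarootdir, and adds substitutions that expand @datadir@ and
#  any literal ${datarootdir} occurrences directly, so the generated file
#  still ends up with usable paths.)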
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p ' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." 
>&2;} rm -f "$tmp/stdin" case $ac_file in -) cat "$tmp/out" && rm -f "$tmp/out";; *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; esac \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" } >"$tmp/config.h" \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$tmp/config.h" "$ac_file" \ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 $as_echo "$as_me: error: could not create $ac_file" >&2;} { (exit 1); exit 1; }; } fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5 $as_echo "$as_me: error: could not create -" >&2;} { (exit 1); exit 1; }; } fi # Compute "$ac_file"'s index in $config_headers. _am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:$LINENO: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Autoconf 2.62 quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named `Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ X"$mf" : 'X\(//\)$' \| \ X"$mf" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running `make'. 
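# A sketch of the idea, using a hypothetical object name: an automake
# Makefile carries dependency include lines of the form
#
#   include ./.deps/_pedmodule.Plo
#
# (with .deps spelled via the DEPDIR variable).  The sed pipeline below
# collects those file names, substitutes the real DEPDIR value, and
# pre-creates each file with the single line "# dummy", so that the first
# make run can include them before any real dependency information exists.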
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # When using ansi2knr, U may be empty or an underscore; expand it U=`sed -n 's/^U = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`$as_dirname -- "$file" || $as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$file" : 'X\(//\)[^/]' \| \ X"$file" : 'X\(//\)$' \| \ X"$file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` { as_dir=$dirpart/$fdir case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 $as_echo "$as_me: error: cannot create directory $as_dir" >&2;} { (exit 1); exit 1; }; }; } # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ;; "libtool":C) # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008 Free Software Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. 
# # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # The names of the tagged configurations supported by this script. available_tags="" # ### BEGIN LIBTOOL CONFIG # Whether or not to build static libraries. build_old_libs=$enable_static # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # How to create reloadable object files. reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # An object symbol dumper. OBJDUMP=$lt_OBJDUMP # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method == "file_magic". file_magic_cmd=$lt_file_magic_cmd # The archiver. AR=$lt_AR AR_FLAGS=$lt_AR_FLAGS # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # A C compiler. LTCC=$lt_CC # LTCC compiler flags. LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # The name of the directory that contains temporary libtool files. objdir=$objdir # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that does not interpret backslashes. ECHO=$lt_ECHO # Used to examine libraries when file_magic_cmd begins with "file". MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? 
need_locks=$lt_need_locks # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries. sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. 
allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # If ld is used when linking, flag to hardcode \$libdir into a binary # during linking. This must work even if \$libdir does not exist. hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Fix the shell variable \$srcfile for the compiler. fix_srcfile_path=$lt_fix_srcfile_path # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds # Specify filename containing input files. file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. 
hardcode_action=$hardcode_action # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac ltmain="$ac_aux_dir/ltmain.sh" # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) case $xsi_shell in yes) cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac } # func_basename file func_basename () { func_basename_result="${1##*/}" } # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac func_basename_result="${1##*/}" } # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). func_stripname () { # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary parameter first. func_stripname_result=${3} func_stripname_result=${func_stripname_result#"${1}"} func_stripname_result=${func_stripname_result%"${2}"} } # func_opt_split func_opt_split () { func_opt_split_opt=${1%%=*} func_opt_split_arg=${1#*=} } # func_lo2o object func_lo2o () { case ${1} in *.lo) func_lo2o_result=${1%.lo}.${objext} ;; *) func_lo2o_result=${1} ;; esac } # func_xform libobj-or-source func_xform () { func_xform_result=${1%.*}.lo } # func_arith arithmetic-term... func_arith () { func_arith_result=$(( $* )) } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=${#1} } _LT_EOF ;; *) # Bourne compatible functions. cat << \_LT_EOF >> "$cfgfile" # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { # Extract subdirectory from the argument. 
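    # (The leading "X", stripped again by $Xsed, keeps an argument that is
    # empty or starts with "-" from being taken for an option by echo or
    # sed; the other fallback functions below use the same idiom.)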
func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi } # func_basename file func_basename () { func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` } # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # func_strip_suffix prefix name func_stripname () { case ${2} in .*) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "X${3}" \ | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; esac } # sed scripts: my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' my_sed_long_arg='1s/^-[^=]*=//' # func_opt_split func_opt_split () { func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` } # func_lo2o object func_lo2o () { func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` } # func_xform libobj-or-source func_xform () { func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[^.]*$/.lo/'` } # func_arith arithmetic-term... func_arith () { func_arith_result=`expr "$@"` } # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` } _LT_EOF esac case $lt_shell_append in yes) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$1+=\$2" } _LT_EOF ;; *) cat << \_LT_EOF >> "$cfgfile" # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "$1=\$$1\$2" } _LT_EOF ;; esac sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ;; esac done # for ac_tag { (exit 0); exit 0; } _ACEOF chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5 $as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. 
$ac_cs_success || { (exit 1); exit 1; } fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi pyparted-3.6/install-sh0000755000076400007640000003253711542323606012163 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2009-04-28.21; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. 
-c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call `install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then trap '(exit $?); exit' 1 2 13 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names starting with `-'. case $src in -*) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # Protect names starting with `-'. case $dst in -*) dst=./$dst;; esac # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? 
fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writeable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; -*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test -z "$d" && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. 
# # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: pyparted-3.6/missing0000755000076400007640000002623311542323606011552 00000000000000#! /bin/sh # Common stub for a few missing GNU programs while installing. scriptversion=2009-04-28.21; # UTC # Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006, # 2008, 2009 Free Software Foundation, Inc. # Originally by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try \`$0 --help' for more information" exit 1 fi run=: sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' sed_minuso='s/.* -o \([^ ]*\).*/\1/p' # In the cases where this matters, `missing' is being run in the # srcdir already. 
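# Automake-generated Makefiles normally reach this script through
# variables of roughly this shape (path and version are illustrative):
#
#   ACLOCAL = /bin/sh /path/to/pyparted-3.6/missing --run aclocal-1.11
#   AUTOMAKE = /bin/sh /path/to/pyparted-3.6/missing --run automake-1.11
#
# so a rebuild on a machine that lacks the maintainer tools prints a
# warning and fakes the expected output instead of failing outright.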
if test -f configure.ac; then configure_ac=configure.ac else configure_ac=configure.in fi msg="missing on your system" case $1 in --run) # Try to run requested program, and just exit if it succeeds. run= shift "$@" && exit 0 # Exit code 63 means version mismatch. This often happens # when the user try to use an ancient version of a tool on # a file that requires a minimum version. In this case we # we should proceed has if the program had been absent, or # if --run hadn't been passed. if test $? = 63; then run=: msg="probably too old" fi ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an error status if there is no known handling for PROGRAM. Options: -h, --help display this help and exit -v, --version output version information and exit --run try to run the given command, and emulate it if it fails Supported PROGRAM values: aclocal touch file \`aclocal.m4' autoconf touch file \`configure' autoheader touch file \`config.h.in' autom4te touch the output file, or create a stub one automake touch all \`Makefile.in' files bison create \`y.tab.[ch]', if possible, from existing .[ch] flex create \`lex.yy.c', if possible, from existing .c help2man touch the output file lex create \`lex.yy.c', if possible, from existing .c makeinfo touch the output file tar try tar, gnutar, gtar, then tar without non-portable flags yacc create \`y.tab.[ch]', if possible, from existing .[ch] Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and \`g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: Unknown \`$1' option" echo 1>&2 "Try \`$0 --help' for more information" exit 1 ;; esac # normalize program name to check for. program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` # Now exit if we have it, but it failed. Also exit now if we # don't have it and --version was passed (most likely to detect # the program). This is about non-GNU programs, so use $1 not # $program. case $1 in lex*|yacc*) # Not GNU programs, they don't have --version. ;; tar*) if test -n "$run"; then echo 1>&2 "ERROR: \`tar' requires --run" exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then exit 1 fi ;; *) if test -z "$run" && ($1 --version) > /dev/null 2>&1; then # We have it, but it failed. exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then # Could not run --version or --help. This is probably someone # running `$TOOL --version' or `$TOOL --help' to check whether # $TOOL exists and not knowing $TOOL uses missing. exit 1 fi ;; esac # If it does not exist, or fails to run (possibly an outdated version), # try to emulate it. case $program in aclocal*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." touch aclocal.m4 ;; autoconf*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." touch configure ;; autoheader*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acconfig.h' or \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." 
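# The commands below keep the build going in the meantime: the header
# names are pulled from AC_CONFIG_HEADER (or AM_CONFIG_HEADER) in the
# configure input and the matching templates are touched -- for a plain
# "config.h" argument that means touching config.h.in -- so make sees an
# up-to-date timestamp without autoheader actually running.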
files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` test -z "$files" && files="config.h" touch_files= for f in $files; do case $f in *:*) touch_files="$touch_files "`echo "$f" | sed -e 's/^[^:]*://' -e 's/:.*//'`;; *) touch_files="$touch_files $f.in";; esac done touch $touch_files ;; automake*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." find . -type f -name Makefile.am -print | sed 's/\.am$/.in/' | while read f; do touch "$f"; done ;; autom4te*) echo 1>&2 "\ WARNING: \`$1' is needed, but is $msg. You might have modified some files without having the proper tools for further handling them. You can get \`$1' as part of \`Autoconf' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo "#! /bin/sh" echo "# Created by GNU Automake missing as a replacement of" echo "# $ $@" echo "exit 0" chmod +x $file exit 1 fi ;; bison*|yacc*) echo 1>&2 "\ WARNING: \`$1' $msg. You should only need it if you modified a \`.y' file. You may need the \`Bison' package in order for those modifications to take effect. You can get \`Bison' from any GNU archive site." rm -f y.tab.c y.tab.h if test $# -ne 1; then eval LASTARG="\${$#}" case $LASTARG in *.y) SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.c fi SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.h fi ;; esac fi if test ! -f y.tab.h; then echo >y.tab.h fi if test ! -f y.tab.c; then echo 'main() { return 0; }' >y.tab.c fi ;; lex*|flex*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.l' file. You may need the \`Flex' package in order for those modifications to take effect. You can get \`Flex' from any GNU archive site." rm -f lex.yy.c if test $# -ne 1; then eval LASTARG="\${$#}" case $LASTARG in *.l) SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" lex.yy.c fi ;; esac fi if test ! -f lex.yy.c; then echo 'main() { return 0; }' >lex.yy.c fi ;; help2man*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a dependency of a manual page. You may need the \`Help2man' package in order for those modifications to take effect. You can get \`Help2man' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo ".ab help2man is required to generate this page" exit $? fi ;; makeinfo*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.texi' or \`.texinfo' file, or any other file indirectly affecting the aspect of the manual. The spurious call might also be the consequence of using a buggy \`make' (AIX, DU, IRIX). You might want to install the \`Texinfo' package or the \`GNU make' package. Grab either from any GNU archive site." # The file to touch is that specified with -o ... file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -z "$file"; then # ... or it is the one specified with @setfilename ... 
infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` file=`sed -n ' /^@setfilename/{ s/.* \([^ ]*\) *$/\1/ p q }' $infile` # ... or it is derived from the source name (dir/f.texi becomes f.info) test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info fi # If the file does not exist, the user really needs makeinfo; # let's fail without touching anything. test -f $file || exit 1 touch $file ;; tar*) shift # We have already tried tar in the generic part. # Look for gnutar/gtar before invocation to avoid ugly error # messages. if (gnutar --version > /dev/null 2>&1); then gnutar "$@" && exit 0 fi if (gtar --version > /dev/null 2>&1); then gtar "$@" && exit 0 fi firstarg="$1" if shift; then case $firstarg in *o*) firstarg=`echo "$firstarg" | sed s/o//` tar "$firstarg" "$@" && exit 0 ;; esac case $firstarg in *h*) firstarg=`echo "$firstarg" | sed s/h//` tar "$firstarg" "$@" && exit 0 ;; esac fi echo 1>&2 "\ WARNING: I can't seem to be able to run \`tar' with the given arguments. You may want to install GNU tar or Free paxutils, or check the command line arguments." exit 1 ;; *) echo 1>&2 "\ WARNING: \`$1' is needed, and is $msg. You might have modified some files without having the proper tools for further handling them. Check the \`README' file, it often tells you about the needed prerequisites for installing this package. You may also peek at any GNU archive site, in case some other package would contain this missing \`$1' program." exit 1 ;; esac exit 0 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: pyparted-3.6/config.sub0000755000076400007640000010316711542323606012140 00000000000000#! /bin/sh # Configuration validation subroutine script. # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 # Free Software Foundation, Inc. timestamp='2009-11-20' # This file is (in principle) common to ALL GNU software. # The presence of a machine in this file suggests that SOME GNU software # can handle that machine. It does not imply ALL GNU software can. # # This file is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA # 02110-1301, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Please send patches to . Submit a context # diff and a properly formatted GNU ChangeLog entry. # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. 
# Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS $0 [OPTION] ALIAS Canonicalize a configuration name. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. echo $1 exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \ uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \ kopensolaris*-gnu* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; *) basic_machine=`echo $1 | sed 's/-[^-]*$//'` if [ $basic_machine != $1 ] then os=`echo $1 | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. 
;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` ;; -windowsnt*) os=`echo $os | sed -e 's/windowsnt/winnt/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 
1750a | 580 \ | a29k \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ | bfin \ | c4x | clipper \ | d10v | d30v | dlx | dsp16xx \ | fido | fr30 | frv \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nios | nios2 \ | ns16k | ns32k \ | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ | pyramid \ | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu | strongarm \ | tahoe | thumb | tic4x | tic80 | tron \ | ubicom32 \ | v850 | v850e \ | we32k \ | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; m6811 | m68hc11 | m6812 | m68hc12 | picochip) # Motorola 68HC11/12. basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) ;; ms1) basic_machine=mt-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
580-* \ | a29k-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nios-* | nios2-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ | pyramid-* \ | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \ | tahoe-* | thumb-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | vax-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
386bsd) basic_machine=i386-unknown os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2* | dpx2*-bull) basic_machine=m68k-bull os=-sysv3 ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp 
basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppa-next) os=-nextstep3 ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; # I'm not sure what "Sysv32" means. Should this be sysv3.2? i*86v32) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; i386-vsta | vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; m88k-omron*) basic_machine=m88k-omron ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze) basic_machine=microblaze-xilinx ;; mingw32) basic_machine=i386-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; mvs) basic_machine=i370-ibm os=-mvs ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next ) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; nsr-tandem) basic_machine=nsr-tandem ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen 
| viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc) basic_machine=powerpc-unknown ;; ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle | ppc-le | powerpc-little) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little | ppc64-le | powerpc64-little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh) basic_machine=sh-hitachi os=-hms ;; sh5el) basic_machine=sh5le-unknown ;; sh64) basic_machine=sh64-unknown ;; sparclite-wrs | simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tic54x | c54x*) basic_machine=tic54x-unknown os=-coff ;; tic55x | c55x*) basic_machine=tic55x-unknown os=-coff ;; tic6x | c6x*) basic_machine=tic6x-unknown os=-coff ;; tile*) basic_machine=tile-unknown os=-linux-gnu ;; tx39) basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; 
vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; ymp) basic_machine=ymp-cray os=-unicos ;; z8k-*-coff) basic_machine=z8k-unknown os=-sim ;; z80-*-coff) basic_machine=z80-unknown os=-sim ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp10) # there are many clones, so DEC is not a safe bet basic_machine=pdp10-unknown ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) basic_machine=sparc-sun ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x"$os" != x"" ] then case $os in # First match some system type aliases # that might get confused with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -svr4*) os=-sysv4 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # First accept the basic system types. # The portable systems comes first. # Each alternative MUST END IN A *, to match a version number. # -sysv* is not here because it comes later, after sysvr4. 
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ | -openbsd* | -solidbsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo $os | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo $os | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -osfrose*) os=-osfrose ;; -osf*) os=-osf ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2 ) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -es1800*) os=-ose ;; -xenix) os=-xenix ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -aros*) os=-aros ;; -kaos*) os=-kaos ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. # Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. 
Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 # This also exists in the configure program, but was not the # default. # os=-sunos4 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; *-be) os=-beos ;; *-haiku) os=-haiku ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next ) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-next) os=-nextstep3 ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac echo $basic_machine$os exit # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: pyparted-3.6/py-compile0000755000076400007640000001013511542323606012151 00000000000000#!/bin/sh # py-compile - Compile a Python program scriptversion=2009-04-28.21; # UTC # Copyright (C) 2000, 2001, 2003, 2004, 2005, 2008, 2009 Free Software # Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . if [ -z "$PYTHON" ]; then PYTHON=python fi basedir= destdir= files= while test $# -ne 0; do case "$1" in --basedir) basedir=$2 if test -z "$basedir"; then echo "$0: Missing argument to --basedir." 1>&2 exit 1 fi shift ;; --destdir) destdir=$2 if test -z "$destdir"; then echo "$0: Missing argument to --destdir." 1>&2 exit 1 fi shift ;; -h|--h*) cat <<\EOF Usage: py-compile [--help] [--version] [--basedir DIR] [--destdir DIR] FILES..." Byte compile some python scripts FILES. Use --destdir to specify any leading directory path to the FILES that you don't want to include in the byte compiled file. Specify --basedir for any additional path information you do want to be shown in the byte compiled file. Example: py-compile --destdir /tmp/pkg-root --basedir /usr/share/test test.py test2.py Report bugs to . EOF exit $? ;; -v|--v*) echo "py-compile $scriptversion" exit $? ;; *) files="$files $1" ;; esac shift done if test -z "$files"; then echo "$0: No files given. Try \`$0 --help' for more information." 1>&2 exit 1 fi # if basedir was given, then it should be prepended to filenames before # byte compilation. if [ -z "$basedir" ]; then pathtrans="path = file" else pathtrans="path = os.path.join('$basedir', file)" fi # if destdir was given, then it needs to be prepended to the filename to # byte compile but not go into the compiled file. if [ -z "$destdir" ]; then filetrans="filepath = path" else filetrans="filepath = os.path.normpath('$destdir' + os.sep + path)" fi $PYTHON -c " import sys, os, py_compile files = '''$files''' sys.stdout.write('Byte-compiling python modules...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() py_compile.compile(filepath, filepath + 'c', path) sys.stdout.write('\n')" || exit $? # this will fail for python < 1.5, but that doesn't matter ... $PYTHON -O -c " import sys, os, py_compile files = '''$files''' sys.stdout.write('Byte-compiling python modules (optimized versions) ...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() py_compile.compile(filepath, filepath + 'o', path) sys.stdout.write('\n')" 2>/dev/null || : # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: pyparted-3.6/COPYING0000644000076400007640000004307611110363047011204 00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 675 Mass Ave, Cambridge, MA 02139, USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS Appendix: How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) 19yy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) 19yy name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. pyparted-3.6/ChangeLog0000664000076400007640000111460311542323614011726 00000000000000commit a687c9671981c5f06e1a679f5ecf1407bbf21ad6 Author: David Cantrell Date: Tue Mar 22 21:55:30 2011 -1000 New version. commit 99d144c414a62d017f4cce076aaadb86e278650d Author: David Cantrell Date: Tue Mar 22 21:52:58 2011 -1000 Fix ConstraintNewTestCase in tests/parted commit 6f3f39262cbc1b6d20581c835e2cdfe661975bfb Author: David Cantrell Date: Tue Mar 22 17:21:27 2011 -1000 Fix FreshDiskTestCase and VersionTestCase in tests/parted. commit 76187f55e49796506c01919813cc42e19082f09b Author: David Cantrell Date: Tue Mar 22 17:20:28 2011 -1000 Fix pyparted_version() function. Handle version numbers in multiple formats: X, X.Y, and X.Y.Z. Also, any of those formats with -string on the end. commit c75465f29d4ff2cd2e8a95576d9d23b2e77bf948 Author: David Cantrell Date: Tue Mar 22 16:31:55 2011 -1000 Fix failing test cases in tests/_ped These were failing due to leftovers from incomplete functionality in either pyparted or libparted, but more things are working these days. Most notably, we now have working test cases for: DeviceGetMinimumAlignmentTestCase DeviceGetOptimumAlignmentTestCase PartitionGetNameTestCase Most of these changes are due to improvements in libparted. commit 8a8433094ea5adcd0f6214db1eceb43a849773bd Author: David Campbell Date: Fri Mar 18 13:53:09 2011 -0400 Added __archLabels and getLabels(). __archLabels is a tuple of pairs (disk label, regex) where regex specifies what architectures the given label supports, and getLabels is a function for returning a set containing the disk labels for some architecture. These additions are intended to replace the now deprecated archLabels dict. Signed-off-by: David Cantrell commit 60f08d6899deb5217f59fa193e5948f27bde0ffc Author: David Cantrell Date: Thu Mar 17 17:07:05 2011 -1000 Mark _exponent as deprecated, clean up Deprecated() Use a dict to define the deprecated globals and supplemental warning message text. commit 1c1ada9441ca54e0f6b80948c2cb8f63adfaf4b9 Author: David Cantrell Date: Thu Mar 17 16:53:43 2011 -1000 Mark partitionTypesDict as deprecated. Add code to mark globals with DeprecationWarning. Mark partitionTypesDict as deprecated. commit cbd1460e49fe085dff2711e3255b851daca3eaa1 Author: David Campbell Date: Tue Mar 8 14:50:04 2011 -0500 Added sizeToSectors function. Originally named bytesToSectors, but renamed to sizeToSectors (dcantrell). Signed-off-by: David Cantrell commit 1dea9e3d8617738c83166df1524968712d95cdbd Author: David Campbell Date: Thu Mar 17 14:12:38 2011 -0400 Use assertGreater and assertGreaterEqual There were some places where assertTrue was being used but assertGreater or assertGreaterEqual could have been used instead. Use the latter since it gives better output when there the assert fails. Signed-off-by: David Cantrell commit 0a3c14bd23643312286e461a1160230ff75f44a8 Author: David Cantrell Date: Thu Mar 17 16:31:11 2011 -1000 Formatting improvements. commit bf5bd64438a5ffbc6083853c96ff5078450b5eaa Author: David Cantrell Date: Thu Mar 17 16:28:32 2011 -1000 Add support for PED_PARTITION_LEGACY_BOOT flag. This is currently in the master branch for upstream parted and is part of the parted package in Fedora rawhide. The latest stable release of parted is 2.3. 
Since we cannot depend on the library version to know if PED_PARTITION_LEGACY_BOOT is defined, add in a special check for that constant and enable it in _ped and parted if it's present. commit aed73f598c02afeba923f34da76fd6af8dedeab3 Author: David Cantrell Date: Wed Mar 16 17:11:16 2011 -1000 Use the mailing list as the owner email address. Report bugs to the list or in Trac at http://fedorahosted.org/pyparted not me directly. commit aa722da61a6216620fe1bba6f319fe3738b20992 Author: David Cantrell Date: Wed Mar 16 16:58:44 2011 -1000 parted may not be installed as /sbin/parted, rely on $PATH Fedora-based systems install parted as /sbin/parted, but not every distribution does that. Rely on $PATH and let the shell find it. commit 5ef68057e3827c30ffdbfd59d430551c3fb581df Author: David Campbell Date: Sat Mar 12 21:13:59 2011 -0500 Use unittest.main() to run tests as scripts. This makes sure that the tests return the correct values on exiting. Signed-off-by: David Cantrell commit 458883b7063a7b9b7ef085167c44b5aaef818cf1 Author: David Campbell Date: Fri Mar 11 16:29:13 2011 -0500 Added getLength method. The classes Device, Partition, and Geometry now have a getLength method that is to replace the getSize method. It uses the new formatBytes function, allowing for a wider (and more correct) range of byte prefixes. It defaults to returning the length in sectors, so that it mirrors the length property. Also, sectors can be passed as an argument to allow easier dynamic unit selection in user programs. Signed-off-by: David Cantrell commit f3007caf42ffd3da77d73e36b5ac8a2244d343a0 Author: David Campbell Date: Mon Mar 7 14:08:39 2011 -0500 Added formatBytes function and __exponents dict. The old _exponents dictionary is incorrect and specifying the base seperate from the power seems prone to errors. Signed-off-by: David Cantrell commit 166e61a86c567a1da1b7a6ec25561a6ddbf06a82 Author: David Campbell Date: Fri Mar 11 18:06:39 2011 -0500 Fix script wrapper for _ped/test_ped.py I got the script working to import all the tests and run them as one suite, and noticed that there was a '=' where it should be '==' in test_ped.py. commit 167d7c9295cdcc9afe8cdc4ed9a5785fa3597612 Author: David Cantrell Date: Thu Mar 10 10:54:37 2011 -1000 We have a new contributor: David Campbell commit c76f82a008a0bfb91dd8295914843c92246b2ef5 Author: David Cantrell Date: Thu Mar 10 10:47:11 2011 -1000 Enable colorized test output for things like PASS and FAIL. Found it in the automake documentation, looked neat. commit 57b8b61979ba06ffd6ef097ee9df17530d65c028 Author: David Campbell Date: Wed Mar 9 15:37:13 2011 -0500 Made all the test suites importable. Added 'if __name__ = "__main__":' wrappers to all the test case scripts so that they can be imported as modules without running the suite. commit 6628aa6e31c1d0000f5870bc426a1c544fee456c Author: David Campbell Date: Wed Mar 9 11:14:13 2011 -0500 Use the skip decorator to skip unimplemented tests. Test output is more readable now, and regressions and real failures are easier to find. commit 5d2a4a88b297bd030e368ac7bb1d720bc5749124 Author: David Cantrell Date: Thu Mar 10 10:47:40 2011 -1000 Remove #! from test files, add $(PYTHON) to TESTS_ENVIRONMENT. We had #!/usr/bin/python at the top of each test file that ran, which is fine unless you are on a system with multiple versions of Python. Removing the sha-bang line and add $(PYTHON) to the TESTS_ENVIRONMENT variable in the Makefile.am files, which ensures our TESTS are run with the Python picked up by the configure script. 
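    The test-suite commits above (unittest.main() as the script entry point, importable
    test modules guarded by __main__, and the skip decorator for unimplemented tests)
    all rely on stock Python 2.7 unittest features rather than anything pyparted-specific.
    A minimal sketch of that pattern follows; the test case and method names are
    illustrative and are not taken from the pyparted test suite.

        import unittest

        class ExampleGeometryTestCase(unittest.TestCase):
            def test_length_is_positive(self):
                # assertGreaterEqual reports both operands on failure, which is
                # why the earlier commit prefers it over a bare assertTrue.
                self.assertGreaterEqual(1, 0)

            @unittest.skip("not implemented yet")
            def test_not_implemented(self):
                pass

        # The module can be imported without running anything; executing it as a
        # script runs the suite and exits non-zero if any test fails.
        if __name__ == "__main__":
            unittest.main()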
commit 86240b4be7d404c1ca7faccb6407ad6280ac518b Author: David Cantrell Date: Thu Mar 10 10:44:55 2011 -1000 Increase minimum required Python version to 2.7. commit 85b987d79735512dec5c961e46e02fee550f4a1b Author: David Cantrell Date: Tue Mar 8 13:09:09 2011 -1000 Updated the HACKING file. Reference PEP-8, clarify some statements. commit d64ca6451d8ea18408bb460b1280204be011b5a6 Author: David Campbell Date: Wed Mar 2 10:53:30 2011 -0500 Wrapped localeC to preserve attributes. The auto-generated documentation is much more readable now; although, the function signature still does not appear correct. commit 18c5e11aff077d3cbc51cc74e7eb5addd8a2965f Author: David Cantrell Date: Mon Jan 17 11:01:24 2011 -1000 New version. commit 63edbaaf86a3229d6dc625b016d05b10ff743a4e Author: David Cantrell Date: Mon Jan 17 09:56:30 2011 -1000 Revert "Allow getting the actual list from the cache." This reverts commit 696d14b1109787ca23de067cc0c1fbae9f81f968. We don't want to allow peeking in to the internal structures of CachedList. commit 953d56308b2790ccc3c02dec9d35279b5c5236a9 Author: David Cantrell Date: Sun Jan 16 01:39:12 2011 -1000 Bad commit in git repo seems to be gone. commit 0360d3020ae77431807acc4ba78342a9af7f74da Author: David Cantrell Date: Sun Jan 16 00:40:49 2011 -1000 Drop dependency on python-decorator module. Drop the dependency on the python-decorator module. Still retain the internal @localeC functionality, but without using the decorator module. Patch from Toshio Kuratomi. Resolves https://fedorahosted.org/pyparted/ticket/28 commit 696d14b1109787ca23de067cc0c1fbae9f81f968 Author: Joel Granados Moreno Date: Thu Aug 13 13:45:00 2009 +0200 Allow getting the actual list from the cache. commit 983d118f09bc3b5f974c3c2695ddd8ae8a718261 Author: Joel Granados Moreno Date: Thu Aug 13 13:45:00 2009 +0200 Differentiate the "Could not commit" messages. Very usefull when trying to debug down to libparted. commit c975e64fc73711194192815be80d5e88f6173b17 Author: Colin Watson Date: Tue Jun 22 09:29:22 2010 +0100 Import _ped.DiskLabelException into parted namespace commit a28d68388c6e346938b8f8497258242b30aa9f36 Author: David Cantrell Date: Wed Jul 7 08:23:08 2010 -1000 Return PED_EXCEPTION_NO for yes/no interactive exceptions. If we get a PED_EXCEPTION_YES_NO exception, we return PED_EXCEPTION_NO to libparted to avoid any potential data loss ('yes' is assuming the yes/no question would use the affirmative response to proceed with destructive operations, which may always be the case...it is for now). In pyparted, we then pass up an appropriate exception to the caller so the user can be prompted and action taken. commit f1d1612afb3c86b88db1935791613218908e0dc2 Author: David Cantrell Date: Thu Apr 29 09:21:30 2010 -1000 New version. commit 4e30c69c74d719ab6d6c63eaf5dcc66501a580f1 Author: David Cantrell Date: Thu Apr 29 08:48:10 2010 -1000 Handle PED_EXCEPTION_WARNING with PED_EXCEPTION_YES_NO (#575749) Catch PED_EXCEPTION_YES_NO and filter it up to the caller. GPT disk label detection currently uses this exception. commit 568a2925a13852fd4f37c232820a3ad1b2eda152 Author: Chris Lumens Date: Wed Apr 21 11:54:31 2010 -0400 New version. Also, bump the required libparted version since we need it for the new parted flag. commit e3d63b886f430a90e58a92e70c6f11ba92c695e5 Author: Chris Lumens Date: Wed Apr 21 11:46:10 2010 -0400 Add support for the PARTITION_DIAG parted flag (#583628). commit 24e0b294cf27d879008520bc0e8ec58545179402 Author: Chris Lumens Date: Thu Mar 25 14:55:25 2010 -0400 New version. 
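    The localeC entries above and the python-decorator removal describe a plain
    wrapping-function decorator; functools.wraps is what keeps the wrapped function's
    name and docstring intact (the "preserve attributes" part). The sketch below is
    only illustrative, not pyparted's actual implementation, and the use of LC_ALL
    here is an assumption.

        import functools
        import locale

        def localeC(func):
            @functools.wraps(func)      # keep func.__name__ and func.__doc__
            def wrapper(*args, **kwargs):
                saved = locale.setlocale(locale.LC_ALL)
                locale.setlocale(locale.LC_ALL, 'C')
                try:
                    return func(*args, **kwargs)
                finally:
                    # Restore the caller's locale even if func raises.
                    locale.setlocale(locale.LC_ALL, saved)
            return wrapper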
commit 2425a8135b34d07afd666a6b0844f70c501e00bd Author: Ales Kozumplik Date: Thu Mar 25 11:26:25 2010 +0100 Restore locales even when an exception is thrown (#571940). Otherwise this disables translations of all glade strings in anaconda. commit 35108d4fea8b6c906f221cc7524e90dfeafbfedb Author: David Cantrell Date: Mon Mar 1 12:29:47 2010 -1000 New version. commit 51a33455b51bac45243141be354c8c147b222c09 Author: David Cantrell Date: Mon Mar 1 11:52:02 2010 -1000 Prevent hangs when adding >4 partitions on an msdos labeled disk. The problem lies within pyparted, and to be precise within the tracking of the "ownership" of PedPartition objects. We can have multiple python partition objects point to one PedPartition and when we then remove a partition from the table, pydisk.c assumes the python partition object now owns the PedPartition as it is no longer part of the disk (this happens for example when temporarily removing the extended partition when re-allocating partitions when doing manual partitioning) However, we may have another python partition object pointing to that same PedPartition object. If we then lose the reference which we used to remove the partition from the disk, which happens when we do self.partitions.invalidate() in parted/disk.py (I think), the underlying PedPartition object gets destroyed / freed. Now the other python partition object (the one in the devicetree), has a pointer to memory which is being re-used. The destroying happens because the python partition object used for removing the partition from the disk thinks it is the owner as the partition is no longer owned by the disk. But it is not necessarily the *only* owner, yet it still destroys it. (Patch from Hans de Goede.) commit cad5797cafa0081946fd2a7517750e6bbc14b149 Author: David Cantrell Date: Mon Mar 1 11:49:11 2010 -1000 Updated TODO list. commit 718e815bfa48d67a2a03b06a3484b4924fdccd03 Author: David Cantrell Date: Mon Mar 1 11:48:51 2010 -1000 Whitespace cleanup. commit 6bb53e3a6cb7d56731c74ee5b5265905f51fea25 Author: David Cantrell Date: Tue Jan 12 12:17:36 2010 -1000 New version. commit 5e65db09996f927c966f465d4edd4094b43bd4bf Author: David Cantrell Date: Tue Jan 12 12:04:04 2010 -1000 Increment major version due to libparted API change. We now require libparted 2.1 or higher. The new major version number of pyparted is 3 to reflect this API change. commit b11e639a72cba9babd6c9fb602315c4654e74463 Author: Hans de Goede Date: Mon Jan 11 04:15:46 2010 -1000 Remove py_disk_clobber_exclude binding py_disk_clobber_exclude has been removed from libparted-2.1, which is the release we are targeting, so remove the binding for it. commit 68caa14be4c42c30800ed1ccb8135a8dcb19cccd Author: David Cantrell Date: Sat Dec 19 12:31:27 2009 -1000 Note the rpmlog target in the RELEASE instructions. commit 2a186a98fa0057586b271f568d83a9d283340e14 Author: David Cantrell Date: Sat Dec 19 12:29:50 2009 -1000 Add an rpmlog target in the Makefile. The idea of rpmlog in the pyparted project is that the packager can run 'make rpmlog' in the git project and get a changelog entry suitable for pasting into the spec file. Copied more or less from anaconda. commit 06c8b72f5286f8ea89e6c4fd2303b812fa27b55b Author: David Cantrell Date: Sat Dec 19 11:27:10 2009 -1000 New version. commit b8f008389c47f67e99b1dd82ffc707dbb7a565d3 Author: David Cantrell Date: Sat Dec 19 11:24:34 2009 -1000 Update release instructions.
Future work item: Want to remove the '-devel' thing and just have it generate a .gitXXXXXX thing that gets tacked on to the release number if you build from the git repo and not a release version. commit b699f0c4ffb9b74ed9707b5f70959e9f731b7472 Author: David Cantrell Date: Fri Dec 18 13:48:09 2009 -1000 Remove old cylinder alignment test cases for _ped. Removed DiskAlignToCylindersOnTestCase and DiskAlignToCylindersToggleTestCase as those functions have been removed from pyparted. commit a5abd138e07fce7704b8b6771fcabfbe0881fe75 Author: Hans de Goede Date: Fri Dec 18 01:25:21 2009 -1000 Add tests for max partition length / start sector commit ae1747149a4bcba03f2a508de835b082c948968a Author: Hans de Goede Date: Fri Dec 18 01:25:20 2009 -1000 Add _pedmodule and parted functions for max partition length / start sector parted-2.1 and Fedora's parted-1.9.0-24 add functions for querying the maximum partition length and start sector a disk's label can represent. This patch adds _pedmodule and parted module support for these functions. commit 0692b1e24550df9d4d52606a29c41a94058091cb Author: Hans de Goede Date: Fri Dec 18 01:25:19 2009 -1000 Remove align_to_cylinders function bindings The ped_disk_align_to_cylinders_on() and ped_disk_align_to_cylinders_toggle() functions were never in upstream parted, so remove their bindings from pyparted. commit 294471ccc094eec927a3a71bb564ab928cd2814d Author: Hans de Goede Date: Fri Dec 18 01:25:18 2009 -1000 Add tests for disk flag methods commit a5a6ea60fbf2fef4f8d8062b0fcc5e87e3b7cba7 Author: Hans de Goede Date: Fri Dec 18 01:25:17 2009 -1000 Add _pedmodule and parted functions for per disk flags parted-2.1 and Fedora's parted-1.9.0-24 add functions for having per disk flags (instead of per partition). These are used to enable / disable cylinder alignment on disklabels where it is optional (the Fedora-specific patch which adds the toggleAlignToCylinders function was not accepted upstream). This patch adds _pedmodule and parted module support for these flags; this support is modelled after the partition flags and works identically. commit 738c6e04a0411b6a2786551f44a1c0f38b7b13ff Author: David Cantrell Date: Thu Dec 10 14:08:40 2009 -1000 Every tuple member requires a comma after it. commit c9d43ab4768febbad0cea7673a267e80567c22c0 Author: David Cantrell Date: Thu Dec 10 14:04:33 2009 -1000 Fill out a lot of simple _ped.Disk test cases. commit 7faa9a1d2ab79759214ff3bfb64736e1abfbf461 Author: David Cantrell Date: Thu Dec 10 14:04:03 2009 -1000 Disable DeviceDestroyTestCase for now. Need online Python API reference and I'm currently over the Pacific Ocean. commit 70590a7bd38453584018dc980b8e5cdc478edad1 Author: David Cantrell Date: Thu Dec 10 14:03:13 2009 -1000 Add RequiresLabeledDevice to tests/_ped/baseclass.py. This base test class extends RequiresDevice. All it does on top of RequiresDevice is run parted to label the temporary device with an msdos disk label. commit 05cbb0df9767a13d241616ab10347edf361b77aa Author: David Cantrell Date: Thu Dec 10 13:33:20 2009 -1000 Attempt at fixing _ped.Device.destroy(), no dice. Stashing work for now. commit de3451fa3ab1f51b60d01615ea70f37560f878a9 Author: David Cantrell Date: Thu Dec 10 13:07:36 2009 -1000 Fix UnitFormatCustomTestCase and UnitFormatTestCase. Fix UNIT_PERCENT handling in these test cases. commit e87fa26bb3678b1d6451ca6b6c939ecf038b8682 Author: David Cantrell Date: Thu Dec 10 12:56:22 2009 -1000 Fix UnitFormatCustomByteTestCase and UnitFormatByteTestCase. The UNIT_PERCENT handling was incorrect.
Compute actual percentage based on the byte value we're using in the test case and the value of unit_get_size() for UNIT_PERCENT. commit 51b633ae85843864eb5c864083176124a041ede9 Author: David Cantrell Date: Thu Dec 10 09:41:55 2009 -1000 Add DeviceStrTestCase, disable DeviceDestroyTestCase. Added DeviceStrTestCase. Disabled DeviceDestroyTestCase as it's producing gc errors. commit 218c7e56771ec06cef1912782b7d3438b565a190 Author: David Cantrell Date: Thu Dec 10 09:21:35 2009 -1000 Add DeviceDestroyTestCase and DeviceCacheRemoveTestCase. commit f68aec424772ba97bb4975f9c1f606d7830729f9 Author: David Cantrell Date: Thu Dec 3 10:22:01 2009 -1000 Implemented ConstraintIsSolutionTestCase(). commit 958118342e57b64ba63410624bcd57d1509eb330 Author: David Cantrell Date: Thu Dec 3 10:12:38 2009 -1000 Implement ConstraintSolveMaxTestCase(). commit ba9cde5bb3259ddbc596bb62685d00aba282f818 Author: David Cantrell Date: Thu Dec 3 09:42:12 2009 -1000 Implement ConstraintSolveNearestTestCase(). commit 7147b9cc7aa9dcdba4aeec48ee5354557ec60377 Author: David Cantrell Date: Wed Dec 2 10:34:15 2009 -1000 Correct py_ped_file_system_probe_specific() for NULL returns. If the filesystem you try to probe for is not found in the specified geometry region, libparted returns NULL. It may also raise an exception from the filesystem code, but it does not seem appropriate to raise that here. I could be wrong, but I changed this function so on detection failures, we just return None. That seems more appropriate to the use of this function in Python. commit 0c05af36ecdfa4ef82c10e97836215b59184a2d9 Author: David Cantrell Date: Wed Dec 2 10:33:22 2009 -1000 Implement FileSystemProbeSpecificTestCase(). Use ext2 and probe for that explicitly. The equals test for the resulting geometry there is not working correctly, so the test case manually does the geometry richcompare. For other filesystem types, the specific probe returns None. commit 4ed0178467006ba04191632a84acfa520b02c7ad Author: David Cantrell Date: Tue Dec 1 11:40:42 2009 -1000 Implement FileSystemProbeTestCase(). Extend RequiresFileSystem and make sure the probed filesystem is ext2 and not any of the other ones available. commit abe5c09b047ba82394ed790ace9d432b85dd97b9 Author: David Cantrell Date: Tue Dec 1 11:39:47 2009 -1000 Add RequiresFileSystem to tests/_ped/baseclass.py. If a test case needs a device with a file system on it, RequiresFileSystem will do that. It creates an empty file and puts an ext2 filesystem on it. commit 3adf9a486afd8f64ada4f916de2d889cef68f187 Author: David Cantrell Date: Tue Dec 1 09:40:32 2009 -1000 Add disk alignment test cases in test_ped.py. Added the following test cases: DiskAlignToCylindersOnTestCase() DiskAlignToCylindersToggleTestCase() commit 0c65865b3c6dbd34c24ce53498a1cf458d3d57fa Author: David Cantrell Date: Tue Dec 1 07:12:04 2009 -1000 Fix CHSGeometryStrTestCase(). commit 6bf3dc2075ed290899dc79f92f4ccdbfa3d81412 Author: David Cantrell Date: Tue Dec 1 07:07:17 2009 -1000 Fix ConstraintDuplicateTestCase...finally. The test case for this one was mostly wrong. Each object in the _ped.Constraint will have a different repr() than the source, so we need to validate those with assertNotEquals(). But we do assertEquals() and check all (well, most) of the actual values in the constraint. commit 29acecb0cb609256ad0183192c58258f6de7f945 Author: David Cantrell Date: Tue Dec 1 07:05:32 2009 -1000 Put a deprecation warning in py_ped_constraint_duplicate().
Even though the goal of the _ped module is to create a complete Python API of libparted, some public functions in libparted just seem unnecessary in a Python module. The duplicate() functions are among these. In Python we can do a copy.deepcopy() and get the same effect that the libparted duplicate functions provide. Put a deprecation warning in this function to alert users that it will eventually go away and that they can just use a copy.deepcopy() instead. commit cfb1d2271bb55600c7acba2ddd7230a1888ce874 Author: David Cantrell Date: Tue Dec 1 06:13:06 2009 -1000 Note that we need parted from Fedora for pyparted. Upstream parted is not releasing versions of parted to ftp.gnu.org, but rather pulling patches into individual distribution offerings. Very annoying, but I am not upstream parted anymore, so I can only complain. commit c0798ba64c627acea31d071c449e55a4fad01e2d Author: David Cantrell Date: Mon Nov 30 11:52:29 2009 -1000 Fix UnitGetSizeTestCase in _ped test cases for _ped.UNIT_PERCENT. commit c411cb4ec91a93248a28bd82d908a9a95580344c Author: Hans de Goede Date: Mon Nov 16 11:37:32 2009 +0100 Add testcase for new _ped disk get_partition_alignment method commit a9de087daa3ed5abb42df49b7e06b12104cfa075 Author: David Cantrell Date: Fri Nov 6 15:29:16 2009 -1000 New version. commit 1da479c307e7d168bf2b9eafbd6aaecc7d60b1e7 Author: David Cantrell Date: Wed Nov 4 14:37:38 2009 -1000 Update TODO list. commit a57751ffe9b655d5c90c558e34eee4785484bbe4 Author: Hans de Goede Date: Thu Nov 5 23:56:26 2009 -1000 Use PedDevice length instead of DIY (#532023) Use PedDevice length instead of calculating it ourselves based on the BIOS Geometry. Using BIOS Geometry gives the wrong answer for DASD disks and partitions (as the actual sector size is 4096 but parted fakes 512 byte sectors), and it can also give the wrong answer for very large disks. commit 2674a71fd2ca7aaeb28cf1577acfec124eb9747f Author: Hans de Goede Date: Thu Nov 5 23:56:25 2009 -1000 Use sectorSize not physicalSectorSize for size calculations physicalSectorSize is the sector size on the platters of the disk, whereas sectorSize (which is the logical sector size) is the sector size seen in the (s)ata / scsi commands and on higher levels. IOW, the physical sector size is for informational purposes only; geometry, alignments, etc. are always expressed in logical sector sizes, so we should use sectorSize in size calculations. commit 843ddc0324255027aa30cf1b2810f3c0fee243d9 Author: David Cantrell Date: Tue Nov 3 13:56:51 2009 -1000 New version. commit 059ea0a6a3a5129d43ee56d93a61d9fe5a9539a5 Author: David Cantrell Date: Tue Nov 3 11:57:48 2009 -1000 Remove root user requirement in _ped.
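As the deprecation note above suggests, callers can get the same effect as the libparted duplicate functions from the standard library; a tiny illustration (the helper name is made up, and the duplicate() method is shown only for contrast):

    import copy

    def copy_constraint(constraint):
        # old style: constraint.duplicate()  (now carries a deprecation warning)
        # suggested replacement: let Python copy the object graph instead
        return copy.deepcopy(constraint)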
commit 28f095ace135d98b577d62f7361f31278ce3ffb9 Author: Hans de Goede Date: Fri Oct 30 02:03:03 2009 -1000 Add testcases for new _ped device methods commit a78da84200f05808db6468dac2ceb60ac499cf3b Author: Hans de Goede Date: Thu Oct 29 10:21:13 2009 -1000 Add python wrapper for new PedDisk partition alignment info function commit 99e192eb62248f1171bc9903b797a0ebd7827e17 Author: Hans de Goede Date: Thu Oct 29 10:21:12 2009 -1000 Add support for new PedDisk partition alignment info function commit 6bd5a69cefd209d9d02905d92f19bcb806a2ab7a Author: Hans de Goede Date: Thu Oct 29 10:21:10 2009 -1000 Add python wrappers for new PedDevice alignment info functions commit 799a9c4c038f697d495128550492b27cdaa4fbf1 Author: Hans de Goede Date: Thu Oct 29 10:21:09 2009 -1000 Add support for new PedDevice alignment info functions commit a88b6536dc21855c1ec381dad7000a3e7ef7446f Author: Chris Lumens Date: Tue Nov 3 16:28:28 2009 -0500 Fix a whole pile of test cases. commit 81cc7c9c22de5b8b65e3de23a066906c6f9d79fc Author: Hans de Goede Date: Thu Oct 29 10:21:14 2009 -1000 Remove ped_disk_commit_to_dev() call from py_ped_disk_new_fresh() Remove ped_disk_commit_to_dev() call from py_ped_disk_new_fresh(); py_ped_disk_new_fresh() should just create a new in-memory disklabel object and not cause any on-disk changes (just like the libparted equivalent). The commit was probably added in the past to work around the bug fixed by commit ac80c5ae274ed572bf6ed7fc65c2473205497e77 commit 2e05d162f8ab6de36b4e3181ccdba7649a5d6dda Author: Hans de Goede Date: Thu Oct 29 10:21:11 2009 -1000 Fix error in Constraint __str__ method commit 3ddb1df4fcce4910e3cb1e476f773cbdab21dec5 Author: Hans de Goede Date: Mon Oct 5 14:12:07 2009 +0200 Make _ped_Device2PedDevice properly set / throw exceptions Make _ped_Device2PedDevice properly set / throw exceptions when ped_device_get() fails. commit 8fb2756fe45a37387984b08205a4c02d5ba4852e Author: Hans de Goede Date: Mon Oct 5 14:10:41 2009 +0200 Fix up various error handling issues in pydisk.c This patch fixes: 1) partedExnRaised not being checked in py_ped_disk_clobber() and py_ped_disk_clobber_exclude() 2) The return of NULL without setting any sort of python exception in py_ped_disk_new_fresh() when ped_disk_new_fresh() fails. 3) The leaking of the just created fresh disk when ped_disk_commit_to_dev() fails in py_ped_disk_new_fresh(). 4) Make py_ped_disk_new_fresh() throw DiskExceptions not PartitionExceptions commit caabe8f52c6a80c476dc3b4ece3344c613885819 Author: Hans de Goede Date: Mon Oct 5 13:19:20 2009 +0200 Add missing _ped_Device2PedDevice() retval checks Add missing check for the retval of _ped_Device2PedDevice() in _ped_Geometry_init() and _ped_Disk_init(). commit 6f8982745750d12842fcee92c19110b14cb18e93 Author: Chris Lumens Date: Mon Oct 5 09:46:40 2009 -0400 Use libparted commit() for parted.disk.Disk.commit() (hdegoede). Currently we are using python code to combine the commit_to_dev and commit_to_os to get commit() however libparted has a native function for this. Using this removes an open / close of the device node and thus a udev change event and unnecessary udev rule scanning. Note that for the unnecessary open / close to be really gone we need a libparted with commit ad25892bb995f61b0ddf801ed1f74e0b1e7390ce. Note that this effectively backs out a previous patch to the commit function that was needed because of errors in libparted. With the above mentioned libparted commit, this is fixed and we no longer need our workaround.
https://fedorahosted.org/pyparted/ticket/24 commit 6b068de8f952ea0d4caa7ff4f0fe35e9825d1b43 Author: David Cantrell Date: Fri Oct 2 15:56:59 2009 -1000 New version. commit 3502dc5ad3bcecad39a610f357ddd23baf15ce5b Author: David Cantrell Date: Fri Oct 2 14:58:55 2009 -1000 Add bumpver target to Makefile.am commit bb169a3b7e3ea7ad11c6bf74f5a7945a3530b1a6 Author: David Cantrell Date: Fri Oct 2 14:52:47 2009 -1000 Catch exceptions from ped_disk_new_fresh() call. In py_ped_disk_new_fresh(), catch libparted exceptions when we call ped_disk_new_fresh(). commit ac80c5ae274ed572bf6ed7fc65c2473205497e77 Author: David Cantrell Date: Fri Oct 2 14:48:46 2009 -1000 Correct PedDisk2_ped_Disk() to avoid losing the new disk label. See https://bugzilla.redhat.com/show_bug.cgi?id=526525 for more details. commit 47cdff12ccbe6809f2b5b543ec3a28179fa298d2 Author: David Cantrell Date: Wed Aug 26 11:43:40 2009 -1000 Removed WHY document. commit fe737eed05b2de523d9bbb3ec9a7f2dbb0a8f962 Author: Chris Lumens Date: Tue Aug 25 12:59:09 2009 -0400 If an argument is required, check and raise an exception if not found. I'd prefer to just remove these things as keyword args, but we've already released pyparted and it's supposed to have a stable API. For now, we'll just have to raise exceptions. Later on we can convert them to regular args. commit fab4ed8670a8d7b5754215b162702cc21febec99 Author: Chris Lumens Date: Mon Aug 17 13:47:37 2009 -0400 New version. commit 3e84e2eb789dfa5e612330a477a27b8b68c39bb4 Author: Chris Lumens Date: Fri Aug 14 10:29:19 2009 -0400 PED_DEVICE_DM is always defined in libparted these days. commit ca8cb72b182f6f152dae65491af3927895862fd3 Author: Chris Lumens Date: Tue Aug 11 16:52:37 2009 -0400 Handle parted exceptions arising from ped_device_get (#495433). The bug has an extensive description of what's going wrong, but the short story is that by not handling the libparted exception, partedExnRaised stays set so the next time a libparted function returns NULL, we think that later caller is where the problem happened. Not only does this raise an exception where one shouldn't happen, but it raises the exception with a very outdated and misleading error message. We likely need to audit pyparted for other instances of this problem. commit ec576b3aaed4cd8d345d12ad5b072223a5627018 Author: David Cantrell Date: Tue Aug 4 12:36:03 2009 -1000 New version. commit e9f83215789463417b429b494016a67bf34ba640 Author: David Cantrell Date: Tue Aug 4 12:25:50 2009 -1000 Simple parted.Partition.getDeviceNodeName() This function came over from anaconda's code originally. Removed a lot of unnecessary logic since we really just want the part of the partition's path that does not include /dev/. commit b1bab559955bce27c96e85e53907a9bc9bc7a120 Author: Joel Granados Moreno Date: Tue Aug 4 17:24:43 2009 +0200 Calculate the length of the geometry correctly. When one specifies start and end in Geometry, the length has an off-by-one error. length != (end - start) because length is a value that includes the start sector and the end sector. When doing just (end-start) one does not take into account the start. commit 6889aed7cd87538df77b72d4b957e968f6143156 Author: Joel Granados Moreno Date: Tue Aug 4 16:14:23 2009 +0200 Be consistent in importing _ped.FUNCTION for the cylinder align stuff.
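The length fix described above is simple arithmetic: both the start and end sectors are part of the region. A quick check with made-up sector numbers:

    # a geometry covering sectors 2048 through 4095 inclusive
    start, end = 2048, 4095

    wrong = end - start        # 2047: forgets that the start sector itself counts
    length = end - start + 1   # 2048: includes both the start and the end sector
    assert length == 2048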
commit 5215a92f7fe7610f8a0cefc1469164510b9450ec Author: David Cantrell Date: Tue Aug 4 12:17:00 2009 -1000 git-clone -> git clone commit 70001fcf9f33554946b5b47f18e65ef143243fc4 Author: Joel Granados Moreno Date: Tue Aug 4 18:07:17 2009 +0200 Increase readability. commit 29a98feee7e98e0ea07c8044cd67985d5b8e6dd4 Author: Joel Granados Moreno Date: Tue Aug 4 18:05:25 2009 +0200 s/contraints/constraints/ commit d8bf33682da31914a164e2a9886e342c8d85d41a Author: David Cantrell Date: Thu Jul 16 14:27:45 2009 -1000 New version. commit fa5d9ceb881b1004b5c563a111c380391948fcdd Author: David Cantrell Date: Thu Jul 16 14:24:31 2009 -1000 Check for decorator.decorator in Python. commit f92fc93613bf417b4c6cca1325eed3eafa55a7af Author: David Cantrell Date: Thu Jul 16 13:39:26 2009 -1000 Rename bootstrap to autogen.sh commit 6503a88a583e9805edcc84f118b151e2f9844be9 Author: David Cantrell Date: Thu Jul 16 13:37:52 2009 -1000 Add pass-throughs for the align-to-cylinders functions. libparted now offers two functions to control aligning partitions to cylinders. They are: ped_disk_align_to_cylinders_on() ped_disk_align_to_cylinders_toggle() The first returns true if libparted is set to align partitions to cylinders. The second one toggles the setting back and forth. Pass-throughs have been added to _ped and parted and test case stubs have been added. commit 2912ebd5c575f6859fe4d0b4c01a6fd1e452c446 Author: David Cantrell Date: Thu Jul 16 13:13:01 2009 -1000 ped_free() -> free() parted-1.9.0 has removed ped_free() and replaced it with calls to the regular free() function in the C library. commit fb16e4b8662d48884239f9f037e7ec25396d0a00 Author: David Cantrell Date: Thu Jul 16 13:11:32 2009 -1000 Fill out get_max_supported_partition_count() functions. Implement py_ped_get_max_supported_partition_count() in _ped, add maxSupportedPartitionCount property to parted.Disk, add stubs for test cases. commit f05278c55c3d0047f2135886bd03a960b88ed64d Author: David Cantrell Date: Fri Jul 10 16:43:42 2009 -1000 Add py_ped_disk_get_max_supported_partition_count() This is a new function in parted-1.9.0. Need to finish it in _ped, add test cases, implement it in the parted module, and write test cases for that. commit 91a45f50713016bd63b2e80f435997dc66201e96 Author: David Cantrell Date: Fri Jul 10 16:36:39 2009 -1000 Add PARTITION_APPLE_TV_RECOVERY and PARTITION_BIOS_GRUB These new partition types are in parted-1.9.0 and later revisions. commit 7a8733545c5d62645e083da153ab7f8d98c7ac58 Author: David Cantrell Date: Fri Jul 10 16:19:31 2009 -1000 Add DEVICE_SDMMC and DEVICE_VIRTBLK from new libparted. commit b378c46d241c5a5093cce4dbd4e8e3ebdd3a2694 Author: David Cantrell Date: Fri Jul 10 16:17:04 2009 -1000 Bump minimum required libparted version, set new version number. Require libparted 1.9.0 or greated, bump pyparted version to 2.1.0. commit d89d3d083542fd088ca9ab1ec99896ce6b4e6570 Author: David Cantrell Date: Fri Jul 10 16:16:48 2009 -1000 Updated TODO list. commit 9e471ad36fe3a7162712c1490b800ab8b925fae3 Author: David Cantrell Date: Thu Jul 2 17:50:41 2009 -1000 Add more units to _exponent hash. Add petabytes, exabytes, zettabytes, and yottabytes. commit 79b1b4e307f07dfd253550611c89f71cdfa93895 Author: David Cantrell Date: Wed Apr 15 16:17:44 2009 -1000 Free remaining strings from ped_unit_* functions in pyunit.c. Two more functions needed ped_free() calls to free the string returned to us by libparted. 
commit de5df8f9422cf1474c3762e426be2f18c818b3c2 Author: David Cantrell Date: Wed Apr 15 16:12:38 2009 -1000 Read in PedUnit values from Python as ints consistently. The pyunit.c code was using long and int inconsistently for reading in a PedUnit value from Python, which would result in losing information since the size of int and long differ. Since PedUnit is just an enum in libparted, use int in _ped's code to read and store PedUnit values. commit 8e114dc0cf7da80070b2058664cd7e927ab1c225 Author: David Cantrell Date: Wed Apr 15 16:11:47 2009 -1000 More test cases for _ped.Device Write test cases for some of the unit format functions and write the DeviceStrTestCase. commit 845a74515615fc5d1a83938ed88798725633c97e Author: David Cantrell Date: Wed Apr 15 16:04:52 2009 -1000 Free string returned from libparted unit format functions. The PedUnit functions that return strings depend on the caller to call ped_free() to release the memory. We take the string from libparted and turn it in to a Python string, which makes a copy and lets Python memory management take over. Be sure to call ped_free() on the pointer returned from libparted. commit 6069a73f6e62a6bce7e14fec22edc58d51899017 Author: Chris Lumens Date: Wed Jun 10 14:52:56 2009 -0400 Add a decorator to set LC_MESSAGES=C before calling into libparted (pjones). This means that exceptions coming from inside libparted will end up being in English instead of whatever language the install is proceeding in, which should be more helpful for developers. commit f3f62c6a25d9163d83cafbd934c029cdefbb17e7 Author: David Cantrell Date: Tue Apr 14 22:50:15 2009 -1000 See if device is open or in external mode in py_ped_geometry_check() If the device is not open or is in external access mode, ped_geometry_check() will fail. Catch before calling libparted and raise an exception. commit 21f6dbb14987cac47cc4ac24aafa8ef9fd50d76c Author: David Cantrell Date: Tue Apr 14 22:48:22 2009 -1000 More _ped test case work. Test case code sanitizing and more work in test_device.py commit a368e9c85e4f8e9bd4df6fb796ae0cbeb603aa36 Author: David Cantrell Date: Tue Apr 14 20:57:35 2009 -1000 Avoid assert_ Use assertEquals(), assertNotEquals(), assertTrue(), and assertFalse() to make the test statements a little more clear. commit 907571d841013a569f60e9c7516aa0d322a90057 Author: David Cantrell Date: Tue Apr 14 20:20:46 2009 -1000 Finished test_geometry.py for _ped test cases. commit 1f0c9f59b9091e008eec5d9963c930db542f1a98 Author: David Cantrell Date: Tue Apr 14 17:14:41 2009 -1000 New version. commit affad0466b7447af1671d4af3f0cf0d8a796db26 Author: Chris Lumens Date: Tue Apr 14 13:16:57 2009 -0400 Be more careful with comparing types of objects now (#495713). commit 1d7cbc2f6f9af4a825ed01d83a0625a3f0f87d6a Author: David Cantrell Date: Mon Apr 13 09:48:10 2009 -1000 New version. commit 98ff1118773ceb430b1fc5a9e6e44b1bb7e6654c Author: David Cantrell Date: Sat Apr 11 12:34:33 2009 -1000 Move unit string format/parse tests to test_device.py These methods are now on _ped.Device, so move the test cases. commit b5d999bd6c0a720890bca12ff24cb9ca4dcb0d81 Author: David Cantrell Date: Sat Apr 11 12:29:56 2009 -1000 Move unit string parsing methods to be members of _ped.Device The following methods on _ped: unit_format_custom_byte() unit_format_byte() unit_format_custom() unit_format() unit_parse() unit_parse_custom() All act on a specific _ped.Device, so these methods are better suited as members of _ped.Device. Moved them from _ped to _ped.Device. 
These methods no longer take in a _ped.Device argument, since that is now self. commit edfdffea764de132c550ec015950d3de8c6b2686 Author: David Cantrell Date: Sat Apr 11 12:11:31 2009 -1000 Stop on compiler warnings, use -fno-strict-aliasing Python modules have to be compiled this way because of the continual use of pointers to differently sized types, since everything becomes a PyObject * at some point. I don't know of a way around this other than to disable strict aliasing. Apparently Python is compiled this way too, so whatever. commit d7580d4d005e56d48fe302a8745caee4c3c80db8 Author: David Cantrell Date: Sat Apr 11 11:31:33 2009 -1000 Inherit CFLAGS from calling environment. commit f7f8f2fb6f7d507be8708fe5ec4e7b806cb0a10d Author: David Cantrell Date: Thu Apr 9 13:47:29 2009 -1000 Name md partitions as /dev/mdXpY in getDeviceNodeName() 'md' is another device type that needs this special naming format, so add it to the list. Patch from jdanecki at Intel Corporation. commit 52a869d2cf07915a55e7e8702d1598fce3b6a14d Author: David Cantrell Date: Thu Apr 9 13:44:27 2009 -1000 Fix UnitGetSizeTestCase Call assertRaises() correctly. commit a1934af172f207233765335202418fca9a057f96 Author: Chris Lumens Date: Wed Apr 8 11:27:45 2009 -0400 Don't store the partition number in a _ped_Partition since it can change. parted likes to renumber partitions out from under us when partitions are added or deleted, so don't store the number directly. Look it up instead. Also, change num to be a read-only property. https://fedorahosted.org/pyparted/ticket/17 commit cbfc6dce7291e04c55e1b5863ea1a4b4aa3ccf01 Author: Chris Lumens Date: Wed Apr 8 11:24:32 2009 -0400 The first argument to py_ped_unit_get_size should be a PyObject. commit ee4a189287e39acd75f795e59e4331b847c266d1 Author: David Cantrell Date: Mon Apr 6 19:02:07 2009 -1000 Filter exception text back up in py_ped_unit_get_size() If we get 0 back from ped_unit_get_size(), grab the exception text and make sure we filter it back up to the caller and keep the exception raised. commit 847fac1ced1638374708c39bfdeebc45dc437fd2 Author: David Cantrell Date: Mon Apr 6 19:01:54 2009 -1000 Fix up UnitGetSizeTestCase in test_ped.py commit 66aa899b4f5c056b36f45d39da7268214f981eb7 Author: David Cantrell Date: Mon Apr 6 18:51:07 2009 -1000 Move unit_get_size() to be a method on _ped.Device Move this method out of the top level of _ped and make it a method on _ped.Device. The first argument is the _ped.Device you want to work on, so just make it a method. commit 965aa07a42899fb0f1c33fa95a40c36dad70cac7 Author: David Cantrell Date: Mon Apr 6 18:35:21 2009 -1000 Compare start, length, and end values rather than PedGeometry pointer. In tp_compare for _ped.Geometry, compare the start, length, and end values on the PedGeometry instead of comparing the PedGeometry pointer. There will be two different geometries, but the values should match. commit 93917b15b6c265ad91336e812991440aa66da805 Author: David Cantrell Date: Mon Apr 6 18:34:53 2009 -1000 Complete test_disktype.py test cases. Not many in here, but filled out the test cases to cover all of the disk types. commit cf3d1aa411f8f7b4bc710a2dd92bae6a7564b06e Author: David Cantrell Date: Mon Apr 6 18:34:21 2009 -1000 Create RequiresDiskTypes base unit test class. For test cases that need a hash table of all _ped.DiskType instances available. commit d5ea970174cb41d8ab29406f5e83ba734d9d7cf7 Author: David Cantrell Date: Mon Apr 6 18:10:31 2009 -1000 Fix DiskTypeGetTestCase in test_ped.py.
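Looking the partition number up on every access, instead of storing it, is what keeps libparted's renumbering from going stale; a sketch of the read-only property idea with illustrative attribute names (not necessarily the real parted.Partition internals):

    class Partition(object):
        def __init__(self, pedPartition):
            self.__partition = pedPartition     # the wrapped _ped.Partition

        @property
        def number(self):
            # re-read from libparted each time, so additions and deletions
            # of other partitions are always reflected
            return self.__partition.num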
commit cd5bf35714d2869a0bf6ab2876d382c427e27f08 Author: David Cantrell Date: Mon Apr 6 18:10:02 2009 -1000 More test_constraint.py work. Fill out some more test cases, start working on solve nearest. commit b39baa067cd7015bb0de20d60efd9ebb9ba67858 Author: David Cantrell Date: Mon Apr 6 16:30:23 2009 -1000 Add tp_compare and tp_richcompare for Constraint. commit dfde260c6c9fa7bd8418b0c3c492e804d3502b84 Author: David Cantrell Date: Mon Apr 6 16:22:35 2009 -1000 Add tp_compare and tp_richcompare for Timer. commit aec25674a69561c02993a593fd713ed1da990973 Author: David Cantrell Date: Mon Apr 6 16:17:34 2009 -1000 Add tp_compare and tp_richcompare for Alignment. commit 52c68180b3b96c10e4ba2e9cb7801e6d7b054af9 Author: David Cantrell Date: Mon Apr 6 16:02:44 2009 -1000 Add tp_compare and tp_richcompare for Geometry. commit 2207bdd06b0725190c507a1f62582b3221224500 Author: David Cantrell Date: Mon Apr 6 15:55:57 2009 -1000 Add tp_compare and tp_richcompare for FileSystemType and FileSystem. commit a710e30777e10de547b5fa1d640425b3ab3c20b9 Author: David Cantrell Date: Mon Apr 6 15:46:10 2009 -1000 Add tp_compare and tp_richcompare for Partition, Disk, and DiskType. commit 81b0a6bdeae282843cd457d933f904c21067c769 Author: David Cantrell Date: Mon Apr 6 15:28:52 2009 -1000 Forgot Py_TPFLAGS_HAVE_RICHCOMPARE on CHSGeometry and Device Need this flag to indicate we've set tp_richcompare to something usable now. commit 191de2441851a72cde5303b40643b23cc3dffc4d Author: David Cantrell Date: Mon Apr 6 15:24:02 2009 -1000 tp_compare and tp_richcompare for _ped.Device and _ped.CHSGeometry Implement tp_compare and tp_richcompare for _ped.Device and _ped.CHSGeometry types. We can really only test for equality with these types, so raise TypeError for unimplemented comparisons in richcompare. The tp_compare function is also poorly documented in the Python guide, so it may or may not be fully complete at this point. commit 46e1ba2f4dfe97a612c3f1f9c8af9f7e01f820c6 Author: David Cantrell Date: Mon Apr 6 14:49:55 2009 -1000 Simplify function prototypes. commit 711a98903330ab4169ac12d94560a81aaf2751e2 Author: David Cantrell Date: Mon Apr 6 14:33:51 2009 -1000 Set .tp_hash to PyObject_HashNotImplemented Setting this to PyObject_HashNotImplemented for now, at least until we determine which types can be hashable. I'm working on tp_compare and tp_richcompare and need this set to something valid in the interim. commit cb96653ebd9d92f9d9e15285a85e467689e5a9de Author: David Cantrell Date: Mon Apr 6 12:46:35 2009 -1000 Finish test_chsgeometry.py test cases commit ebaf1c90c326479321caaf8ec43d3c66cf5fca76 Author: David Cantrell Date: Mon Apr 6 12:41:27 2009 -1000 Add remaining _ped.Alignment test cases. Implement the following _ped.Alignment test cases: AlignmentAlignUpTestCase AlignmentAlignDownTestCase AlignmentAlignNearestTestCase AlignmentStrTestCase commit 5fb5ddfbdab6665579567bd3a4b7bc8bdd48b54f Author: David Cantrell Date: Mon Apr 6 12:40:23 2009 -1000 Add RequiresDeviceAlignment class. The RequiresDeviceAlignment class is for test cases that need a _ped.Geometry (which requires a _ped.Device) for _ped.Alignment operations. The base class has some rounding functions so we can verify the results from _ped.Alignment methods. 
commit 7ef1dd1d88ed7564e02c319ec12028cda0982c7a Author: David Cantrell Date: Mon Apr 6 11:44:01 2009 -1000 Expand unit type to name test cases for _ped commit 2db07f417a632c53b83a6b2fde1c6aea93d2ec6d Author: David Cantrell Date: Mon Apr 6 11:40:45 2009 -1000 Read param as int for py_ped_unit_get_name() 'i' was defined as a long, but we were reading it as an int with PyArg_ParseTuple(). On 64-bit systems...well, you know how the story ends. commit 599660b0a530b6a6bb00e6acd397a67b624d472a Author: David Cantrell Date: Sat Apr 4 17:26:24 2009 -1000 Implement the AlignmentIntersectTestCase test case. This one was fun. natmath.c has proofs in it, so my thought was to implement the math used in libparted in the test case and compare it to what we get back from libparted. Doing so uncovered a problem in our _ped.Alignment code, and hey, that's what the test cases are all about. commit f417b01ef573b1bec1df3cfa0e7eca009d722816 Author: David Cantrell Date: Sat Apr 4 17:22:55 2009 -1000 Copy back values from ped_alignment_init() Found this problem while working on test cases. It's not sufficient for us to just try to create a new alignment; we need to copy back the possibly-adjusted offset and grain_size from the newly created alignment. ped_alignment_new() calls ped_alignment_init(), which in turn calls abs_mod() if grain_size is non-zero and the offset of the alignment is adjusted accordingly. We need these adjusted values in our _ped.Alignments. commit 80cb1d6f3b56582ac7d1cef0eb8593b945f52fbd Author: David Cantrell Date: Fri Apr 3 16:38:36 2009 -1000 Use kwargs for parted.Constraint commit ad92b5b682e7c376d4f4b17b8b134722877b25f7 Author: David Cantrell Date: Fri Apr 3 16:30:50 2009 -1000 Use kwargs for parted.Alignment commit 8515a2c43764702390c9e31c62727f513ab8a0a7 Author: David Cantrell Date: Fri Apr 3 16:16:56 2009 -1000 New version. commit 566e4888b2a4f7702533a88d8c6f8af28e949217 Author: David Cantrell Date: Fri Apr 3 16:12:19 2009 -1000 Call commitToDevice() then commitToOS() in commit() This is exactly what ped_disk_commit() does in libparted, but for whatever reason, calling that function does not work reliably. Tracing the calls, the lines hit are identical. The only thing that really happens when we commit to OS is that libparted calls sync() a lot and issues BLKRRPART. In an effort to close out storage bugs, I'm changing this in pyparted. https://bugzilla.redhat.com/show_bug.cgi?id=491746 commit eecb1b328f35e0534c23106a972def8fc38eb6c5 Author: Hans de Goede Date: Thu Mar 26 13:05:47 2009 +0100 When checking for PartedException, also check for PyExc_NotImplementedError In various places we check whether the libparted exception handler was called when we got an error return from libparted. We then check to see if our custom libparted exception handler has already defined an exception, using PyErr_ExceptionMatches(PartedException), if it has not already defined an exception, then it should have set partedExnMessage, and we define our own exception using partedExnMessage. However our custom libparted exception handler will also define exceptions of the PyExc_NotImplementedError type, in which case our custom libparted exception handler has not set partedExnMessage. However as PyErr_ExceptionMatches(PartedException) does not catch the PyExc_NotImplementedError case, we still define our own exception using partedExnMessage; this causes us to call PyErr_SetString() with a NULL pointer, which triggers an assert inside Python's C code.
This patch fixes this by also checking PyErr_ExceptionMatches(PyExc_NotImplementedError) whenever we check PyErr_ExceptionMatches(PartedException) commit 4f4d2eae540fcabe04561b680ea1e8086d44323a Author: Hans de Goede Date: Thu Mar 26 13:05:31 2009 +0100 Fix a memory leak in add_partition error handling path commit fafa4a1af6498eed77c98d689e72909a901a2984 Author: Hans de Goede Date: Thu Mar 26 13:05:08 2009 +0100 Make the constraint argument for various partition functions optional ped_disk_add_partition(), ped_disk_set_partition_geom(), ped_disk_maximize_partition() and ped_disk_get_max_partition_geometry() all accept NULL as a valid constraint argument. So allow not passing in any _ped_Constraint object in the wrappers for these. commit 3f4afd2c9644273e5ecd14ba54743c663c0a1fee Author: David Cantrell Date: Mon Mar 23 17:59:40 2009 -1000 Update version number for new development work. commit 5dfe16cd1cb3e218bfdeed5f8f470b317b689195 Author: David Cantrell Date: Mon Mar 23 17:36:45 2009 -1000 New version. commit 9ae61eaef909837825c5bf18c0869af43319addd Author: David Cantrell Date: Mon Mar 23 16:09:08 2009 -1000 Revert "Ensure PedDisk pointers match when we call ped_disk_set_partition_geom()" This reverts commit e43fe6dff7c1b555ac500b58ef00e8bd1fe5135b. commit b2a1bc85f2116ea2431ca15da8ffc17eb4196166 Author: Hans de Goede Date: Mon Mar 23 14:09:20 2009 +0100 Sanity check disk_*_partition arguments Make sure operations done on a disk which take a partition as an argument get passed a partition of the disk on which the operation is being done! commit d1cfc08a39c59ab99377b656229e15086e335662 Author: David Cantrell Date: Mon Mar 23 13:39:12 2009 -1000 Check ownership when removing partitions from disk. Failing to do this will cause libparted to SIGSEGV. Patch from Joel Granados . commit 3158133ea7904cedbe379e7b3df28d9577158595 Author: David Cantrell Date: Fri Mar 20 17:27:13 2009 -1000 Update version number for new development tree. commit 02f59bcb066cc8b32cf9b2f9130ee4366026eead Author: David Cantrell Date: Fri Mar 20 17:22:25 2009 -1000 New version. commit e43fe6dff7c1b555ac500b58ef00e8bd1fe5135b Author: David Cantrell Date: Fri Mar 20 17:02:07 2009 -1000 Ensure PedDisk pointers match when we call ped_disk_set_partition_geom() commit 03f9751775a2006f3e2bbe18bef4be92accb358d Author: David Cantrell Date: Fri Mar 20 14:59:10 2009 -1000 Remove unused variables. commit 173bbc267b60f4f4f47af7cdae2bf39ed919bb95 Author: Hans de Goede Date: Fri Mar 20 15:59:15 2009 +0100 Use one and only one _ped_Disk object to represent a PedDisk object (#489745). This one is best explained (I think) by explaining what was happening in the old code. 1. Create a new _ped_Disk object, let's call this pydisk 2. Get a _ped_Partition from pydisk with (i.e.) py_ped_disk_next_partition 3. py_ped_disk_next_partition calls ped_disk_next_partition and gets a PedPartition 4. Then it calls PedPartition2_ped_Partition on the PedPartition it just got, let's call the resulting _ped_Partition pypart, but we are not that far yet. 5. PedPartition2_ped_Partition takes the PedDisk the PedPartition is on, part->disk and then calls PedDisk2_ped_Disk on it. Note part->disk is the same as pydisk->ped_disk ! 6. PedDisk2_ped_Disk creates a new _ped_Disk object with its ped_disk pointing to the passed in part->disk, let's call this pydisk2. Note how pydisk's and pydisk2's ped_disk attribute point to the same PedDisk. 7.
PedPartition2_ped_Partition stores pydisk2 in pypart->disk and returns pypart to py_ped_disk_next_partition, which in turn returns it to its caller 8. So now we have 2 _ped_Disk objects pointing to the same PedDisk, pydisk and pypart->disk (aka pydisk2) 9. We do some stuff with pypart and then are done with it 10. _ped_Partition_dealloc(pypart) gets called; it DECREFs pypart->disk 11. _ped_Disk_dealloc gets called on pypart->disk aka pydisk2 12. _ped_Disk_dealloc calls ped_disk_destroy(pydisk2->ped_disk) 13. We try to do some stuff to pydisk, which ends up using pydisk->ped_disk, which now points to freed memory / a destroyed PedDisk object, not good. This patch fixes this by passing in the existing _ped_Disk object into PedPartition2_ped_Partition, and simply INCREF-ing it there. commit e54687d575eccb56438e247dd3e3a1e8c5342133 Author: Chris Lumens Date: Fri Mar 20 14:30:21 2009 -0400 Add missing import. https://fedorahosted.org/pyparted/ticket/12 commit 5ea89ffbfd35c6304215f10614bbcfdbe5e3ee12 Author: Chris Lumens Date: Fri Mar 20 14:23:56 2009 -0400 Use a better test for empty extended partitions (#491326, hdegoede). My original test for whether the extended partition was empty or not was not quite right, as there's still a freespace "partition" in the part_list. libparted works around this by removing freespace and metadata partitions from the part_list before doing the check, then putting them back afterwards. We should just ignore them here. commit 1048e1bf29aae86537a066f5450224aedf74d90b Author: David Cantrell Date: Thu Mar 19 18:33:42 2009 -1000 Update version for new development tree. commit 006b52ff74dbde9d79f74ebbcc110d7c43057529 Author: David Cantrell Date: Thu Mar 19 18:30:13 2009 -1000 New version. commit 04e502aef8fb814378a7ee1467f042c0c75e13a5 Author: Chris Lumens Date: Thu Mar 19 13:17:13 2009 -0400 Raise an exception if an extended part still contains logical partitions. commit 845c971eda3a67f5c4fa387f5c7ec23de3747cd2 Author: David Cantrell Date: Wed Mar 18 10:19:39 2009 -1000 Catch PartitionException in parted.Partition.name property. Not all disk labels support get_name(). If we don't have that, an exception is raised. commit d5e20a8f4cb4e18954e584fabe415ce01d096ee7 Author: David Cantrell Date: Wed Mar 18 10:19:13 2009 -1000 s._device -> self._device commit b945a3d8ba2452e383b30cda5e27db4a8e7de65e Author: David Cantrell Date: Wed Mar 18 10:18:05 2009 -1000 Call is_busy(), do not pass it to bool() is_busy() returns a bool, so we only need to return that in the parted.Device.busy property. commit 1cea4bbdd3c9a6c2a852e166c4b9922a10cb69d7 Author: David Cantrell Date: Mon Mar 16 11:31:21 2009 -1000 Use new style Python property syntax for read-only properties. Use @property methods for read-only properties to increase readability in the code. commit 992271fc02b620b31bb27ab3f78c54bee030e52e Author: Chris Lumens Date: Mon Mar 16 11:06:11 2009 -0400 Fix a typo in a test case. https://fedorahosted.org/pyparted/ticket/10 commit f9a01ca683cd06ae7a988c046761fad59560f01b Author: David Cantrell Date: Fri Mar 13 11:27:50 2009 -1000 Improve readability of parted.Device property code. Use new-style @property syntax for the properties on parted.Device. commit ca18a0a64fd1706886ee74b8d55241a9bdd72701 Author: David Cantrell Date: Fri Mar 13 10:57:04 2009 -1000 DEVICE_DM only exists if libparted was built with device-mapper. GNU parted must be compiled with --enable-device-mapper in order to have PED_DEVICE_DM defined (which leads to DEVICE_DM being defined).
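The @property cleanups and the PartitionException guard above amount to patterns like the following sketch (simplified, with assumed attribute names, not the exact parted module code):

    import _ped

    class Partition(object):
        def __init__(self, pedPartition):
            self.__partition = pedPartition

        @property
        def name(self):
            try:
                return self.__partition.get_name()
            except _ped.PartitionException:
                # disk labels without name support raise; report no name instead
                return None

    class Device(object):
        def __init__(self, pedDevice):
            self.__device = pedDevice

        @property
        def busy(self):
            # is_busy() already returns a bool, so just return it directly
            return self.__device.is_busy()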
commit 4fcc41b808bb55399760bc9d1540b3bc520fd5ac Author: David Cantrell Date: Thu Mar 12 11:11:49 2009 -1000 Update version for new development series. commit b1093fdccd167263f1d2f1c6ec8d12f2914da274 Author: David Cantrell Date: Thu Mar 12 11:06:07 2009 -1000 New version. commit f8a2d385801019ec07d0ea7fbfeb24077a59cf25 Author: David Cantrell Date: Thu Mar 12 11:04:48 2009 -1000 PED_DEVICE_DM may not be defined If PED_DEVICE_DM is not defined, do not try to add it to our module. parted has to be built with --enable-device-mapper to get this define enabled. https://fedorahosted.org/pyparted/ticket/9 commit 1f80a2abb14b33ce4eb6e346a23c531a12c11ffb Author: Chris Lumens Date: Thu Mar 12 13:46:48 2009 -0400 Don't pass NULL to PyString_FromString (#489518). If the string we're supposed to return is NULL, return an empty string instead. Passing NULL is certainly a bug, though pyparted may not be the offender in the above referenced bug. commit f7a561f40b566dbd67b9179b6436a06653520b40 Author: David Cantrell Date: Thu Mar 5 15:05:58 2009 -1000 Update version number for new development release. commit d9ed8c79e83b0faf0198f155714952d8eff15281 Author: David Cantrell Date: Thu Mar 5 15:03:27 2009 -1000 New version. commit 20d723775331c3ab38e6dadf0acda31bb20233fc Author: David Cantrell Date: Thu Mar 5 14:37:22 2009 -1000 Update cached geometry when py_ped_disk_set_partition_geom() is called. commit 13d599774c6710c701c2454763dbb8911869b612 Author: Chris Lumens Date: Thu Mar 5 10:28:07 2009 -0500 Add a DiskLabelException class and raise it instead of IOException. This makes it much easier to tell when the problem was that there was no disk label on a disk, instead of some other random IO error. commit 6190399332a58218b554285d9969718a02e0a6af Author: David Cantrell Date: Tue Mar 3 14:02:08 2009 -1000 Invalidate cached partitions when minimizeExtendedPartition() is used The geometry of the extended partition may change, so invalidate the cache if this method succeeds. commit d91d3b6bc9afeb5f00cbdf40a66b1309ef04d33d Author: David Cantrell Date: Tue Mar 3 14:00:55 2009 -1000 Move clobber() method to parted.Device These methods moved in _ped, so move them in parted. commit 939514cb042afb7301e72ff68b6dab8ce445c5f8 Author: David Cantrell Date: Tue Mar 3 13:58:21 2009 -1000 Remove invalid line from py_ped_disk_minimize_extended_partition() There are no _ped_Partition or PedPartition objects we work with in this function. commit 9f4ca6401c43bfb02776cd08966718677dd444bf Author: David Cantrell Date: Tue Mar 3 13:56:24 2009 -1000 When updating a partition's geometry, update the pyobject contents When updating a partition's geometry, update the pyobject contents, instead of dereferencing it and creating a new one. (Patch from Hans de Goede.) commit e28c0d6a88682489716cc96a675496cfe43dc1c6 Author: Hans de Goede Date: Sun Mar 1 20:39:01 2009 +0100 Fix copy and paste error Do not use _ped_FileSystem_Type_obj.tp_repr() to represent a _ped_Geometry object, but use _ped_Geometry_Type_obj.tp_repr(). Signed-off-by: David Cantrell commit 63f204b75ea8833ce0bf963469f899ea8ac5a2d3 Author: Hans de Goede Date: Sun Mar 1 20:38:09 2009 +0100 Update _ped_Filesystem's _ped_Geometry object in py_ped_file_system_resize() py_ped_file_system_resize() changes the geometry of the filesystem, so we need to update our PyObject representing the geometry with the new data.
Signed-off-by: David Cantrell commit 424061454f930e65826b4d44c4d0afb24dad75d0 Author: Hans de Goede Date: Sun Mar 1 20:10:02 2009 +0100 Update _ped_Partition's _ped_Geometry object in disk functions which change it py_ped_disk_set_partition_geom() and py_ped_disk_maximize_partition() change the geometry of the partition, so update the partition's _ped_Geometry object to match the new geometry. Signed-off-by: David Cantrell commit 48ce309268c57c079cade5db647af46b8617aee4 Author: Hans de Goede Date: Sun Mar 1 19:23:27 2009 +0100 Cache libparted geometry objects in our _ped_Geometry objects Currently _ped_Geometry2PedGeometry() returns a pointer to a new libparted PedGeometry object each call, and not a single user frees this object. By making _ped_Geometry cache the underlying PedGeometry object this patch fixes this memory leak (and gives a very very small speedup by avoiding creating a lot of new PedGeometry objects). This patch also adds sanity checking to any changes made to _ped_Geometry.{start,length,end}. It also makes setting the end actually change the geometry (the old _ped_Geometry2PedGeometry() never used the end member, so changing it did not have any effect). Signed-off-by: David Cantrell commit 33bb48df89f6d3ab10b067abe678f383f6e57158 Author: David Cantrell Date: Tue Mar 3 13:48:43 2009 -1000 Updated TODO list. commit 0ebea186a827d1e395d81983dc58fcab53e9354c Author: David Cantrell Date: Tue Mar 3 12:03:03 2009 -1000 Call py_ped_disk_remove_partition() from py_ped_disk_delete_partition() Use ped_disk_remove_partition() so that we safely destroy on-disk partitions, but keep the PedPartition alive since we may be pointing to that from Python objects. commit c74c80bfcfca291205b10b769c5972ac789e1019 Author: David Cantrell Date: Tue Mar 3 12:00:02 2009 -1000 Make py_ped_disk_clobber*() members of _ped.Device The ped_disk_clobber() and ped_disk_clobber_exclude() functions work on a PedDevice, so make them members of the _ped.Device object. commit 90985b66f3eeb8e6f1b9db2c12cb0273eeed8539 Author: Chris Lumens Date: Tue Mar 3 11:25:24 2009 -0500 Fix a couple simple build warnings. commit daa6cebbcf4aff6a8d4852b928e8bbafa17555b0 Author: Chris Lumens Date: Tue Mar 3 11:11:11 2009 -0500 _ped.Disk.__init__ only takes one argument now. Update tests. commit 7041b5061065eb944ec9fb167787a31e0da63312 Author: David Cantrell Date: Mon Mar 2 15:22:43 2009 -1000 Update TODO and BUGS file. commit 3f556b4ec5decdc8a83feb9db3360a2ec11cc0b2 Author: David Cantrell Date: Sat Feb 28 18:53:24 2009 -1000 Update version number for new development release. commit d07090834506fcc15f8e4b7591743a812efafe3c Author: David Cantrell Date: Sat Feb 28 18:41:06 2009 -1000 New version. commit 0d0f99d2ffb60f665a2897af9a2a97a5c50fc0a8 Author: David Cantrell Date: Sat Feb 28 18:37:31 2009 -1000 Update geometry when a partition is added to a disk label. Update our copy of the geometry object in both _ped and parted. In the _ped module, grab the new PedGeometry and convert it to a _ped.Geometry and put it in the existing _ped.Partition object. In the parted module, update the partition.geometry property in parted.Disk.addPartition(). commit 5ae6e61a0fe925116f7364a4a9851471efbbb9e1 Author: David Cantrell Date: Sat Feb 28 15:29:23 2009 -1000 Read at most 10 characters for the devel string identifier. commit 20551d5b3d66da0c9abe2d4acb0da757c2388680 Author: David Cantrell Date: Sat Feb 28 12:13:06 2009 -1000 Mark _ped.Partition as owned by a Disk when necessary.
Prevent accidental free of PedPartition when it becomes owned by a PedDisk. https://fedorahosted.org/pyparted/ticket/4 commit b450ae9a2fc4b34968e321618c4675e127322ca8 Author: David Cantrell Date: Sat Feb 28 12:06:23 2009 -1000 For _ped.Partition functions, pass _ped_Partition instead of PyObject. For all of the functions that provide methods on _ped.Partition objects, pass the first argument as a (_ped_Partition *) rather than a (PyObject *) that we may or may not have to cast. commit d4f433a85b3be1b63c1bb5a71e2801bd793701fe Author: David Cantrell Date: Fri Feb 27 14:56:17 2009 -1000 Update for new development version. commit 40e72adc3f66a140dcbd4823d1662c3c1c91af64 Author: David Cantrell Date: Fri Feb 27 14:55:57 2009 -1000 The archive is a .tar.gz not a .tar.bz2 commit 008655baf742696210ac9e2d2c40d10b75e4a1f6 Author: David Cantrell Date: Fri Feb 27 14:52:47 2009 -1000 New version. commit e42fa2a2fe16b09f559cd510a7b022e0823f0186 Author: David Cantrell Date: Fri Feb 27 09:52:00 2009 -1000 Revert "Replace PyErr_ExceptionMatches() with PyErr_Occurred()" This reverts commit a54541462b7c7df9a0124cdf25f3348446c59e1c. commit a54541462b7c7df9a0124cdf25f3348446c59e1c Author: Hans de Goede Date: Fri Feb 27 14:35:05 2009 +0100 Replace PyErr_ExceptionMatches() with PyErr_Occurred() In various places we check whether the libparted exception handler was called when we got an error return from libparted. We then check to see if our custom libparted exception handler has already defined an exception, using PyErr_ExceptionMatches(PartedException), if it has not already defined an exception, then it should have set partedExnMessage, and we define our own exception using partedExnMessage. However our custom libparted exception handler will also define exceptions of the PyExc_NotImplementedError type, in which case our custom libparted exception handler has not set partedExnMessage. However as PyErr_ExceptionMatches(PartedException) does not catch the PyExc_NotImplementedError case, we still define our own exception using partedExnMessage; this causes us to call PyErr_SetString() with a NULL pointer, which triggers an assert inside Python's C code. This patch avoids triggering this assert by using PyErr_Occurred() instead of PyErr_ExceptionMatches() to see if our custom libparted exception handler has already defined an exception. Signed-off-by: David Cantrell commit 5e66da497f2b5be9f4c68f33acaab345a90c3d15 Author: David Cantrell Date: Fri Feb 27 09:36:40 2009 -1000 Update development version number. commit 3e39c8c8e250b78c2feaae3b7af8879a646322e8 Author: David Cantrell Date: Fri Feb 27 09:35:22 2009 -1000 Instructions on making a new release. Rather than increasing the version number after each release, I'd rather tack on '-devel' to the version. Modified the pyparted_version() function to work accordingly. Added RELEASE file which explains the steps I use to make a new release. commit 58ce4355b6f0b9711d9b05c1b07a2b3fa96d00b3 Author: Chris Lumens Date: Fri Feb 27 09:39:19 2009 -0500 Include cachedlist.py in the archive, thanks. commit 9c0041e37e0240bae540e74e47a191cf30de171c Author: David Cantrell Date: Wed Feb 25 14:11:35 2009 -1000 Fix compile errors in pyfilesys.c commit 056b247fa6bf57c6158a1aa550d5590fd7756611 Author: David Cantrell Date: Wed Feb 25 14:08:37 2009 -1000 Add some more test cases. Not much here, but I've been working on it.
commit bd5468bec2876270d51eb8e3a7ab1cc4b7b47865 Author: David Cantrell Date: Wed Feb 25 14:08:25 2009 -1000 Add parted.freeAllDevices() commit 4b43fdf320910cdfebd43029eea8124c170fb8f7 Author: David Cantrell Date: Wed Feb 25 10:39:09 2009 -1000 Add ped_filesystem to _ped_FileSystem struct. Cache the PedFileSystem inside a _ped.FileSystem now. commit 166961ced21c26e66def2ec14b5029ef72d45fbb Author: David Cantrell Date: Wed Feb 25 10:37:58 2009 -1000 Clean up cached libparted types in dealloc functions. In _ped_Disk_dealloc(), call ped_disk_destroy() on the cached PedDisk. In _ped_FileSystem_dealloc(), call ped_file_system_close() on the cached PedFileSystem. In _ped_Partition_dealloc(), call ped_partition_destroy() on the cached PedPartition. commit 240bb8f707ee5d71f90696b14afeb1f3424a509c Author: David Cantrell Date: Wed Feb 25 10:25:58 2009 -1000 Cache PedFileSystem in _ped.FileSystem Only perform one open operation for the _ped.FileSystem and keep the PedFileSystem cached so we can use that for subsequent fs-related calls. commit 9996752a0e8896e7d6c86262d62d2ce2323847a9 Author: David Cantrell Date: Wed Feb 25 10:04:27 2009 -1000 Remove unncessary Py_INCREF and incorrect GC_DEL. https://fedorahosted.org/pyparted/ticket/1 commit be7e48783072d9f870c6cee4313406329b280318 Author: Chris Lumens Date: Wed Feb 25 15:09:42 2009 -0500 Use a CachedList for storing the partitions list to speed things up. commit 5455a0a1b245961d1908db24f9766e77bdfae18b Author: Chris Lumens Date: Wed Feb 25 15:09:20 2009 -0500 Add a caching list implementation. The documentation in the file should explain how to use it. commit 5524f4aa88b77db735985e6882988df9af6b17e1 Author: Hans de Goede Date: Wed Feb 25 10:02:47 2009 +0100 Fix various memleaks in error paths Fix various memleaks in error paths. Signed-off-by: David Cantrell commit 96f262d017c00a1b4a149558c66c87afc3648663 Author: David Cantrell Date: Mon Feb 23 17:32:39 2009 -1000 Some pychecker fixes. pychecker caught some stuff. It complains about a lot, but then the code works anyway. I do not know how to make pychecker understand dicts. It also seems to hate hidden variables unless they are used in the same module. Might look at pylint. commit f3a777d0afb9ba12dfafeb6fbd0aeb5d87dc7722 Author: David Cantrell Date: Mon Feb 23 17:32:24 2009 -1000 Need reproducers for these bugs that we can run from Python. commit 168e5eafcf6f55abe3ef6be63be1f94423f4021b Author: David Cantrell Date: Mon Feb 23 17:00:39 2009 -1000 Remove unnecessary '*' from boilerplates. 'FOR A * PARTICULAR' -> 'FOR A PARTICULAR' commit bbdacdd10cbfa59f7352c9cbf54cec646a5dd3cd Author: David Cantrell Date: Mon Feb 23 16:54:04 2009 -1000 Update the release tag in Makefile.am and fix boilerplate. Fix a typo in the boilerplate. In the release tag, remind us that we need to make a new Version entry in the Trac admin page for the project. commit d7c8b4327b22d2b64376f4791f5b9896d912d8ce Author: David Cantrell Date: Mon Feb 23 14:56:37 2009 -1000 More BUGS. commit 570d1cf4ec08ba2a17c6a609df15c19c03250e9d Author: Chris Lumens Date: Tue Feb 24 10:15:13 2009 -0500 Use the right types for numeric members of _ped.Disk in __str__. commit cd49c89467ee3ac2e25b5eda97756b7242722bc1 Author: Hans de Goede Date: Sun Feb 22 20:18:59 2009 +0100 Fix typo in error checking in pyfilesys.c Fix typo in error checking in pyfilesys.c. 
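A caching list of the kind referenced above can be as small as a lazily rebuilt snapshot plus an invalidate() hook; this is a generic sketch, not the contents of pyparted's cachedlist.py:

    class CachedList(object):
        """Build the list with rebuildFunc on first use and reuse it until invalidated."""

        def __init__(self, rebuildFunc):
            self._rebuild = rebuildFunc
            self._cache = None

        def invalidate(self):
            # call after anything that changes the underlying data,
            # e.g. adding or removing a partition
            self._cache = None

        def _fill(self):
            if self._cache is None:
                self._cache = list(self._rebuild())
            return self._cache

        def __iter__(self):
            return iter(self._fill())

        def __len__(self):
            return len(self._fill())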
commit fdd3f3824294327cb6aeddbf48c3df0cbfdc5cfa Author: Hans de Goede Date: Sun Feb 22 17:56:03 2009 +0100 Add missing error checking to a few *2_ped_*() calls Add missing error checking to a few *2_ped_*() calls. commit e848b787a7cfdaf0120baa198b2604f6cee89b34 Author: Hans de Goede Date: Sun Feb 22 17:36:28 2009 +0100 Do not call free on a PedDiskType Do not call free on a PedDiskType. commit 621c7d12981c165e5e71de439e2d3f9818d9db6e Author: David Cantrell Date: Mon Feb 23 13:46:53 2009 -1000 Add remaining __str__ methods for _ped module. Remaining __str__ methods and test case stubs for the following classes in _ped: _ped.DiskType _ped.FileSystemType _ped.CHSGeometry commit 515aed1bfff0dafd8de1419e6af8bc1082087334 Author: David Cantrell Date: Mon Feb 23 13:36:43 2009 -1000 Move _ped_Device_str to the _ped_Device_Type_obj. Accidentally put this one on _ped_CHSGeometry_Type_obj first. commit 265bf90f5889369ed08bb9ba5ddb54e3f4848fa8 Author: David Cantrell Date: Mon Feb 23 12:33:54 2009 -1000 Added __str__ method for _ped.Partition commit 8f5b99f7715c6998f4c5ce581017c9ce900de4b9 Author: David Cantrell Date: Mon Feb 23 12:24:33 2009 -1000 Added __str__ method for _ped.Geometry commit 926cd690524c228d16a4c837fc8265076282135d Author: David Cantrell Date: Mon Feb 23 12:07:25 2009 -1000 Added __str__ method for _ped.FileSystem commit 2aaa014fa69b83d399d9a221660b5545bb63d050 Author: David Cantrell Date: Mon Feb 23 11:59:46 2009 -1000 Added __str__ method for _ped.Disk commit 3872a6b411e4f733a4ff2060bd6b1ee73ba0d093 Author: David Cantrell Date: Mon Feb 23 11:46:39 2009 -1000 Added __str__ method for _ped.Device commit 708b70edd3b27851d299c45acd33405b97e6ed2f Author: David Cantrell Date: Mon Feb 23 11:29:36 2009 -1000 Do not free() values from PyString_AsString() commit b1ce959e2c2a013d243bf5acae05731ff4084c6f Author: David Cantrell Date: Mon Feb 23 11:26:58 2009 -1000 If PyString_AsString() returns NULL, filter back up. If we get NULL from PyString_AsString(), we have a TypeError or other exception raised, so we need to return NULL here so it filters back up the stack. commit 72b6c2512c0d45c4d76844c5aa2abb397a6ecc96 Author: David Cantrell Date: Mon Feb 23 11:20:48 2009 -1000 Add __str__ method for _ped.Constraint() This one is more complex than the one for _ped.Alignment. Since a _ped.Constraint has two Alignments and two Geometries and two Sectors, we need to call the repr() methods a few times. Namely, we need to call repr() for the two Alignments and the two Geometries. Sectors are long longs, so we can pull those in directly. Also stubbed out the test case. commit d141db7769224e7b3c6f556b3fcaf1a7387a2e02 Author: David Cantrell Date: Mon Feb 23 11:20:20 2009 -1000 Wrap the long line in pynatmath.c:_ped_Alignment_str() commit 5e8757cd5b950d40b7fc5d092064f6b8b1ecee3b Author: David Cantrell Date: Mon Feb 23 10:51:44 2009 -1000 Added __str__ method for _ped.Alignment. We have __str__ methods on parted module classes, so we should have them on _ped for completeness. This is the one for _ped.Alignment. Also included the stub for the test case. The next commits will be similar for the remainder of the classes that need __str__ methods. commit a1399f30170888e7a8dfb278c95d36e89ee7b820 Author: David Cantrell Date: Mon Feb 23 10:38:13 2009 -1000 Stub out __str__() test cases for the parted module. commit 76d4a4f638acb42ef0a41bc9bdf2dc953dcd41d4 Author: Chris Lumens Date: Mon Feb 23 16:20:56 2009 -0500 Free temporary buffers in read and check methods.
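The _ped.Constraint __str__ commit above composes repr() of the embedded objects with direct formatting of the two sector values. A toy Python illustration of that composition follows; the real method is C code in the _ped module, and the member names used here are taken from libparted's PedConstraint rather than confirmed by the commit text.

    class Constraint(object):
        # Toy stand-in for _ped.Constraint, only to illustrate the __str__
        # composition described above (the real method is implemented in C).
        def __init__(self, start_align, end_align, start_range, end_range,
                     min_size, max_size):
            self.start_align, self.end_align = start_align, end_align
            self.start_range, self.end_range = start_range, end_range
            self.min_size, self.max_size = min_size, max_size

        def __str__(self):
            # Alignments and Geometries are objects, so lean on their repr();
            # the two sector values are plain integers and format directly.
            return ("Constraint: start_align=%r end_align=%r start_range=%r "
                    "end_range=%r min_size=%d max_size=%d" %
                    (self.start_align, self.end_align, self.start_range,
                     self.end_range, self.min_size, self.max_size))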
commit 7f677f6e3120673e17765f7e8a9eeec5c8df54e7 Author: Chris Lumens Date: Mon Feb 23 16:13:15 2009 -0500 ped_disk_type_check_feature can accept 0 as a feature flag. commit 916109a889895a1f54ae88daa67e271b04ad5ed4 Author: Chris Lumens Date: Mon Feb 23 16:04:37 2009 -0500 Using "z" everywhere doesn't make sense. Not all libparted functions want a NULL string, so we need to make sure to only use "z" in PyArg_ParseTuple either where the libparted method is okay with a potential NULL or where we explicitly test for it. commit bfb8937f067f90b6a3bb0d90eaecdda5a390d306 Author: Hans de Goede Date: Fri Feb 20 20:11:10 2009 +0100 Fix various python object leaks in convert.c This patch adds a ton of missing DECREF's and removes a few unneeded INCREF's from convert.c commit de47c58a9527a19797021232f7791513a56896e6 Author: Chris Lumens Date: Fri Feb 20 15:52:57 2009 -0500 More do not keep a borrowed reference without INCREF-ing it changes (hdegoede). commit 10805323df4bb775f72e7e18de61f4317d61afc3 Author: Chris Lumens Date: Fri Feb 20 15:43:37 2009 -0500 Remove an unneeded INCREF from pygeom (hdegoede). commit 92dc2b40f1b535f0712e0bb55483f2d30949a5f6 Author: Chris Lumens Date: Fri Feb 20 11:51:57 2009 -0500 Remove a couple of unneeded INCREF's from pydisk.c (hdegoede). Remove a couple of unneeded INCREF's from pydisk.c, stopping us from leaking a couple of python objects. commit 2705ccaef2dcf83bfb48a985fb56fb91ddf0f833 Author: Chris Lumens Date: Fri Feb 20 11:50:21 2009 -0500 Free c-strings on dealloc too (hdegoede). In a couple of places we store strdup-ed C-strings in our python objects, free these when the python objects get destroyed. commit 0dc25daef13e9a1ee9aa31e871e78bb11995d71c Author: Chris Lumens Date: Fri Feb 20 11:45:39 2009 -0500 Do not keep a borrowed reference without INCREF-ing it (hdegoede). In _ped_Disk_init we store a borrowed reference in self->dev (through PyArg_ParseTuple()). We INCREF this on success at the end. But we keep it without INCREF-ing it in an error path. This patch clears the borrowed reference in the error path. This fixes various cases of python crashing when having an invalid partition table. commit b4c1d75688d93e17b7c55fa111ff34134d22065a Author: Chris Lumens Date: Fri Feb 20 11:29:15 2009 -0500 Add __str__ methods to all parted objects. This will make anaconda exception dumps more useful, since the __str__ methods are what we use there to print objects out. Note that we cannot implement __repr__ methods now, because we use those in the __str__ methods to print out the very short description of objects. Otherwise, we'd end up with never ending printout as we go around in circles printing members. commit 14984c740a9f2c322c2424b1e432376d0a0219c1 Author: David Cantrell Date: Thu Feb 19 12:43:47 2009 -1000 Created a HACKING file. commit 78849cf9c0dcd6d147ba3dcd2f3bd8b7be270967 Author: David Cantrell Date: Mon Feb 16 16:28:54 2009 -1000 Increment version number for next release. commit 569b02a02a728078cb8ce752f577cd5f74766ff8 Author: David Cantrell Date: Mon Feb 16 16:19:47 2009 -1000 Updated documentation. commit 2d23f6048780c31d49d1107dfdb8260db825b41b Author: Chris Lumens Date: Mon Feb 16 10:18:40 2009 -0500 If two objects have different types, they are not equal (#485632). commit e5572f25bd2326ec48d01505bee056be191cb0e9 Author: David Cantrell Date: Thu Feb 12 14:37:13 2009 -1000 Increase minor version number for next release. Want to get in the habit of keeping the master branch at the next release version number so I don't forget to bump at the end.
I am not storing the spec file for the pyparted package in the upstream project, so configure.ac is the location of the version number. commit 8189df962c17cba5c348b4a2066bbe1a57dee37a Author: David Cantrell Date: Thu Feb 12 14:36:44 2009 -1000 Makefile.am fixes. commit a2e425a9dbbc599edaf91e647e1b2a9c84a9d077 Author: David Cantrell Date: Thu Feb 12 10:49:02 2009 -1000 Fooled around with fdisk.py a bit last night. It's a work in progress, but it could provide a nice way to test out pyparted. commit 2e5dd020aaf9d8806d81a6536b0bb8de7396a34e Author: Chris Lumens Date: Thu Feb 12 15:16:29 2009 -0500 Remove temporary buffers and sizes from the public API. commit 2c37ad942ab79b075e608217a5508ab2ec8b55de Author: David Cantrell Date: Thu Feb 12 10:11:08 2009 -1000 Updated documentation. commit 217878eb9ede310fbff7fa19be760d4edf01aa10 Author: David Cantrell Date: Thu Feb 12 10:10:26 2009 -1000 math.floor() -> float() Fix problem found by jgranados. This function didn't get transferred entirely correctly from partedUtils.py to pyparted. commit 8755935562c5de738b11b86927b290e307132d06 Author: David Cantrell Date: Wed Feb 11 14:17:43 2009 -1000 Speed up parted module a bit. Pass around parted.Disk and parted.Device if we have those. No need to keep creating them over and over. commit ee1ac7ed134027dc4135962088f1779a886d7fdf Author: David Cantrell Date: Wed Feb 11 13:16:00 2009 -1000 Added tag and release targets to Makefile.am Getting near release time. commit d6685ebe180266d0e4d13fe2bdee5d41fdda9c1d Author: David Cantrell Date: Wed Feb 11 10:45:41 2009 -1000 Removed getSizeAsSectors() Why did I add this? It's parted.Device.length commit 281d5b1d5e0e32498f5c5f70a7f522e2df87e6cb Author: Chris Lumens Date: Wed Feb 11 15:57:22 2009 -0500 We did some stuff. commit 380a615529044c86f931c32ff79e6a19bc50ca13 Author: David Cantrell Date: Tue Feb 10 18:39:10 2009 -1000 More test case cleanups and new test cases. commit c613c0a71ca2f692c8386da8b83607e550b198f8 Author: David Cantrell Date: Tue Feb 10 18:38:16 2009 -1000 _ped.partition_flag_next() loops around to the beginning. It does not raise an exception if you try to get a flag beyond the end of the list, it just goes back to the beginning, which is flag 0. Stop the loop on that condition. commit 50e8bd272ee956ed2b3420e12533e55d0cb885eb Author: David Cantrell Date: Tue Feb 10 16:43:07 2009 -1000 Code and documentation cleanups in the parted module. Added more properties to parted.Constraint. commit d82afc658491e92a3fff36cdaf181a4f25fba48f Author: David Cantrell Date: Tue Feb 10 16:42:54 2009 -1000 More test cases for the parted module. commit c0df5e8c197f785dd81d2f05591c535bf7d7ca8a Author: David Cantrell Date: Tue Feb 10 13:18:38 2009 -1000 Fix errors in src/parted/alignment.py setattr() was called incorrectly and the isAligned() method needed to handle invalid input. commit b2d365a809e1dc2dfc00e98534f64eaf401dd05d Author: David Cantrell Date: Tue Feb 10 13:18:11 2009 -1000 Three test cases for parted/test_alignment.py AlignmentNewTestCase AlignmentGetSetTestCase AlignmentIsAlignedTestCase commit 83be0f0c0fe8f0708b16749772974892c0e7bad0 Author: David Cantrell Date: Tue Feb 10 13:07:33 2009 -1000 Remove suite.addTest() lines for rounding functions. commit 0264328d34de531927a7087c5aabc6591dba9ea7 Author: David Cantrell Date: Tue Feb 10 13:07:06 2009 -1000 Add baseclass.py for parted test cases. 
Adapted from _ped/baseclass.py commit 23b4533349dcfa431fe2abff968d4a645c0c9b1f Author: David Cantrell Date: Tue Feb 10 12:44:23 2009 -1000 Remove rounding function test cases and add simple test case. In the parted module test cases, remove the ones for the rounding functions. Added a test case for the version() function on the parted module. commit 9a1effe2b7a6aefb1d2fbe53c618df7fb63a9676 Author: David Cantrell Date: Tue Feb 10 12:44:07 2009 -1000 Remove rounding function test cases. commit bf44c1afdb820e0e98934216262606aec20d425c Author: David Cantrell Date: Tue Feb 10 12:42:23 2009 -1000 Remove pointless rounding function pass-throughs. libparted has some math utility functions that are useful inside libparted and probably C programs linking with libparted, but we don't need to expose those to pyparted users. Removed the following functions because we can perform the same tasks in native Python: py_ped_round_up_to() py_ped_round_down_to() py_ped_round_to_nearest() py_ped_greatest_common_divisor() py_ped_div_round_up() py_ped_div_round_to_nearest() commit 7d09d3d1e38fd023e050774adf2f0dc81508837a Author: Chris Lumens Date: Tue Feb 10 16:56:00 2009 -0500 Add custom __eq__ and __ne__ methods on parted objects. commit 4640a5ef36ab2287a16dd777094693c536113292 Author: David Cantrell Date: Tue Feb 10 11:34:44 2009 -1000 Make all unwritten test cases fail with self.fail() Rather than pass, use self.fail with the message "Unimplemented test case." so we continue to see those messages when we run 'make check'. Right now, it's a bit misleading because the pass causes the test case to succeed. commit aa6eb4eaed4b51c7db8c2491fe84d5d485a21757 Author: David Cantrell Date: Tue Feb 10 11:34:34 2009 -1000 Add docstrings for parted.probeFileSystem() commit 569130afeb021eb9fd58b7801389e49337f783bd Author: David Cantrell Date: Tue Feb 10 11:16:52 2009 -1000 Add tests/parted/Makefile to configure.ac commit a387bc618976aa36bdd249edb09d2ad1ccc68dea Author: David Cantrell Date: Tue Feb 10 11:01:05 2009 -1000 Updated documentation. commit 28505b51d4fa78da587d3f5b47ad60e652176102 Author: David Cantrell Date: Tue Feb 10 10:46:04 2009 -1000 Spelling fix in docstrings. commit 0290e8ad3b45bdaf628b05ceb28f39a8b9623d89 Author: David Cantrell Date: Tue Feb 10 10:45:42 2009 -1000 Started test cases for the parted module. Just the test case file layout. commit 1cf112c83f5dc88bd9cecae3a68fb9ab963b8578 Author: Chris Lumens Date: Tue Feb 10 13:14:08 2009 -0500 We can't call Py_CLEAR on a libparted type. commit b46974a542da2a2cbb8dbed4ab0c2cb5647b890d Author: Chris Lumens Date: Tue Feb 10 11:54:39 2009 -0500 Fix a couple pychecker-discovered errors. commit 77ac49dc681a1a3c6fba503ce70ecdb0e3abae7b Author: Chris Lumens Date: Tue Feb 10 11:46:21 2009 -0500 Type conversions in geometry.py as well. commit 9b0dfe39b60345bbd743044e2456db3b8d1b6506 Author: Chris Lumens Date: Tue Feb 10 11:39:43 2009 -0500 Fix type conversions in filesystem.py commit 16bf5f161bf2e239a073b591f4b234598d2cc32e Author: Chris Lumens Date: Tue Feb 10 11:35:19 2009 -0500 Fix a few more _ped <-> parted type conversions in disk.py. commit af2b84ab975b84b2800a3b8174402e1f3fa847e5 Author: Chris Lumens Date: Tue Feb 10 11:27:17 2009 -0500 Handle conversion between _ped and parted types in constraint.py. commit 066fadaf76c72b119df3cacc26ad97b6b71b428c Author: Chris Lumens Date: Tue Feb 10 11:19:36 2009 -0500 Let Alignment take offset+grainSize or a PedAlignment. Also, handle conversions between _ped types and parted types.
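Since the rounding commit above argues the removed pass-throughs are trivial in native Python, here is a minimal pure-Python sketch of equivalents; the helper names are made up for illustration and pyparted itself no longer exposes any of them.

    def round_up_to(sector, grain):
        # smallest multiple of grain that is >= sector
        return ((sector + grain - 1) // grain) * grain

    def round_down_to(sector, grain):
        # largest multiple of grain that is <= sector
        return (sector // grain) * grain

    def round_to_nearest(sector, grain):
        # multiple of grain closest to sector (ties round up)
        return ((sector + grain // 2) // grain) * grain

    def div_round_up(numerator, divisor):
        return (numerator + divisor - 1) // divisor

    def div_round_to_nearest(numerator, divisor):
        return (numerator + divisor // 2) // divisor

    # The greatest-common-divisor helper is simply math.gcd in Python 3
    # (or fractions.gcd on the Python 2 releases pyparted targeted).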
commit a7e0ae8a6cd5d56efa7ccdf4b06bcb6a735905a1 Author: Chris Lumens Date: Tue Feb 10 11:07:13 2009 -0500 Return the correct parted module type in more places (dlehman). commit ab182d4ab0c20a1e6834205604a34491d3d4c6ba Author: David Cantrell Date: Mon Feb 9 19:01:54 2009 -1000 Update documentation. commit 86c2ea8ae9e77c70f50a44da209a8f49ad9d3b8f Author: David Cantrell Date: Mon Feb 9 10:51:41 2009 -1000 Added parted.Device.getSizeAsSectors() Returns the number of sectors that make up a device. Size of disk as bytes, divided by sector size, +1 if there is a remainder. commit 7c0ff88155e042ea5b687b4d838415b605367a06 Author: David Cantrell Date: Fri Feb 6 16:55:07 2009 -1000 Disable self.partitions cache in parted.Disk The caching system is not working quite right, so disable it for now. commit 686fb4ace20dd3812f729d2d296a9c120148b733 Author: Chris Lumens Date: Fri Feb 6 15:25:50 2009 -0500 ...except, put "/dev/" on the right argument. commit 2ded7025914a1ec0baa9a8136b8548366b1e6c08 Author: Chris Lumens Date: Fri Feb 6 15:10:03 2009 -0500 getDeviceNodeName returns just the node, not the full path. commit d134337a4f33510c816fd0b1ae704c0d597ed0d5 Author: Chris Lumens Date: Fri Feb 6 11:45:01 2009 -0500 We must call ped_disk_commit_to_dev after ped_disk_fresh_new. If we don't, nothing will get written to the disk and running the conversion functions will fail. ped_disk_fresh_new only operates on the in-memory representation. commit 4c415dc90843c31dd37ddb89bbf31f9e26ea9a33 Author: David Cantrell Date: Thu Feb 5 19:28:32 2009 -1000 Py_INCREF the _ped.Partition in py_ped_disk_add_partition() commit 91e8f585ea78de61ef10e72d612df3c2b5a533c8 Author: David Cantrell Date: Thu Feb 5 19:27:50 2009 -1000 Change how self.partitions is generated. The get_partition() method doesn't seem to work quite right, so use next_partition() to step through the partition list. Syntax fix as well (return Partition should be return partition). commit 58766a01be76989ef9619763fba7793b641908d4 Author: David Cantrell Date: Thu Feb 5 19:04:08 2009 -1000 Add getPartitionByPath() to parted.Disk Removed the parted.getPartitionByName() function since it wasn't really what we wanted. Created getPartitionByPath() on Disk which works in the same way, but only on that Disk. In parted.Disk, make sure the self.partitions list is being refreshed often enough. commit 1861e1bd38413f88647bc7048ba60da021300c40 Author: David Cantrell Date: Thu Feb 5 11:13:13 2009 -1000 Fix warnings in py_ped_disk_add_partition() Pass PyObjects to convert.c functions, but use a _ped_Partition here so we can access those structure members. commit 7ab96f7d53664e2fb2cd5ca32e314f68571a3834 Author: David Cantrell Date: Thu Feb 5 10:52:35 2009 -1000 After ped_disk_add_partition() call, update partition number. The ped_disk_add_partition() function in libparted takes in a new PedPartition and adds it to the in-memory partition table for the disk. The partition is given a number during this operation, so after the call we need to update the number in our Python object otherwise every partition is -1 that we add. commit f0cc56dad8962dd36158ba4f90d4bba1838e6e2c Author: David Cantrell Date: Wed Feb 4 22:02:05 2009 -1000 Store PedPartition in _ped.Partition. commit e9e7ee8db60c70288b702535987c3abd8c3774d4 Author: David Cantrell Date: Wed Feb 4 18:28:53 2009 -1000 Store PedDisk pointer in _ped.Disk object. We need this to avoid needless conversions and to keep the in-memory representation of the changed partition table. 
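The getSizeAsSectors() description above (later dropped in favor of parted.Device.length) is plain integer arithmetic; a standalone illustration of that calculation, not the actual method body:

    def size_as_sectors(size_in_bytes, sector_size):
        # Whole sectors needed to cover the byte count, rounding up when the
        # size is not an exact multiple of the sector size.
        sectors = size_in_bytes // sector_size
        if size_in_bytes % sector_size:
            sectors += 1
        return sectors

    assert size_as_sectors(1000, 512) == 2   # a partial sector still counts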
commit 29750cd1377cbe03abdd9313c889d75f7b737be7 Author: David Cantrell Date: Wed Feb 4 14:19:31 2009 -1000 Ignore ctags files. commit aedbe201ddaac222f5488b95325c748978fa79bb Author: David Cantrell Date: Wed Feb 4 12:00:16 2009 -1000 Keep the part_list and flags from PedDisk. We are losing information when we convert to and from a _ped_Disk and PedDisk. We're using ped_disk_new(), which works fine to give us the right struct, but we lose anything in memory that has not been committed to the disk. Save part_list, needs_clobber, and update_mode. The block_sizes struct member of PedDisk is not used at all in libparted, so we just ignore it. commit 5f2a2e508782c944c17f407e010bcd5d6137792e Author: Chris Lumens Date: Wed Feb 4 11:22:02 2009 -0500 Start keeping a list of things that are running really slowly. commit 24899abb78d53d62833dea57334b860387b12708 Author: David Cantrell Date: Tue Feb 3 18:01:36 2009 -1000 Fix up _ped_Partition2PedPartition for addPartition() We might be converting a new partition, so account for that at the end of _ped_Partition2PedPartition. Also, when adding a partition, make sure the PedDisk pointers match, otherwise we get an assert from libparted. Some small -Werror clean ups too. commit 21abade6e06f438fc0a4204a1b79bc5d0a1b7c92 Author: David Cantrell Date: Tue Feb 3 15:03:41 2009 -1000 Correct last parameter to _ped.Partition() It needs to be a _ped.FileSystemType, which we can get this way in the parted module: parted.fileSystemType[fs.type] commit 97ed97ad09840037b321efcc0d1c76de0705373e Author: Chris Lumens Date: Tue Feb 3 16:31:12 2009 -0500 getFreeSpaceRegions and getFreeSpacePartitions need to return a list. commit 6813d3d433261fce735fb4c72bf67c876ccb5310 Author: David Cantrell Date: Tue Feb 3 10:58:19 2009 -1000 Fix parted.Partition.nextPartition() and converter function. When using ped_disk_next_partition(), we iterate over every PedPartition region on the disk. Three types of regions have no partition number associated with them, the metadata, free space, and protected regions. Still, we want to iterate over them. In nextPartition(), return None when we get to the end of the partition list. In _ped_Partition2PedPartition, get special partitions by using ped_disk_next_partition(), checking the types, and then comparing the geometry values. commit aa91bc3fd4cffc582c24cc88beb4ec0a6194b661 Author: Chris Lumens Date: Tue Feb 3 15:03:40 2009 -0500 Added parted.Geometry.getSize, and make parted.Partition.getSize use it. commit 626ad9caf7a29bb7a3f0dee770077a6214ecaedc Author: Chris Lumens Date: Tue Feb 3 14:51:02 2009 -0500 If a partition isn't active (freespace, metadata, etc.) we can't get_flag. This is yet another libparted assertion we need to catch before calling into a libparted function. commit d1addcfe61eaadd4f6b244d16c126a78239f840b Author: Chris Lumens Date: Tue Feb 3 14:35:39 2009 -0500 Use a partition filtering function to reduce duplicated code. commit 883328a362d1eac67e34f459cac75380b4dfad5f Author: David Cantrell Date: Mon Feb 2 20:13:47 2009 -1000 Make sure nextPartition() returns a parted.Partition Encapsulate the _ped.Partition as a parted.Partition. commit dd644a8a5833957524e5e6647eed79e72a5c3504 Author: David Cantrell Date: Mon Feb 2 20:12:07 2009 -1000 Add getFirstPartition() to parted.Disk This allows us to get the first partition in the partition list for the Disk. We can then call nextPartition() on the Partition object and iterate the partitions that way. 
This is the only way in libparted to iterate partitions including commit 11fd27fddaa131a8ad6b05f7b2c2656a86b13c17 Author: David Cantrell Date: Mon Feb 2 20:11:19 2009 -1000 If ped_disk_next_partition() returns NULL, return None in Python. NULL means there is no next partition, so we can call that None in Python. commit 6c5023786f21173a96a177fedf7b1e979538307b Author: David Cantrell Date: Mon Feb 2 19:09:48 2009 -1000 Three fixes to disk.py in the parted module. 1) Make sure we are refreshing the self._partitions list. If we remove a partition, we should remove it from this list. 2) Add getFreeSpaceRegions() to return a list of Geometry objects representing the free space on the disk. 3) Add getFreeSpacePartitions() to do the same as above, but return a list of Partition objects. commit 6d8556d57b94712ce86af26acfeddd6adb70bfed Author: Chris Lumens Date: Mon Feb 2 10:50:03 2009 -0500 Convert from parted objects to _ped objects in constructors (dlehman). We should try to make sure that all parted methods and constructors take parted objects, and that when we call into the _ped module we pass the underlying _ped objects in as arguments. Confused yet? commit 4bee654778db19138ccf3954e01d1dfa40586505 Author: Chris Lumens Date: Fri Jan 30 13:24:44 2009 -0500 When we moved file_system_probe_specific to _ped, we never fixed the args. In particular, it needs to take a FileSystemType object as a first parameter instead of a self pointer. commit 46d24d8fef832572a726b0a2bb38e3119111c578 Author: David Cantrell Date: Thu Jan 29 09:05:20 2009 -1000 Raise object-specific exceptions when necessary. commit cfea3f000925abb58b641232917c3e4f93efa2c7 Author: David Cantrell Date: Thu Jan 29 09:04:36 2009 -1000 Add GeometryException for Geometry objects to raise. commit 7322c437120b70e57560200078988d0baacfe6a0 Author: David Cantrell Date: Thu Jan 29 09:01:19 2009 -1000 _ped.device_get() raises a DeviceException now. commit e569b6869127b5f09067811d6d0b47406bd30e22 Author: David Cantrell Date: Thu Jan 29 08:59:12 2009 -1000 Add _ped.DeviceException for _ped.Device and parted.Device Add a DeviceException class for exceptions we want to raise from a Device object. commit cb0a2e8b433c581dfa00075b29ae839908b82eca Author: David Cantrell Date: Thu Jan 29 08:52:49 2009 -1000 Raise a DiskException in parted.Disk.__init__() on syntax error. commit f81aecb6da5bcc88ded86f82fe9067d3a189f3c0 Author: David Cantrell Date: Thu Jan 29 08:52:30 2009 -1000 Import all _ped exceptions in to parted namespace. This is so we can refer to exceptions in either namespace. commit f2fe580a05c398e1795b29c59cf0daf9c73d31ef Author: David Cantrell Date: Thu Jan 29 07:22:45 2009 -1000 Changed Disk._partitions to a list. Was storing this as a hash table with the key being the partition number and the value being the parted.Partition. I thought that would be a useful data structure, but in every instance that I've been using Disk._partitions, I've only been using the values() part, so change it to just a list of parted.Partition objects. commit 9026b723bfe87bfaa6c188a084a86e4945f063b7 Author: David Cantrell Date: Wed Jan 28 19:29:22 2009 -1000 Handle fs_type as None when creating parted.Partition. A Partition does not have to have a filesystem, so we'll use None for those cases rather than throwing an exception. commit 54524499e8062473b70566489bae5e58873adc05 Author: David Cantrell Date: Wed Jan 28 19:21:24 2009 -1000 Simplify _ped_Partition2PedPartition() We had this all wrong. Make libparted do most of the work here. 
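A rough usage sketch of the iteration pattern those commits describe, built on the getFirstPartition()/nextPartition() methods and the None-at-end behavior mentioned above; the .type and .geometry attributes and the parted.PARTITION_FREESPACE constant are assumptions here, not confirmed by this log.

    import parted  # requires pyparted; sketch only

    def free_space_regions(disk):
        # Walk the partition list the way the commits above describe: start at
        # getFirstPartition() and follow nextPartition() until it returns None,
        # collecting the regions whose type marks them as free space.
        regions = []
        part = disk.getFirstPartition()
        while part is not None:
            if part.type & parted.PARTITION_FREESPACE:   # assumed names
                regions.append(part.geometry)            # assumed attribute
            part = part.nextPartition()
        return regions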
commit 30c9bd2092c93d0e55633e45b548df56b2e7c2de Author: David Cantrell Date: Wed Jan 28 19:14:19 2009 -1000 Fix _ped_Partition_init as well as PedPartition2_ped_Partition 1) In _ped_Partition_init we were creating a new partition, but not adding it to the disk label. Added the code for that. 2) Partition numbers are always -1 because of our PedPartition2_ped_Partition() function. We were building an arg list and then calling _ped_Partition_init, which was creating a new partition and adding it to the disk label. We don't really want that to happen for the conversion function, so revert the conversion function to just converting the struct members manually. Partition numbers match up now. 3) Do not call ped_disk_destroy() except in our destroy function. This is OS specific and causes the disk buffers to be flushed and other things to happen. We want to free the struct, but not cause anything to happen on the system. We might be leaking memory now, but we can get to that later. commit 1c9f8abec6e8f6fec658ff008cafa99676c3073d Author: David Cantrell Date: Wed Jan 28 18:25:52 2009 -1000 Do not store PedDevice pointers in Python data structures. We were doing this in order to prevent segfaults. I think this is causing more problems now. Looking through libparted, we can call ped_device_get() as many times as we want and it will give us a pointer to the PedDevice in the device list that corresponds to the path we give it. I've modified the code to work this way and got rid of the 'ped_device' struct members for _ped_Device and _ped_Geometry. commit 9b5097449f00425050669f04c4121d2a7223eccd Author: David Cantrell Date: Wed Jan 28 16:24:41 2009 -1000 Again with namespace problems. We've imported every class in the parted module directly in to the namespace of __init__, so change parted.Disk to Disk. commit 173132074fb7e75d9ef0a8ae03a8764a57df2324 Author: David Cantrell Date: Wed Jan 28 12:58:00 2009 -1000 Move __maxLogicalPartitionCount inside getMaxLogicalPartitions() commit 2da7867cf67e8662ed9e2c473e1e4a0a4b863236 Author: David Cantrell Date: Wed Jan 28 12:45:24 2009 -1000 In Disk.getMaxLogicalPartitions(), use self.supportsFeature() Call self.supportsFeature(parted.DISK_TYPE_EXTENDED) rather than the lowlevel check_feature() call. commit da5d05e2f284f82cbdbd9f7facb722061a9b5166 Author: David Cantrell Date: Wed Jan 28 12:41:03 2009 -1000 disk.Disk -> parted.Disk Typo. commit 7967414a2e6db8a91d2784d6b0e64c29443c9019 Author: David Cantrell Date: Wed Jan 28 12:29:23 2009 -1000 Return None if there is no extended partition on the Disk. commit d1e8d3c3e9d1022d7e85dc70566d35a1d8df86ff Author: David Cantrell Date: Wed Jan 28 12:09:01 2009 -1000 Call _ped methods passing the correct types. Need to pass _ped.Partition, _ped.Constraint, and _ped.Geometry objects to the underlying _ped methods. commit 43e8cf6e72055ed1b8b25931674368ad40755271 Author: David Cantrell Date: Wed Jan 28 10:54:21 2009 -1000 This is the constraint.py file. commit b6370d3e97b534aecc18fc9e379485172e2714da Author: David Cantrell Date: Wed Jan 28 10:31:12 2009 -1000 Allow start and end sectors for parted.Geometry init. Allow users to specify start and end sectors and have __init__ compute the length of the geometry. commit c2ff4542b59578add9e66e12b5780ec174ecc996 Author: David Cantrell Date: Tue Jan 27 21:07:24 2009 -1000 Add a device property to Disk. commit 5662b436212af83804a692e8b44af1081f32af2c Author: David Cantrell Date: Tue Jan 27 20:44:15 2009 -1000 Fix the getMaxAvailableSize() method on Partition. 
Iterate over disk.partitions.values() commit 48dd7bcb08fe1160a2c3928e6169c16f8e033887 Author: David Cantrell Date: Tue Jan 27 20:06:16 2009 -1000 Fix __init__ and attributes on parted.FileSystem Fix the __init__ function so it worked correctly. Fix the attributes to refer to correct values. commit 8e949b585ec7cc40cfd785aa1c253a91d93df5d7 Author: David Cantrell Date: Tue Jan 27 20:04:53 2009 -1000 Create unsetFlag() to accompany setFlag(), typo fixes. The setFlag() function was taking a flag and a boolean. Break this out in to a setFlag() and unsetFlag() option rather than passing in True or False to the setFlag() function. Also fix some typos in this file. commit 68e7a8aa6f15bf543db5837aaf4dcf6c5ebebe0d Author: David Cantrell Date: Tue Jan 27 15:04:31 2009 -1000 Remove number property for Disk, fix type property. The number property does not apply to a Disk. Do not store self._type with the type name (key in diskType). Modify the type property to return the self.__disk.type.name for the getter. For the setter, set the self.__disk.type property to parted.diskType[v]. commit 548277ed2bbae5c6e2c1aeb416fa5ecfebef0815 Author: David Cantrell Date: Tue Jan 27 15:00:30 2009 -1000 Increase flexibility of the parted.freshDisk() function. Allow the type to be a key or value in diskType. commit 410f6802e590bcfd6f103db401f407927c08b341 Author: David Cantrell Date: Tue Jan 27 14:59:54 2009 -1000 Do not require a type argument for parted.Disk.__init__() We don't need type here, so stop allowing it and stop trying to figure it out. commit 6be0f0946fbd93588d39172f14dc8c0e00be95f2 Author: David Cantrell Date: Tue Jan 27 14:28:14 2009 -1000 Updated README file. commit 29056b0fe69d32841ecab53a9159eb7207138916 Author: Chris Lumens Date: Tue Jan 27 17:44:55 2009 -0500 The .tp_init methods all expect a tuple, so provide it with one. Py_BuildValue doesn't make a tuple unless you tell it to or pass it multiple arguments. We'll just have to tell it to do so. commit 5794635a59c1c58e02e4ddec7119c86eb981cd0b Author: David Cantrell Date: Tue Jan 27 11:35:35 2009 -1000 Correct internal syntax in freshDisk() commit 4fb8fc20ecf47e696eeb5cee60ac80dea0dd2cf8 Author: David Cantrell Date: Tue Jan 27 11:30:29 2009 -1000 Add parted.freshDisk() to expose _ped.disk_new_fresh() commit efb3511c3400fa4fd50b7068f0789bbe017efac3 Author: David Cantrell Date: Tue Jan 27 11:25:23 2009 -1000 Add disk_new_fresh() to the _ped module. Allow users to call _ped.disk_new_fresh(Device, DiskType) to create a new Disk with the specified disk label type. Like libparted, the new label is only in-memory. The user has to call commit_to_dev() to write the label to disk. commit de29dcac352bf9f9b8083048b3682570f5805ea4 Author: David Cantrell Date: Tue Jan 27 11:13:46 2009 -1000 Fix up _ped_Disk_init() and PedDisk2_ped_Disk() Back when this function was first written, the thought was that the user could optionally provide a disk type and we could then call ped_disk_new_fresh() underneath to relabel the disk. The problem is this causes our conversion functions to fail and gets us in to difficult situations in the code. A _ped.Disk() now can only be initialized with a _ped.Device as the argument. ped_disk_new() is called. commit 7cdc428fac762d37a2e8143c3ead0ba0229a56c2 Author: Chris Lumens Date: Tue Jan 27 11:19:55 2009 -0500 Add a supportsFeature method on parted.Disk. This exposes the check_feature function for disk types but doesn't require client code to know anything about disk types. 
I don't even want to know anything about disk types and I'm working on this program. commit 0ec113995c8368eca778b2c7295afd98cdaee5ea Author: David Cantrell Date: Mon Jan 26 17:09:34 2009 -1000 Fix incorrect error string. commit 742d48e212a0598bb1f5b8f4f442c533f8217b1a Author: David Cantrell Date: Mon Jan 26 17:09:09 2009 -1000 Add name attribute to parted.Partition This attribute is read-only and a pass-through to the py_ped_partition_get_name() function. commit 3801904c0844d86496ba82f9dc7ac941d222bcfc Author: David Cantrell Date: Mon Jan 26 17:08:48 2009 -1000 Removed unnecessary from/import line. commit 82848b88aa6cb376c3390505c1787fcdf712b9e2 Author: David Cantrell Date: Mon Jan 26 16:46:13 2009 -1000 Add isFlagAvailable() and nextPartition() methods. The Partition object needed these methods. commit c11601cf361cdce61a728a574503f7bbc8c4ce1f Author: David Cantrell Date: Mon Jan 26 16:14:52 2009 -1000 Create a parted.Geometry from self.__fileSystem.geom commit 5bc8c48a892a38b4dcf06afd1a76f684e45a18ac Author: David Cantrell Date: Mon Jan 26 16:14:34 2009 -1000 Create parted.FileSystem when init'ed with PedPartition commit aaa56b13ec38d2adea5dc03715161238019ed923 Author: David Cantrell Date: Mon Jan 26 16:00:13 2009 -1000 Revert "In _ped_Disk_init(), honor optional type argument first." This reverts commit e008e19d5a5dc2efdecd940b8c16ac127caee7aa. commit e008e19d5a5dc2efdecd940b8c16ac127caee7aa Author: David Cantrell Date: Mon Jan 26 15:45:02 2009 -1000 In _ped_Disk_init(), honor optional type argument first. If the caller gives us a type argument, honor that before probing for the disk type. commit efa1428343cbcae3ce8c2139ad1fd4949d834490 Author: David Cantrell Date: Mon Jan 26 15:43:56 2009 -1000 Add getPartitionByName() Replaces partedUtils.get_partition_by_name() in anaconda. commit ac8742bf1e789ff54545252e0d9c48c8aae8a3f3 Author: David Cantrell Date: Mon Jan 26 14:49:17 2009 -1000 Allow callers to pass True/False to setFlag() on Partition In Python, we have a real boolean type. Make sure we translate it to ints when calling the lowlevel libparted code. commit bd0ad3747f1428ff42d3c21a5d27414b0ffabdb1 Author: David Cantrell Date: Mon Jan 26 14:48:37 2009 -1000 Add more functions from partedUtils.py in anaconda Bring over: getLogicalPartitions() getPrimaryPartitions() getRaidPartitions() getLVMPartitions() commit 18e2c5e0233f06fe9bd13e6797590e6c0ec6d3fe Author: David Cantrell Date: Mon Jan 26 14:14:37 2009 -1000 Cleared out these two bugs. commit 34c9fd835e4cd1df3be7566713453a9483ea6c99 Author: David Cantrell Date: Mon Jan 26 14:13:02 2009 -1000 Pass geom as the PedGeometry parameter. Syntax error: return geometry.Geometry(geom) to: return geometry.Geometry(PedGeometry=geom) commit 433215e89020f20e63dac7573d0ba9a80d7bcfd3 Author: David Cantrell Date: Mon Jan 26 14:11:29 2009 -1000 Make partitions a read-only property on parted.Disk Initializing this hash table in __init__ was causing recursive indefinite looping. Whenever a Disk was created, it would go and create the hash again. Make partitions a property. On the first access, init self._partitions with the hash table. commit d2ed591d6c158c033da845b1ad88783a978eff1d Author: David Cantrell Date: Mon Jan 26 13:57:27 2009 -1000 Make all parameters have default values. In order to initialize a parted.Geometry with just a PedGeometry, all of the other parameters should have a default (that is, be optional). Fix a namespace issue (Device -> parted.Device). 
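The "partitions as a read-only property" commit above describes a lazy-initialization pattern; a schematic standalone sketch of that pattern (not the real parted.Disk code):

    class Disk(object):
        def __init__(self):
            # Nothing is scanned here, so constructing a Disk can never
            # recurse back into partition creation.
            self._partitions = None

        @property
        def partitions(self):
            # Build the list on first access and reuse it afterwards.
            if self._partitions is None:
                self._partitions = self._scanPartitions()
            return self._partitions

        def _scanPartitions(self):
            return []   # placeholder for the real libparted walk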
commit ce4988d1dca3a579cc3f6886211ae42bbde4a8d1 Author: David Cantrell Date: Mon Jan 26 13:16:29 2009 -1000 Let the Partition parameter on next_partition() be optional. Syncing up with the functionality of libparted. The next_partition() parameter can take NULL and just return the head of the list. commit effe6ae299aabd449525ba5ab421bc006586ff85 Author: David Cantrell Date: Mon Jan 26 12:54:59 2009 -1000 If Py_BuildValue() returns NULL, return to caller. Per Chris' recommendation. If Py_BuildValue() returns NULL, we should return NULL immediately so we can catch exceptions that may have been raised and avoid segfaults. commit 9debe17a89cbe5715544f37d40af28521334a51f Author: Chris Lumens Date: Mon Jan 26 16:28:16 2009 -0500 The fs_type on a partition can be NULL, so don't pack that into a tuple. We also need to handle errors from Py_BuildValue by returning NULL so exceptions get propagated up. That's easier to handle than random segfaults. commit 3ca036dba7b2f62b8764ae3876ffa9197cda1a99 Author: Chris Lumens Date: Mon Jan 26 16:00:07 2009 -0500 We really shouldn't ever be calling free on a PyObject. commit e58984bbb8b6fadabd36d4f4fc5a5dca6b538918 Author: David Cantrell Date: Mon Jan 26 10:34:44 2009 -1000 New bugs. commit fd532b9c928d89cb28ef72e542ce402d334219b4 Author: David Cantrell Date: Mon Jan 26 10:27:32 2009 -1000 Add parted.Partition.getDeviceNodeName() Previously implemented in anaconda in partedUtils.py as get_partition_name(). commit ac5c61e2d619e82e0342ef84cdeedfc45bd9924e Author: David Cantrell Date: Mon Jan 26 10:26:58 2009 -1000 Add archLabels hash table. From anaconda's partedUtils.py file. A hash table listing valid disk labels for each architecture. commit e30479fda00638581a9ddb076bb64a976be8033c Author: David Cantrell Date: Mon Jan 26 10:26:10 2009 -1000 Do not call ped_partition_destroy() unless we are destroying. The ped_partition_destroy() function is named poorly. It actually calls the label-specific function to destroy the partition on the disk. We don't want that. commit 7cea44689161ba7044ed18844581da0ae56ffda9 Author: Chris Lumens Date: Mon Jan 26 15:04:50 2009 -0500 Fix the geometry read and write test cases. These were only failing because I didn't understand how the methods were supposed to work. If you try to write more bytes than the buffer contains, it'll only write the contents of the buffer once. It doesn't copy the buffer over and over again into the geometry to meet the byte request. commit aab2b98f1a5e552a9e7610f35aea687a73c8db33 Author: Chris Lumens Date: Mon Jan 26 14:58:34 2009 -0500 Fix a typo in FileSystemTypeGetSetTestCase. commit 77d674028b7b16f8011118aa8d76bac4ea4f10ce Author: Chris Lumens Date: Mon Jan 26 14:56:37 2009 -0500 Fix ConstraintDuplicateTestCase. If an alignment has a non-zero grain_size, libparted sets the offset to be |offset % grain_size|. Since this is a copy test, we need to set grain_size to be 0 in order to not have to deal with this little quirk. commit f7936ff4ac096523412fcb5702d5481033e81511 Author: Chris Lumens Date: Mon Jan 26 12:00:00 2009 -0500 Fix order of parameter passing when building the _ped.Partition arg tuple. The order of the arguments to _ped.Partition.__init__ changed because the filesystem type argument is no longer required. I changed the order everywhere except for this place, it looks like. commit 9b3420af52fbf19ce7d6c3dc518e56a94fc739e8 Author: Chris Lumens Date: Mon Jan 26 10:27:07 2009 -0500 The flag_get_next, flag_get, and flag_get_by_name methods have changed names.
In the future, we should always check that we update the test suite at the same time as we are changing the names or locations of methods. commit 27302a44e120a91493658e53358619c8dca8e9fc Author: David Cantrell Date: Sat Jan 24 23:29:02 2009 -1000 Allow type as optional argument to parted.Disk.__init__ If the caller does not provide a type, probe for it. commit 43227749f8569347167c1026fd2280feb18fa250 Author: David Cantrell Date: Sat Jan 24 14:55:54 2009 -1000 getAllDisks() renamed to getAllDevices() This method was named poorly. It returns a list of all Devices in the system. Additionally, it was returning a list of _ped.Device objects, so make sure it's now returning a list of parted.Device objects. commit 2264e16c0f6b11fe95c392f571d0246fe5ce3ef0 Author: David Cantrell Date: Sat Jan 24 14:28:19 2009 -1000 Handle PedPartition initializer for parted.Partition If we initialize a parted.Partition using a PedPartition, be sure to make self._disk a parted.Disk and self._geometry a parted.Geometry. Just pulling the values from the PedPartition gives us _ped types, which we don't want at this level. commit e1a33cbd6fb691a659579eb7dd5047cc21e65f81 Author: David Cantrell Date: Sat Jan 24 14:27:57 2009 -1000 Note the _ped.Disk.get_partition() method failure. commit 6d00f672a8f21550d9e55fefa8fc5d9638554950 Author: David Cantrell Date: Sat Jan 24 11:05:14 2009 -1000 Add getMaxLogicalPartitions() method to parted.Disk Taken from partedUtils.py in anaconda. Returns the maximum number of logical partitions allowed based on the device type. commit f6e9f8808ded0ad69399c668dc01dc99aba9cbd0 Author: David Cantrell Date: Sat Jan 24 10:54:49 2009 -1000 Add getMaxAvailableSize() method to parted.Partition Taken from partedUtils.py in anaconda, this method looks at partitions that come after this partition (self) and add up the ones that are marked as freespace to tell the caller the max size this partition can grow to. commit 882840d56cf90b038027487332eaaa40fec7fd60 Author: David Cantrell Date: Sat Jan 24 10:36:47 2009 -1000 Install parted module to arch site-packages dir. The _pedmodule.so module is installed to either /usr/lib or /usr/lib64. While the Python code is not arch-specific, I think it still makes sense to group the parted module together with the _ped module and keep them either in /usr/lib (for 32-bit arches) and /usr/lib64 (for 64-bit arches). commit e7eb377f3aef26ebe0a97ab072b06b2155ef9643 Author: David Cantrell Date: Fri Jan 23 22:35:47 2009 -1000 Install parted Python module via automake rules. commit 3ce4c3ef23ca1741ade21d3a50bc663f33190dc5 Author: David Cantrell Date: Fri Jan 23 22:24:27 2009 -1000 Ignore py-compile script. commit 340d8ac1b0e0ba8f57897cc2cf1c56890874b579 Author: David Cantrell Date: Fri Jan 23 21:55:30 2009 -1000 Update Makefile.am to include docs and more targets Need to include the other docs and add targets to generate the ChangeLog and to clear out things we don't want in dist archives. commit 426e5105604359910cd05b9bf2c2c5ac673eaf9a Author: David Cantrell Date: Fri Jan 23 18:01:51 2009 -1000 Include exceptions.h in the release tarball. commit 1ae37e3f5d803c202245f028925ac6c307f57922 Author: David Cantrell Date: Fri Jan 23 17:49:01 2009 -1000 Seriously, we forgot to import what we're writing? commit 1d6f65a0d09306213500efc32f495d40542d759b Author: David Cantrell Date: Fri Jan 23 17:48:28 2009 -1000 import math Need the math module here. 
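The getMaxAvailableSize() commit above explains the growth calculation in words; a schematic reading of that description using plain (length_in_sectors, is_free_space) tuples instead of real parted.Partition objects, purely for illustration:

    def max_available_size(partitions, index):
        # The partition's own length plus the free-space regions recorded
        # after it, per the commit message above.
        total = partitions[index][0]
        for length, is_free in partitions[index + 1:]:
            if is_free:
                total += length
        return total

    # a 100-sector partition followed by 50 free sectors can grow to 150
    assert max_available_size([(100, False), (50, True), (200, False)], 0) == 150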
commit a76334c19340bd784cd5bfa693649dde0dab74bd Author: David Cantrell Date: Fri Jan 23 17:47:31 2009 -1000 __exponent -> _exponent pychecker caught this. I was trying to make the hash too hidden. commit 678309fc7b27bf28009d68cbea9681c5434ec553 Author: David Cantrell Date: Fri Jan 23 17:46:20 2009 -1000 floor -> math.floor pychecker caught this. commit 7d822e3c470a88e095edefa1d54246674e1ff678 Author: David Cantrell Date: Fri Jan 23 14:38:01 2009 -1000 Modify getSize() in Partition to use global __exponent hash. commit b584d61952778dd821104969a2bde68f21d8dcc6 Author: David Cantrell Date: Fri Jan 23 14:36:38 2009 -1000 Make the device property on parted.Geometry give parted.Device The Geometry class had a getPedDevice() method to return the lowlevel _ped.Device object associated with this Geometry. However, at the higher level code, we want to deal with parted.Device as much as possible. Make the 'device' attribute on parted.Geometry return a parted.Device version of the internal _ped.Device. commit 8e60c3405a3de09e0a10a0727ce7c2c09eb1d4f2 Author: David Cantrell Date: Fri Jan 23 14:36:01 2009 -1000 Fix some __init__ problems in parted.Disk This object is still giving me some errors, but I'm committing what I have for now. commit fbff8a39e2ebd1053bb5d5d44338909d93842e06 Author: David Cantrell Date: Fri Jan 23 14:34:30 2009 -1000 Fix __init__ to accept right parameters, add getSize() The __init__() method takes either a path to a device on the filesystem or an existing _ped.Device object. Removed the device parameter, since it didn't make sense. Added the getSize() method to convert the size of the Device, which is internally stored as a number of sectors, to a value suitable for display to the user. The conversion function defaults to megabytes, but you can pick your unit from bytes up to terabytes. commit 3a12ff1037b74a3b11f01fc6ef637fca16d27702 Author: David Cantrell Date: Fri Jan 23 14:33:02 2009 -1000 Import all objects, rename getDisk() to getDevice() Fill out __all__ and make sure we import everything in to the same namespace. The getDisk() method was a bit of a misnomer. It gives you a parted.Device, so rename it to getDevice(). I've added some getSize() methods to Partition and Device objects, which work sort of the same way. Put the exponents hash at the top level to share it between those methods. commit 1339448516c86da64afe6acc3a5b013e70402b93 Author: David Cantrell Date: Fri Jan 23 14:32:38 2009 -1000 PedPartitionType is an enum, so store it as an int. commit 9d86023f5a65cb41741f29fe8e6b0057e1f89a57 Author: David Cantrell Date: Fri Jan 23 12:35:06 2009 -1000 convertion -> converting Speling error. commit 6c030b0d6832804e3b8475a14adf6783fa576afd Author: David Cantrell Date: Fri Jan 23 12:27:27 2009 -1000 Return boolean types more safely. See commit 0383379fc77b90cfdb3df1f71b3c2888c7a791c5 for more details. commit 927c1f3ad7ea2b72822b3fc8dfec2f82a2628328 Author: David Cantrell Date: Fri Jan 23 12:23:30 2009 -1000 Return a string representation of this partition's flags. Walk the list of partition flags and if a flag applies to our partition, add its string name to a list of flags. After walking the list, return the string representations as a comma-separated list. commit 0383379fc77b90cfdb3df1f71b3c2888c7a791c5 Author: David Cantrell Date: Fri Jan 23 12:21:51 2009 -1000 Use 'int' for partition flag, return booleans more safely. Make sure that pydisk.c uses the int type for all instances of partition flags.
Rather than using PyBool_FromLong() to convert an int to a boolean in Python, just test the return value in C and use either the Py_RETURN_TRUE or Py_RETURN_FALSE macro. We've hit some size problems on 32-bit vs. 64-bit platforms when moving between ints and longs, so be safer about returning a boolean value. commit 7f662910e31a4e0d39fd82ce39e8d5ad1019fb43 Author: David Cantrell Date: Fri Jan 23 12:03:03 2009 -1000 Create a partitionFlag hash under parted.partition The partitionFlag hash holds a mapping of partition flag values to string representations. Done so we don't have to call the get next nonsense over and over at runtime. commit 7c2c5325a28e226a946e5fbe8b8322026d965199 Author: David Cantrell Date: Fri Jan 23 11:56:07 2009 -1000 Treat partition flag values as int consistently. We were reading an int and storing it in a long variable, so we never could look up a partition flag name. commit bfbb6222c193cb638efbe033873ecc1dcd15cc70 Author: David Cantrell Date: Fri Jan 23 11:44:22 2009 -1000 Prefix the partition flag functions with partition_ Keeping consistent with the other _ped toplevel functions, prefix the partition flag functions with partition_ commit 6ca5a09e6c3f62cc69998e8e4367b468fdcf79d4 Author: David Cantrell Date: Fri Jan 23 11:10:17 2009 -1000 Add getSize() method to parted.Partition The getSize() method will return the size of the partition in a unit suitable for human consumption. Internally, parted deals with sectors for the partition length, so we have to convert that to something suitable for display. partedUtils.py had a getPartSizeMB() function to convert the length to megabytes. This new getSize() method defaults to megabytes, but can also handle bytes through terabytes. Adding new size conversions should be easy too. commit 1df1e12c5b970913910c7137f6fea9e862f30890 Author: David Cantrell Date: Fri Jan 23 11:09:35 2009 -1000 Bring over cylinder<->sector conversion functions. These functions lived in partedUtils.py in anaconda prior to becoming methods on the parted.Device object. commit c381b769f0e7d1cf623bfb32fc2de7840cd40e3d Author: David Cantrell Date: Fri Jan 23 10:32:35 2009 -1000 Started the fdisk.py command Started the fdisk.py command, which aims to be a more or less compatible replacement for /sbin/fdisk that uses the pyparted module and is written in Python. We may not be able to do everything that fdisk can do, but the point of this program is to exercise the pyparted module. commit ee2e0e65eefbe9acc5287d9d302222ef96d4e01e Author: Chris Lumens Date: Fri Jan 23 17:05:04 2009 -0500 Remove the _register and _unregister methods. pyparted is the wrong place to be adding support for new disk labels and filesystems. commit 996d9292b08cd54c277619f8777c81213f97fab9 Author: Chris Lumens Date: Fri Jan 23 10:01:42 2009 -0500 Add basic test cases for _ped.FileSystemType. Do we want to add test cases for the _register and _unregister methods? Do we even want these in pyparted? commit 8ba8fcb7d920af238bdf2c54cac7736ccf282269 Author: Chris Lumens Date: Fri Jan 23 09:47:42 2009 -0500 Finish the test cases for CHSGeometry. commit cc4205e7902859934c22e5d446b77525e2e7abbd Author: Chris Lumens Date: Thu Jan 22 17:36:54 2009 -0500 Add stub test cases for every method that didn't already have one. commit 06598679fcb9d542e96ecb4d5876f502ad88e782 Author: Chris Lumens Date: Thu Jan 22 16:55:49 2009 -0500 More simple test cases for _ped.Partition. 
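The getSize() commits above describe converting an internal sector count to a display unit via a shared exponent table that defaults to megabytes. A minimal pure-Python sketch of that idea; the unit names, 1024 base, and helper signature are assumptions, and the real helpers live in the parted module itself.

    _exponent = {"B": 0, "KB": 1, "MB": 2, "GB": 3, "TB": 4}

    def get_size(length_in_sectors, sector_size, unit="MB"):
        # Lengths are stored as sectors internally; convert to a display
        # unit, defaulting to megabytes as the commits note.
        return (length_in_sectors * sector_size) / float(1024 ** _exponent[unit])

    assert get_size(2048, 512) == 1.0    # 2048 x 512-byte sectors = 1 MB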
commit ce19a5d5bfcd2d021959448d5fbb4fb4f7a06f49 Author: Chris Lumens Date: Thu Jan 22 16:55:12 2009 -0500 Catch a lot of assertions before they happen in _ped.Partition. commit e32ff4d5f1bbd496136c3927d08a23c915732933 Author: Chris Lumens Date: Thu Jan 22 16:38:17 2009 -0500 You can't call free on a PyObject. That just doesn't make sense. commit af5c76e78775a9f29c7ed9db38e9120ac1bdc001 Author: David Cantrell Date: Wed Jan 21 08:11:04 2009 -1000 Make some PyMemberDef members read-only. Set the READONLY flag for PyMemberDef struct members that need to be read-only. commit e5bcf30fce3c409d0e9c11573c7436d28f936c20 Author: David Cantrell Date: Wed Jan 21 04:29:58 2009 -1000 In __init__ methods for _ped objects, handle kwds=NULL We may call the __init__ with no keywords, so handle object init with the keywords set to NULL. Basically we just use PyArg_ParseTuple and read parameters in the same order. commit bc63f343547226463df57e7a37eafab3ec4cb25d Author: Chris Lumens Date: Wed Jan 21 15:30:23 2009 -0500 Add the beginnings of the _ped.Partition test cases. commit a3e06fb21a7acfbbe7881254ecc4ad5fa277bb27 Author: Chris Lumens Date: Wed Jan 21 15:21:21 2009 -0500 Add base test classes for partitions and mounted filesystems. commit 008eb5e9de4b25aca2e4cbd27e03d86100ab38b3 Author: Chris Lumens Date: Wed Jan 21 15:19:04 2009 -0500 Support having a filesystem type of None on Partition objects. If a _ped.Partition object has a type of PARTITION_FREESPACE or PARTITION_METADATA, it must not have an associated filesystem type. Otherwise, you'll get strange assertions when checking if a partition is active. Now I just need to modify everywhere else we support optional arguments to make sure they get set to a default. commit a3b55dc39b5ad9e9010ece7d947808a69874292e Author: David Cantrell Date: Wed Jan 21 03:21:33 2009 -1000 Fix PyArg argument processing for 32-bit architectures. This was a fun one to track down. After figuring out the code that was failing on my i386 devel box (but works on clumens' x86_64 box), I looked further and saw that our PedSector values were getting lost by the time we hit the next function. Anything that gets converted down to a PedSector needs to be treated as a long long int. We were converting those using "l" in the PyArg parsing functions, which is just long int. We need to use "L" to make sure it always uses long long int. For enum values, I changed those from long int to int. enum values are strange and we can probably leave them as long ints, but I want to be specific and consistent across the code base. commit b28164d7bb0eeff90f26af2cf2acab61f0972013 Author: David Cantrell Date: Wed Jan 21 03:16:09 2009 -1000 Support keywords or no keywords in init, read PedSector as long long. In _ped_Geometry_init(), rewrite the function to support cases where there are no keywords provided. For those instances, we want to use PyArg_ParseTuple(). If there are keywords, we want to call PyArg_ParseTupleAndKeywords(). For all instances where we are reading in a long long int using a PyArg parsing function, use "L" for the format string. We were using "l", which leads to problems on 32-bit architectures. All PedSector values are long long ints and need the "L" conversion. commit f44a4d7c1de69377f2d4f22c388176f856b7db1a Author: David Cantrell Date: Wed Jan 21 01:07:39 2009 -1000 Update README. I am no longer the GNU parted maintainer. 
commit e55fe762a74ffe22db2397e9480d87009a7d30f4 Author: David Cantrell Date: Tue Jan 20 05:25:04 2009 -1000 Do not override the current exception when calling tp_init If tp_init fails, we should already have an exception. There's no reason to drive over it with an exception that says we couldn't do what was just tried. We already know that. We need the exception that caused the tp_init call to fail. commit 5b41aab29729a00d3fb6218af24838dd8a9ca8b9 Author: David Cantrell Date: Mon Jan 19 09:07:52 2009 -1000 docstrings for parted.Disk commit ba5ca3f5b4c8e612d836cbc9846b276586d1e2bc Author: Chris Lumens Date: Wed Jan 21 13:01:38 2009 -0500 After calling ped_disk_new_fresh, flush the changes to the device. This is needed because _ped.Disk.commit_to_dev ends up calling into _ped_Disk2PedDisk, which then calls ped_disk_new, which will fail because there's still no disk label. commit 7a394056a9a2e6734b627d612c31f1f52b82af1d Author: Chris Lumens Date: Tue Jan 20 15:16:48 2009 -0500 In _ped_Disk_init, only call ped_disk_new if there's a label. If we are creating a new disk that has no label, we'll never get around to calling ped_disk_new_fresh and so there will never end up being a disk label. Instead, we need to probe for a label and if there isn't one present, add one. commit 50e62471f8113e5e2cfe9aba33578c979220a0ee Author: Chris Lumens Date: Mon Jan 19 15:23:19 2009 -0500 Add test cases for most of the methods on _ped.Device. commit 52c65b33fa3d9d8565f59bfcc8d4987f1a533660 Author: Chris Lumens Date: Mon Jan 19 15:22:57 2009 -0500 Add another test class for tests requiring a filesystem mounted. commit 3134333cc6817efd632e8676f975e9cba6d3adf8 Author: Chris Lumens Date: Mon Jan 19 15:19:26 2009 -0500 Add checks for libparted assertions. libparted will just raise C assertions for a variety of reasons. However, the assertions are outside the usual exception handling mechanisms so we can't do anything useful when they occur. So we need to duplicate the libparted checks and raise exceptions before calling into libparted. commit fe7a5b0ba7038289b4106c2dd899b9d7621b1714 Author: Chris Lumens Date: Mon Jan 19 14:47:52 2009 -0500 Copy open_count and external_mode back to the _ped.Device object. The open/close and begin/end_external_access functions have side effects. Namely, they modify the open_count and external_mode variables. In order for pyparted to know what's going on, we need to copy these modified values back into our _ped.Device object. commit 0d7c8eeb9cca65dac63f0c8034e33bb8a1ea2155 Author: Chris Lumens Date: Mon Jan 19 13:18:18 2009 -0500 Add test cases for the last of the constraint constructors in _ped. commit 7f2a2c384bbb817d4323c193ff0b021017ff95a8 Author: Chris Lumens Date: Mon Jan 19 13:16:43 2009 -0500 Enforce the assertion that the min geom must be within the max geom. commit 2c0f7e6019b8e88d6f616f305042afe8b7c82670 Author: Chris Lumens Date: Fri Jan 16 13:55:57 2009 -0500 Set all other PyObject pointers to NULL when parsing arguments fails. commit f58c10e45943fe68bc355f302b074923ac46e413 Author: Chris Lumens Date: Fri Jan 16 13:50:30 2009 -0500 Recover from type errors on processing args in _ped.Constraint.__init__. If arguments are provided to the _ped.Constraint.__init__ method in the wrong order such that the types don't line up properly, memory will get scribbled over and you'll get random segfaults. PyArg_ParseTupleAndKeywords appears to work from left to right doing arguments as they come, instead of validating types first and then doing assignments.
So, we need to set things back to NULL on error. commit 8cb83d003e1f8df04c8981c1b7ead6d83e9aa6ae Author: Chris Lumens Date: Fri Jan 16 13:39:31 2009 -0500 Add a test for wrong parameter order to _ped.Geometry.__init__. This is causing segfaults in _ped.Constraint.__init__, so add the test case to Geometry as well to make sure it works there. commit 3f90dc6f10bdc08caa7b617d9b428ae99cf5c86b Author: Chris Lumens Date: Fri Jan 16 13:37:05 2009 -0500 Add all the easy test cases for _ped.Constraint. commit e186c33fa9dbe16d0c5baa10c77478cf8215d286 Author: David Cantrell Date: Fri Jan 16 01:22:21 2009 -1000 We are the only authors of this pyparted iteration. commit 56c59dc83100d1657943f320062cda103c20c43b Author: David Cantrell Date: Fri Jan 16 01:20:15 2009 -1000 Updated to do list. commit 3ed9fdfd87020a917bfcb34b3c6b76bf4f9cc133 Author: David Cantrell Date: Fri Jan 16 01:18:01 2009 -1000 Remove py_ped_geometry_destroy() Similar to _ped.Alignment, users can just call 'del' in Python on the object and garbage collection will take over. Remove the destroy() method. Also remove the test case for _ped.Geometry.destroy() commit dbac632202d6fca43bb90708855c85e220ea5480 Author: David Cantrell Date: Fri Jan 16 01:17:25 2009 -1000 Remove py_ped_constraint_destroy() Similar to _ped.Alignment, users can just call 'del' in Python on the object and garbage collection will take over. Remove the destroy() method. commit efb83935cb40a8be5eae2db87d1cda5ef610c576 Author: David Cantrell Date: Fri Jan 16 01:15:39 2009 -1000 Make notes about the device, disk, and partition destroy methods. Leaving these alone for now, but they most likely segfault. Will investigate later. commit 397fb0a2ee937012077ec634ff4de78b46e6aa41 Author: David Cantrell Date: Thu Jan 15 07:27:33 2009 -1000 filesystem -> fileSystem Use Python-like naming for parted properties. Drop a comment in to record brain state so I can stop coding for now. commit ea3643fdbd95ab225e8dad4118e0b14bbab3e2c1 Author: David Cantrell Date: Thu Jan 15 07:11:50 2009 -1000 grainSize -> grain_size We use C-style naming in _ped and Python-style naming in parted. commit 03f9626f470cfb6ded863f7eb4ef84230e9446f0 Author: David Cantrell Date: Thu Jan 15 07:09:54 2009 -1000 Remove _ped_FileSystemType_set and _ped_FileSystem_set You cannot set attributes on _ped.FileSystem or _ped.FileSystemType, so remove the functionality. commit 67aa645969f91dfc92666a182d7030d269ade0d9 Author: David Cantrell Date: Thu Jan 15 07:06:58 2009 -1000 Remove _ped_DiskType_set You cannot set attributes on a _ped.DiskType, so remove the code that does that. commit 07143ebcb3cd712c5739c5a1d3a76462998cbbae Author: David Cantrell Date: Thu Jan 15 07:01:58 2009 -1000 Make properties on parted.Device read-only. commit 452030716c2d5cfa16166e3151475c67b4c5a7b1 Author: David Cantrell Date: Thu Jan 15 06:59:48 2009 -1000 Remove _ped_Device_set None of the attributes on _ped.Device can be set, so remove the function that handles that. commit 1b18f1e33b88420861e855e488bf7ef1157563f7 Author: David Cantrell Date: Thu Jan 15 06:20:25 2009 -1000 Remove AlignmentDestroyTestCase The py_ped_alignment_destroy() function is gone, so remove the associated test case. commit 75ed3c40692ee9382d71b6d76b388b0f67d73aef Author: David Cantrell Date: Thu Jan 15 06:17:47 2009 -1000 Add CHS geometry attributes to parted.Device Allow users to read the hardwareGeometry and biosGeometry attributes on the parted.Device object. The attribute returns a 3-tuple in the order of cylinders, heads, and sectors. 
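Usage sketch of the CHS attributes described in the last commit above, using the getAllDevices() entry point mentioned earlier in this log; the .path attribute of parted.Device is an assumption here.

    import parted  # requires pyparted; usage sketch only

    for dev in parted.getAllDevices():
        # hardwareGeometry (like biosGeometry) unpacks as cylinders, heads,
        # sectors, so the CHS-addressable sector count is their product.
        cylinders, heads, sectors = dev.hardwareGeometry
        print("%s: %d C/H/S sectors" % (dev.path, cylinders * heads * sectors))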
commit a534ff79f473178505627c4b394e2d014d26d688 Author: David Cantrell Date: Thu Jan 15 06:13:47 2009 -1000 Removed _ped_CHSGeometry_set The attributes on a _ped.CHSGeometry cannot be set by the user, they are read-only. Removed the set function and all capability for users to set those values. commit 75d094c83273bccfb74b956f2a273355c64589f4 Author: David Cantrell Date: Thu Jan 15 06:11:16 2009 -1000 Use Python-style naming for properties in the parted module. min_size -> minSize max_size -> maxSize commit ab84adeba1062effc1882e68850e1859dc358b1b Author: David Cantrell Date: Thu Jan 15 05:46:36 2009 -1000 Remove py_ped_alignment_destroy, it's pointless. By supporting garbage collection, the py_ped_alignment_destroy function is pointless. Destroying a PedAlignment in libparted just frees the struct, so our method on _ped.Alignment just reaps the Python object. There's really no point to this method since we have 'del' in Python to trigger this functionality. commit ace7608105fd68187dcd758aa64930540d0aea75 Author: David Cantrell Date: Thu Jan 15 04:17:37 2009 -1000 Removed DESIGN file. From a brainstorming session, we no longer need this. commit 06bf6c4c935dab7d089cdc6e80efd8ab54988c41 Author: Chris Lumens Date: Thu Jan 15 16:40:53 2009 -0500 Add more test cases for the _ped module. This includes a new base test case class - BuildList - that can be useful for testing all those stupid _get_next methods. commit 82d112be3f698cf8025327eb209445e7576b1bfe Author: Chris Lumens Date: Thu Jan 15 14:17:19 2009 -0500 Fix up another round of stupid pychecker errors. commit f6a8b67662b2fd0f777e07f609a8e615850f7687 Author: Chris Lumens Date: Thu Jan 15 14:14:04 2009 -0500 Fix a segfault in PyArg_ParseTuple caused by not passing in a double pointer. commit 84dab60f2b6c7f1583b449109202d5b56f038f88 Author: Chris Lumens Date: Thu Jan 15 14:03:38 2009 -0500 Let __init__ methods for each class accept either a low-level _ped type or all of the high-level parted objects that could be used to create the _ped type. commit 8b0d9b4983d6ed15d745f54e0a9026e2a7051775 Author: Chris Lumens Date: Thu Jan 15 14:02:49 2009 -0500 Fix a couple more s. -> self. typos. commit 186756fd91d39656a435eaa5b949d46eb9416e80 Author: Chris Lumens Date: Thu Jan 15 13:56:06 2009 -0500 Create getPed_____() methods to return the low-level _ped type contained within the high-level object. commit 7de64eecda02ba813fa4409b8a142f214b59269f Author: David Cantrell Date: Thu Jan 15 03:53:09 2009 -1000 Yes, there is more to do. commit 6d340b29488a1aef7f48b30122b6f99285254525 Author: Chris Lumens Date: Thu Jan 15 13:44:22 2009 -0500 Add the parted.Alignment class. commit 2fad5c41276b9a06ec6776788d5b437c41927fca Author: Chris Lumens Date: Thu Jan 15 11:05:22 2009 -0500 Fix errors found by pychecker. commit cdd17c32a3ddf3ed7c569d4478bcd150505ed22b Author: Chris Lumens Date: Thu Jan 15 11:05:01 2009 -0500 Allow passing command line arguments to pychecker. commit 03db3a6d977261915535745858a697417ba70dc9 Author: David Cantrell Date: Thu Jan 15 03:27:31 2009 -1000 Put get_create_constraint() and get_copy_constraint() on _ped.Device. Put the get_create_constraint() and get_copy_constraint() as methods on the _ped.Device object. We originally had these as methods on _ped.FileSystem because they are functions in filesys.c in libparted. However, looking at the code in libparted, you can tell it's more obvious that the action is on a specific device, not a specific filesystem. 
The code for these methods still lives in pyfilesys.c to keep it consistent with the libparted code, but the methods in Python live on the _ped.Device class. commit 643c09a0ab85a9d688db4c93f22c5564723a56f3 Author: David Cantrell Date: Thu Jan 15 01:53:39 2009 -1000 Add parted.FileSystem class. This one will need some more work, but the framework is there. NOTE: The probe and probe_specific functions in libparted exist at the parted level rather than being methods on a FileSystem. Additionally, the get_create_constraint and get_copy_constraint functions in libparted are more closely related to a Device than a FileSystem, so they exist as methods on a Device. commit 6f3714f62db09cd42bdedd1ad249d6cbfabe1768 Author: David Cantrell Date: Thu Jan 15 01:48:42 2009 -1000 Add parted.probeForSpecificFileSystem and parted.probeFileSystem commit bc815d5b16fac9fbda1f1bddb797182daf10fc19 Author: David Cantrell Date: Thu Jan 15 01:44:12 2009 -1000 Add getPedGeometry() method to parted.Geometry This method returns the _ped.Geometry object in the class. commit 10c76ffb4d722f13fb41195c6b9a837fdedd604c Author: David Cantrell Date: Wed Jan 14 08:33:02 2009 -1000 Support calling py_ped_file_system_type_get_next() from _ped. Allow users to pass nothing in to get the first file system type, or to specify an existing file system type to get the next in the list. commit 1ddbc7a4f016036b786bb512236447905d17c068 Author: Chris Lumens Date: Wed Jan 14 16:11:03 2009 -0500 Add a pychecker make target that's callable from the top level. Also, remove all the *.pyc files on maintainer-clean. commit 030b797b1e1f214fc10e94205b811eecb7aa897b Author: Chris Lumens Date: Wed Jan 14 15:57:04 2009 -0500 Add the src/parted directory to the makefiles. Among other things, this makes sure it gets picked up by 'make dist'. In the near future, it'll also mean we have the Makefile framework in place to add a 'make pycheck' target. commit 74d8daebe0e27715d3a5bc4a1f589b015b828797 Author: David Cantrell Date: Wed Jan 14 08:22:30 2009 -1000 Move FileSystemType get functions to _ped Similar to the DiskType get functions commit from yesterday. We need these to live outside of the FileSystemType object. commit bedafbebccd49b38880d6b5f78c4e678bc515eff Author: David Cantrell Date: Wed Jan 14 08:13:44 2009 -1000 Removed test script since we have localtest. commit b6d9e1de3312af4aa7aa303bff5370ee94717f5f Author: David Cantrell Date: Wed Jan 14 08:08:19 2009 -1000 Support instantiating parted.Partition with _ped.Partition. For internal module use, we may want to instantiate a parted.Partition with just a _ped.Partition. You can do that with the PedPartition= named parameter. commit a8d8ff2027dab9151e04a09c08208b66bb3feffa Author: David Cantrell Date: Wed Jan 14 07:47:25 2009 -1000 prop -> property Consistency, but pointless. commit 5b767330af8c1a2d5e70aceb5e1f80a6ebe6a37f Author: David Cantrell Date: Wed Jan 14 07:44:06 2009 -1000 Consistently name the __readOnly() method in our classes. We are using properties and have a method to raise an exception if someone tries to write to a read-only property. Purely for code consistency, make sure this method is named the same in all of our classes. commit 3f9d14872037e3c16378fa1de0144618f03229f2 Author: David Cantrell Date: Wed Jan 14 07:31:01 2009 -1000 Make some variables private and super private in disk.py. The device and type passed in to the object should be private. Do not make the partitions list private either. Be sure to hide the type variable used to build up diskType.
commit 43efffc3607b0a564005979b6f237f5e6e23ced0 Author: Chris Lumens Date: Wed Jan 14 15:45:29 2009 -0500 Add the parted.constraint class. commit d3f584b160bf02e574ed490fff3803040ff2b790 Author: Chris Lumens Date: Tue Jan 13 17:33:40 2009 -0500 Fix the _ped.flag_get_name test case to anticipate the proper exception. commit c6445234bc3d792396220103fd39fe819484e737 Author: David Cantrell Date: Tue Jan 13 10:13:38 2009 -1000 test script commit df3b3b49b2f12b5c5c7f706fbe14925cf0352174 Author: David Cantrell Date: Tue Jan 13 10:12:53 2009 -1000 Take in optional DiskType object for py_ped_disk_type_get_next() Since the get functions for DiskType live at the toplevel now, take an optional DiskType object so that we can iterate over the list and collect all types. commit 4bb25ebd61419c6520ec1abb3dadb7cf20465bd5 Author: David Cantrell Date: Tue Jan 13 10:12:13 2009 -1000 Initialize a diskType hash containing all DiskType objects. key=type name (e.g., 'msdos') value=_ped.DiskType object for that type commit 82b956950ac6054d10295d5b0877bd7f262df5ae Author: David Cantrell Date: Tue Jan 13 10:11:48 2009 -1000 Import from disk.py in parted/__init__.py commit 6ae1b6722f653141cf49d986d530890e131cc598 Author: David Cantrell Date: Tue Jan 13 10:10:09 2009 -1000 Move DiskType get functions to the top level _ped module. DiskTypes are initialized at runtime, but are essentially static. We can remove a type once the library is loaded, but we never really create new types. They are merely for reference purposes to give us things like type names and feature lists of disk types. I moved the get functions for DiskType to _ped so that in the parted/disk.py module, I can initialize a diskType hash table that contains all of the disk types with the type name as the key value of the hash. commit 0eba5dc02f9a6cdfc54fffb8a89a0dbf176cd565 Author: David Cantrell Date: Tue Jan 13 09:39:27 2009 -1000 Docstring fix for device_get_next_doc in _pedmodule.c The method is device_get_next(), not get_next(). commit c58f8589a84b1df3553aea5ae7bd961d70a8aad2 Author: David Cantrell Date: Tue Jan 13 09:35:09 2009 -1000 Started the parted.Disk module. Disk starts to get a bit more complicated past Device. The beginnings are in the class, but there is more to come. commit 6c6efa901fd515515965df48936144b5dea01e2c Author: David Cantrell Date: Tue Jan 13 09:33:54 2009 -1000 Created getPedDevice() method on parted.Device. The point of this method is to give us the _ped.Device low level object that is part of the parted.Device. This might change to a property later, but for now I'm just adding a method to do this. commit 469323dafbb8ebfe9e9ba4053382c16163bb4f30 Author: Chris Lumens Date: Tue Jan 13 16:51:02 2009 -0500 Add stub test case files for all the other _ped classes. commit 331b325cf3704f898a80b4fe1edae6aa3112da3e Author: Chris Lumens Date: Tue Jan 13 16:02:39 2009 -0500 Geometry.read never needed to take in a buffer argument. commit 607e4a895684ba5f70632b626d7810fdf197e2c5 Author: Chris Lumens Date: Tue Jan 13 15:58:09 2009 -0500 Add parted.Partition, though it still needs a few docstrings. commit eb3d4c614b3eaf20c197f6a94c2507b30d004912 Author: Chris Lumens Date: Tue Jan 13 15:46:18 2009 -0500 Add module-level documentation to parted.Geometry. commit fbe3a4f78b9e45af190317dff249620a65565bdd Author: Chris Lumens Date: Tue Jan 13 14:44:36 2009 -0500 Don't assert in libparted if an invalid flag constant is passed. 
commit 615a6266e403464e972653494cac39fd0131c42f Author: David Cantrell Date: Tue Jan 13 07:22:08 2009 -1000 Add docstrings for the parted.Device class. commit 9108b68d5ace0ecf518466012307af28cd154e61 Author: David Cantrell Date: Tue Jan 13 07:08:01 2009 -1000 Define parted.ReadOnlyProperty exception for Device class. Define the ReadOnlyProperty exception at the top level parted module, raised when users try to write to a read-only property. Also define the __all__ list and import the submodules inside the parted module. commit c6f999a7d0373fabfb9ff4e47c25bf378450f85c Author: David Cantrell Date: Tue Jan 13 08:26:32 2009 -1000 Call setattr() for property set methods, docstrings, typo fixes. 1) Use setattr() for the set part of a property() call. 2) Add docstring for the main class. 3) Inherit 'object' not 'Object'. 4) The busy property needed to accept 'v' in the set lambda expression. commit d133b711087c60143e9fbf78cd396faf7f5420b6 Author: Chris Lumens Date: Mon Jan 12 17:14:53 2009 -0500 Add the parted.Geometry module. commit ea1b6a6f71a0af03c593a9a74a9acdec5afb8ef1 Author: David Cantrell Date: Mon Jan 12 12:08:12 2009 -1000 Started the parted.Device module. Still need to work on docstrings and property handling, and possibly buffer handling as well, but the basic structure is there. commit 40a0a7cf9f7c8e48b21822b0de36e8aa71b8618e Author: Chris Lumens Date: Mon Jan 12 15:26:20 2009 -0500 Add base level getAllDisks and getDisk methods. commit 13676093d2b9da71d964dbb9fe849865669d79f4 Author: Chris Lumens Date: Mon Jan 12 15:00:49 2009 -0500 Move device_get_next to the _ped module. We need this method in the top level _ped module so parted.getAllDisks can have access to the method it needs to build up a list of all devices. Having it be a method on a Device instance doesn't work. commit 6c37f2702969c6c1272ca3f53548ce2e099d94a1 Author: David Cantrell Date: Mon Jan 12 08:55:34 2009 -1000 Rough design notes for the higher level pyparted API. Notes from the discussion of the higher level pyparted API. commit b5df8e5e658b20f5caefb0d097fe3462886302f6 Author: Chris Lumens Date: Thu Jan 8 13:45:02 2009 -0500 Remove the stubs of the old API since we're not going to do that. Implementing the old pyparted API is difficult and seems like a waste of time, plus doesn't really get us all that much. It's being removed so we can focus on designing the new API and get to work testing that. commit 5b43da6b3a385075d11f156828ddca1393737444 Author: David Cantrell Date: Thu Jan 8 08:47:37 2009 -1000 Removed PedDisk.py We are not going to support the old pyparted API. commit 7c955b1070ee2c3a433e805fe4bb7e5e0d83c48f Author: Chris Lumens Date: Tue Dec 16 13:14:40 2008 -0500 Add the parted.PedPartition module. commit 9ce6ffaf00a17be4cc1f79ade05198fae45a2f49 Author: Chris Lumens Date: Mon Dec 15 16:55:00 2008 -0500 Add a more complete PedDisk object, though it's not yet accessible. parted.PedDisk gets used, but there's also parted.PedDisk.new which we still need to make visible so anaconda can call it. I haven't reconciled that problem yet, but for now here's the full parted.PedDisk class. commit e26ed634786ea460755549318ee96368dfc3026f Author: David Cantrell Date: Mon Dec 15 17:26:46 2008 -1000 Initialize tp_dict to NULL. Python documentation recommends that you set tp_dict to NULL before you call PyType_Ready(). You can modify it after the fact, but you want it to be NULL so that PyType_Ready() can set it up.
commit d89022bf420d048beb4f6a9ba8bd00b8fc845729 Author: David Cantrell Date: Mon Dec 15 17:23:07 2008 -1000 Remove tp_print placeholder. Current Python documentation recommends that you do not set tp_print to anything as it may be removed in future versions of Python. The tp_repr and tp_str functions should be used instead. commit 9f3d1c39e12c3e99416497b7e51f3751881426fe Author: David Cantrell Date: Mon Dec 15 17:21:57 2008 -1000 Set tp_call to NULL for all types. None of our types are callable (for now), so set tp_call to NULL. commit 6854ee5552328d131239b811affe6758c6e09fb5 Author: David Cantrell Date: Mon Dec 15 16:48:14 2008 -1000 Remove internal-only PyTypeObject members. tp_mro, tp_cache, tp_subclasses, and tp_weaklist are internal-only, so we don't need to define them in our PyTypeObjects. commit 6e17314f7b60982db2a779a865243e9931238650 Author: David Cantrell Date: Mon Dec 15 16:34:34 2008 -1000 Destroy PedDisk and PedPartitions and associated Python objects. In py_ped_disk_destroy(), call ped_disk_destroy() on the PedDisk and call Py_CLEAR() on self. In py_ped_partition_destroy(), call ped_partition_destroy() on the PedPartition and call Py_CLEAR() on self. commit 98c912382f6e3b04c0b7f56333fd908c3aa6f592 Author: David Cantrell Date: Mon Dec 15 16:33:42 2008 -1000 Call Py_CLEAR() from _destroy functions. Make sure we are calling Py_CLEAR() in the destroy functions for _ped.Geometry, _ped.Alignment, and _ped.Timer. commit 3c06ed94589d23f62f6ded6538b45ef84408147b Author: David Cantrell Date: Mon Dec 15 16:04:47 2008 -1000 Call Py_CLEAR() in py_ped_device_destroy() to reap the object. We successfully destroy the PedDevice in libparted, but we need to decrement the refcount on self to 0 so that Python's garbage collector will destroy the Python object. commit f2dc083afaa7714845a1bf517d7751d426d3f278 Author: David Cantrell Date: Mon Dec 15 16:02:20 2008 -1000 Call Py_CLEAR() in py_ped_constraint_destroy() We do not need to convert the _ped.Constraint to a PedConstraint only to destroy it. Call Py_CLEAR() on the PyObject pointer passed in to the function (self) so we drop its refcount to 0 and then Python should take over from there. commit e6471cade66b429c116848bb085a174ca0cf465f Author: David Cantrell Date: Mon Dec 15 15:52:53 2008 -1000 Use Py_CLEAR() in dealloc and clear functions. Use Py_CLEAR() instead of Py_XDECREF() in dealloc and clear functions for our types. Also set PyObject type members to NULL after we clear them. commit 3d50d6d1aa605211184ae9b42a7256d86f6a2f15 Author: Chris Lumens Date: Mon Dec 15 15:14:59 2008 -0500 Update parted.PedDevice for changes to _ped.Device. It's been a long time since we looked at the parted module. One big thing we changed is that you can no longer create a _ped.Device yourself. We also moved a couple methods around. Update PedDevice to reflect these changes. I have not yet tested this module, but test cases for it will be coming. commit c2d73d53e65a760471304b9d19646f8219a32ae0 Author: David Cantrell Date: Mon Dec 15 10:23:25 2008 -1000 Hate vim autoformatting of comments. commit ac8259c7112d4a2775bfc0c29975da3400b197fa Author: David Cantrell Date: Mon Dec 15 10:19:16 2008 -1000 Move py_ped_disk_probe() to pydevice.c, make method on _ped.Device This is the first of hopefully few 1:1 function shift arounds. We started the _ped module code by creating py*.c files that more or less mirrored the libparted code as closely as possible while creating Python objects for the typedefs in libparted.
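Several entries above switch the dealloc and clear functions over to Py_CLEAR(). For readers unfamiliar with the idiom, here is a minimal, hedged sketch of what such a pair typically looks like for a GC-tracked type with one PyObject member; the struct and names are placeholders, not the real _ped definitions.

#include <Python.h>

typedef struct {
    PyObject_HEAD
    PyObject *dev;       /* e.g. a reference to a Device-like object */
    long long start;
    long long length;
} GeometryLike;

static int
GeometryLike_clear(GeometryLike *self)
{
    /* Py_CLEAR drops the reference and sets the slot to NULL in one step,
     * which avoids the dangling-pointer window Py_XDECREF leaves open. */
    Py_CLEAR(self->dev);
    return 0;
}

static void
GeometryLike_dealloc(GeometryLike *self)
{
    PyObject_GC_UnTrack(self);          /* stop GC from walking a dying object */
    GeometryLike_clear(self);
    Py_TYPE(self)->tp_free((PyObject *) self);
}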
The py_ped_disk_probe() function is the first one that we need to move to a new file. It was part of pydisk.c, but since it operates on _ped.Device objects, it should be a method on that object. As such, it's now in the pydevice.c file and the include components have changed locations in the header files. commit ee144f33bd2599d4eb8e35ae4f1ce86e12a69225 Author: Chris Lumens Date: Mon Dec 15 14:42:00 2008 -0500 Add a test case for _ped.Alignment.is_aligned. commit a678cc5c13f9c376d1c61af19f5b8981d5651e08 Author: Chris Lumens Date: Mon Dec 15 14:03:19 2008 -0500 Handle various libparted assertions inside ped_geometry_{read,write}. libparted will assert if the device is not yet open, so handle that before calling into libparted and raise an exception. Likewise, passing bad arguments also causes assertions and must be caught ahead of time. commit 5df0432165bae9ff3ddf582665239a934c693060 Author: Chris Lumens Date: Fri Dec 12 13:15:12 2008 -0500 Add test cases for most of _ped.Geometry, except the tricky ones at the end. commit 861d97fe1a9c821809d27b255564486619aa89f7 Author: Chris Lumens Date: Fri Dec 12 10:03:26 2008 -0500 Add the initial set of tests for _ped.Alignment. commit 857b5199e7d03cd01e1479d44898f5a6bcebbaa2 Author: David Cantrell Date: Fri Dec 12 18:56:46 2008 -1000 Store PedDevice pointer in _ped.Geometry object. A PedGeometry stores a pointer to a PedDevice, so store a copy of that pointer in the _ped.Geometry object. commit bc38b608a1e5b6742179c3b921738f32afdb6e8d Author: David Cantrell Date: Fri Dec 12 18:51:41 2008 -1000 Do not use ped_geometry_destroy() PedGeometry stores three PedSectors and a pointer to PedDevice. If we call ped_geometry_destroy, we kill that PedDevice, which we don't want to do. PedSectors are long long variables, so those are fine. commit 10492d1ea2ca91efb4ccadc08d6627e2e6c75ee0 Author: David Cantrell Date: Fri Dec 12 13:17:52 2008 -1000 Comment explaining _ped_Device2PedDevice. commit 9825f6f14a171b8654bce110ce5fe8f1f27e5841 Author: David Cantrell Date: Fri Dec 12 13:15:11 2008 -1000 Do not run ped_device_destroy() all over the place. Do not use ped_device_destroy() to free a PedDevice. We are not working with PedDevice types like we do with a PedGeometry or something similar. We are passing the pointers around to the master copies of PedDevice objects. If you are getting a PedDevice by calling _ped_Device2PedDevice(), you are getting a pointer and you SHOULD NOT destroy it. The only place where we should call ped_device_destroy() is in py_ped_device_destroy(). commit 2318284f9569f797601dc2769f495df79f04a516 Author: David Cantrell Date: Fri Dec 12 13:01:31 2008 -1000 Store PedDevice pointer in _ped_Device struct. The PedDevice structs in libparted are unique. Unlike the other types in libparted, PedDevices are generated when the library loads and scans for everything. Things like Constraint and Alignment can be created on the fly and thrown away. But Devices are arch-specific and need to live through the life of the program. In our _ped_Device struct, keep our convenience copies of the data, but also store a pointer to the PedDevice we're mirroring to Python so that we can use that in other calls where we need the PedDevice. This solves the "what to do about the arch_specific data" issue. commit a2e84105e8fed9c98f690ae37decf3f51eb3ee01 Author: David Cantrell Date: Fri Dec 12 12:15:11 2008 -1000 Add AC_CONFIG_MACRO_DIR([m4]) to configure.ac The output from the bootstrap script keeps telling us to do that, so I did that.
commit 7ad7f64854f70a67a13bad7424eba38f4e391cab Author: Chris Lumens Date: Fri Dec 12 16:24:12 2008 -0500 Fix errors in converting _ped.Device to and from PedDevice. First, _ped_Device2PedDevice was only doing a ped_device_get, it wasn't actually copying any of the values into the PedDevice object. That's not going to work. Second, we have to copy the arch_specific pointer around since that has function pointers that libparted methods will need to access. commit f9f37e5c5ac9922bb461b66ccf9793b5f46a77ab Author: Chris Lumens Date: Fri Dec 12 16:19:13 2008 -0500 Make sure to copy the open_count attribute when we open a device. This is another instance where a libparted function sets a value on one of the libparted types, and we don't copy it back into the corresponding pyparted type. This should make read/write methods work a little better. commit f94ee259d845c2c26dd04a877f2bd2fd4afe778d Author: Chris Lumens Date: Thu Dec 11 16:28:55 2008 -0500 Add a test case for _ped.device_get. commit df898812d836a00a18c26c516426758b4bd8c73f Author: Chris Lumens Date: Thu Dec 11 16:24:27 2008 -0500 Raise an exception instead of segfaulting on _ped.device_get(None). commit b6c6450a586d7b06e7d6253036d42e5f8f899de0 Author: Chris Lumens Date: Fri Dec 12 10:52:38 2008 -0500 Copy modified attributes from ped_geometry_set_* to the _ped.Geometry object. All our _ped.Geometry.set_* methods were failing because the values were being set on a libparted object, but never being copied back to the _ped.Geometry object. So it's like those method calls never happened. Unfortunately, this causes another GC error to appear. commit 8efcd8c2d713812c49cb51290878906bb5a5fd71 Author: Chris Lumens Date: Fri Dec 12 10:19:57 2008 -0500 Fix a bunch of typos and thinkos in the test cases. The existing _ped.Geometry test cases should now all pass with the right successes and errors, except destroy. But then, we don't know what we want to do with the destroy methods anyway. commit 660f5ec59e3f0c3ab153cdc233772fc4d653da69 Author: David Cantrell Date: Thu Dec 11 19:12:18 2008 -1000 Handle reference counts in _ped.Geometry correctly. In the dealloc and clear functions for _ped.Geometry, use the Py_CLEAR() call to bring the reference count to 0 for PyObject members. In the init method, initialize PyObject members to NULL and increment the reference count on self before returning. commit 7fd719e2dd28638ecf9877a1a0c565754246ee81 Author: David Cantrell Date: Thu Dec 11 16:18:09 2008 -1000 Handle object members in _ped.FileSystem correctly. This object may need some more work later. We allow users to create _ped.FileSystem objects, but we don't want to call libparted from inside the __init__ function to see that what the user provided is valid. Why not? The only function we get in libparted to check that is the ped_file_system_create() function, but the user may not want to create the filesystem on disk yet. The disk and type parameters are required, so make sure to Py_INCREF those correctly. An optional parameter for now is checked. Init it to false. I say this function may need some more work because we may want to change 'checked' to be a boolean. commit 3222d345e2664cc1f8b0994779c0d44e96b509d9 Author: David Cantrell Date: Thu Dec 11 16:15:50 2008 -1000 Initialize members in _ped_Geometry_init() correctly. Call Py_INCREF() on the _ped.Device object. For start, length, and end, just copy those back from the PedGeometry we get. 
end may or may not be provided by the user, but it doesn't matter because we should be able to just copy the values directly back from the PedGeometry and know we have everything we need. They are long long values in our struct and in libparted, so no special PyObject handling for these. commit acde8a535ff87b945cb5af67b5f40ce354851310 Author: David Cantrell Date: Thu Dec 11 16:11:25 2008 -1000 Copy in missing members in _ped_Disk_init() Similar to the work done for 67fcd69777057e5670e90d523a17d24e3247c6f4 commit 02c2c2764a9ccf42f74c6c40d2808c0318ed6331 Author: David Cantrell Date: Thu Dec 11 15:44:20 2008 -1000 Copy in missing members in _ped_Constraint_init() Similar to the work done for 67fcd69777057e5670e90d523a17d24e3247c6f4 commit 46c72caea7d4fefe30df7086110f0179f88e06a4 Author: David Cantrell Date: Thu Dec 11 15:35:22 2008 -1000 Use tp_alloc for types that must use factory functions. We have some types that users are not allowed to create on their own. The module may hand them one of these object types, but one cannot be created in the usual way. For these types, we have tp_new set to NULL. In convert.c, make sure we call tp_alloc rather than tp_new to allocate the new object and return it to the caller. commit 429a1f09a8c3376ff6e419031e044fa6cf353fc7 Author: David Cantrell Date: Thu Dec 11 13:19:45 2008 -1000 Py_CLEAR sets the argument to NULL, remove redundant line. commit 9501a349c8d24e12e71bbd0722ca4cf3bcc1534d Author: David Cantrell Date: Thu Dec 11 13:12:33 2008 -1000 Copy in missing members in _ped_Partition_init() For some objects, we only require the user to give us a bare minimum of information and then we let libparted compute the other members. To get these additional members back in to the Python object, I was doing a Py_XDECREF on self and then simply converting the newly generated libparted type to a Python object and assigning it to self. On paper, this sounded like a good idea. Several problems: 1) Py_XDECREF only decrements the count by one. We really need to zero out the count. 2) Clearing self and then assigning something new to it while you're inside the object isn't very clean. 3) The new self needed a Py_INCREF. Things I've learned: - PyArg_ParseTuple and PyArg_ParseTupleAndKeywords do not run Py_INCREF for you on PyObjects it reads. The new solution is pretty simple to follow. We keep the parsing using PyArg_ParseTupleAndKeywords and we keep the libparted call because that computes the other members we need, but changes I've made: 1) Py_INCREF the PyObjects read by PyArg_ParseTupleAndKeywords 2) Copy in computed members that are not PyObjects by just using an assignment in C. 3) For computed members that are PyObjects, first I Py_CLEAR the member and set it to NULL, then I convert the computed type to a PyObject, and lastly I call Py_INCREF on that member. 4) Use ped_free() to destroy the PedPartition we made. In short, we need to use Py_INCREF on all PyObjects read by the PyArg_ParseTuple functions. We should not clear self and reconvert the object, instead we should correctly copy in the new members. 
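The 9501a34 entry above spells out the reference-counting lessons in prose; the following is a short, hedged sketch of the resulting pattern. All names (PartitionLike, the "computed" stand-in values) are illustrative only, not the actual pyparted code.

#include <Python.h>

typedef struct {
    PyObject_HEAD
    PyObject *disk;       /* PyObject member supplied by the caller     */
    PyObject *geom;       /* PyObject member computed after the C call  */
    int       num;        /* plain C member computed after the C call   */
} PartitionLike;

static int
PartitionLike_init(PartitionLike *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"disk", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O", kwlist, &self->disk))
        return -1;

    /* 1) PyArg_Parse* hands back borrowed references; INCREF what we keep. */
    Py_INCREF(self->disk);

    /* 2) Plain C members computed by the library are copied by assignment. */
    self->num = 1;                                /* stand-in for the computed value */

    /* 3) Computed PyObject members: clear the old reference first, then
     *    store the newly built object (Py_BuildValue already returns a
     *    new reference, so no extra INCREF is needed here). */
    Py_CLEAR(self->geom);
    self->geom = Py_BuildValue("(LL)", 0LL, 2048LL);   /* stand-in object */
    if (self->geom == NULL)
        return -1;

    return 0;
}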
commit 7e973965a5710fce3d9bce96d5c3b4e848331890 Author: David Cantrell Date: Thu Dec 11 12:46:15 2008 -1000 Use tp_new for PedCHSGeometry2_ped_CHSGeometry() commit 983d4a92bc455e7811ad72d33f42eedabad81598 Author: David Cantrell Date: Thu Dec 11 12:45:12 2008 -1000 Use tp_new for PedFileSystemType2_ped_FileSystemType() commit 461f7c6fbbf2a3bebe63c47050e9df2942ace69e Author: David Cantrell Date: Thu Dec 11 12:43:59 2008 -1000 Use tp_new in PedDiskType2_ped_DiskType() commit 8c27c45f784c09b37dac55444a622f2209d9eca9 Author: David Cantrell Date: Thu Dec 11 12:41:19 2008 -1000 Use tp_new/tp_init for PedDisk2_ped_Disk commit 26aaa738bfadf10ddf206713c0fba988108bef95 Author: David Cantrell Date: Thu Dec 11 12:25:47 2008 -1000 Use tp_new in PedDevice2_ped_Device() Use tp_new to allocate the new _ped.Device object in PedDevice2_ped_Device. Removed some GC function calls and added Py_INCREF calls for the _ped.CHSGeometry objects we create. commit 955e565f943a4ccd1b12870a36d39fc89b85b421 Author: David Cantrell Date: Thu Dec 11 12:17:07 2008 -1000 Use tp_new/tp_init for PedFileSystem2_ped_FileSystem() commit 9b20c54d920b8f92bae69b8a9a4f019c5c9388b6 Author: David Cantrell Date: Thu Dec 11 12:00:43 2008 -1000 Use tp_new/tp_init in PedConstraint2_ped_Constraint commit cca6a1bba756e95bdee30b0d32c80d72340e1830 Author: David Cantrell Date: Thu Dec 11 11:30:27 2008 -1000 Change PedGeometry2_ped_Geometry() to use tp_new and tp_init We changed PedAlignment2_ped_Alignment to use the tp_new and tp_init function pointers for the _ped.Alignment type object and use the Py_BuildValue() function to build the argument list for tp_init. Changed PedGeometry2_ped_Geometry to work the same way. Also fixed up an error message in PedAlignment2_ped_Alignment. commit ab84bf7ea1a0bc598cf6e0a6576b3bbf1c0c5424 Author: David Cantrell Date: Thu Dec 11 11:10:32 2008 -1000 Move #include to typeobjects header files. We only need to include this header for the structs defined in the typeobjects header files. commit c62b331165556e426cc050d643e34321ecf97006 Author: Chris Lumens Date: Thu Dec 11 13:58:19 2008 -0500 If _ped_Geometry_init returns -1, make sure we set an error string. Python requires all NULL/-1/other error code returns from functions to have some error string set as well. Otherwise, it complains. If we are not handling this same case in all the other files, I'll need to go through and change them too. It's probably easiest to do that as I work my way through the test cases. commit 77797fd92681380676a57d21f5a4559b65d333dd Author: Chris Lumens Date: Thu Dec 11 10:38:41 2008 -0500 Fix converting libparted exceptions into Python exceptions. First, we were always hitting a double free because libparted frees the error string for us, but we were also doing that in partedExnHandler. Second, the exception flag and message need to be moved into a .c file and externed in the header, just like was required for fixing the isinstance problem. commit 62f9fb45609466af0fd032620af9963e27b8aa03 Author: Chris Lumens Date: Thu Dec 11 11:45:13 2008 -0500 Add a test case for CHSGeometry, which is really kind of useless. commit 0268f3bd821ca26c2b919d8131e47a4ec565fe71 Author: Chris Lumens Date: Thu Dec 11 14:08:06 2008 -0500 Add test cases for all the easy methods on _ped.Geometry. These test cases do not all pass yet. There are some GC-related errors and other problems that are keeping them from passing. However, I've only been working on them on a private branch.
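The cca6a1b entry above describes the conversion style in words: allocate through the type's tp_new slot and initialize through tp_init with an argument tuple built by Py_BuildValue(). Below is a hedged, generic sketch of that pattern; the type name and fields are placeholders, and error handling is trimmed to the essentials.

#include <Python.h>

extern PyTypeObject Example_Type;   /* assumed to be defined elsewhere */

static PyObject *
c_struct_to_python(long long start, long long length)
{
    PyObject *args, *obj;

    /* Build the positional argument tuple tp_init expects. */
    args = Py_BuildValue("(LL)", start, length);
    if (args == NULL)
        return NULL;

    /* Allocate through tp_new so default slot values are set up. */
    obj = Example_Type.tp_new(&Example_Type, args, NULL);
    if (obj == NULL) {
        Py_DECREF(args);
        return NULL;
    }

    /* Initialize through tp_init, exactly as __init__ would be called. */
    if (Example_Type.tp_init(obj, args, NULL) < 0) {
        Py_DECREF(args);
        Py_DECREF(obj);
        return NULL;
    }

    Py_DECREF(args);
    return obj;
}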
By pulling the test cases onto master, it's opened up for everyone to see the errors and work on them. commit 1ad2c616060300e021964b7a6a780893641704d9 Author: Chris Lumens Date: Mon Nov 24 15:14:11 2008 -0500 Add a file to contain baseclasses for our test cases. Lots of our test cases will have to do similar operations, like set up a temporary device for testing destructive parted functions against. For these test cases, it's convenient to create a base class with the setUp and tearDown methods that the test cases then subclass. commit e1435fde00c9ef7f3136ec5ebac020d1569e1fbe Author: Chris Lumens Date: Thu Dec 11 14:05:30 2008 -0500 Add test case stubs for the other methods that have moved into _ped. commit d3044a116b60f8a90ca0fdfef0dcfad56337a6e1 Author: David Cantrell Date: Wed Dec 10 18:45:31 2008 -1000 Forgot the \ at the end of the noinst_HEADERS line. commit 25bfe7e3123871a82c770e4bab03069823fb3522 Author: David Cantrell Date: Wed Dec 10 18:44:22 2008 -1000 Created include/typeobjects/*.h to hold PyTypeObject definitions. I like header files for source organization. Since I did that for the docstrings, I decided to move the PyTypeObject stuff into header files under the include/typeobjects/ subdirectory. That way, we can work on those structs in one location and not end up with huge source files in src/ commit d9ed389c0c0527793c5d1a7e1b989f9d27ec795d Author: David Cantrell Date: Wed Dec 10 18:02:10 2008 -1000 Move all PyDoc_STRVAR() declarations to docstrings header files. We only want docstrings for PyTypeObjects and related structures. The docstrings get long, so put them in their own header files under include/docstrings and then include the per-source docstrings header under src/ Organizational change and also clears up errors where we were defining tons of docstrings each time we included something. commit 6c315acde583b82407559de7260dc83007a7acb0 Author: David Cantrell Date: Wed Dec 10 17:40:10 2008 -1000 Move other static structs to C source for PyTypeObjects. Move PyMemberDef, PyMethodDef, and PyGetSetDef structs to C source files to be next to the PyTypeObject structs. commit a581ac8f90c553206f353254eec19a5770c94a56 Author: David Cantrell Date: Wed Dec 10 16:47:25 2008 -1000 Move PyTypeObject definitions to source files. Do not declare PyTypeObjects as static structures in the header files, because this leads to multiple PyTypeObjects in our Python module, which causes things like isinstance() to fail. commit c8bddd6c58a1d2df8fa973bb91a5f406b05d9214 Author: David Cantrell Date: Wed Dec 10 16:22:33 2008 -1000 Fixed the isinstance bug. commit 8ffddb0a893c93513afbff0fe7724b7138efae95 Author: David Cantrell Date: Wed Dec 10 14:54:06 2008 -1000 PyObject_HEAD_INIT(NULL) -> PyObject_HEAD_INIT(&PyType_Type) Left over from something I was trying earlier, forgot to put back the &PyType_Type component. commit 903d07e55e7685472c1106721499a1e84664085d Author: David Cantrell Date: Wed Dec 10 14:52:47 2008 -1000 Use _ped_Alignment_init() in PedAlignment2_ped_Alignment. Better to use our own code rather than have assignment lines for each class member. commit dbd87ef75c521321ccac02fe2c39cae359d8bc51 Author: David Cantrell Date: Wed Dec 10 14:49:31 2008 -1000 Fix PedAlignment2_ped_Alignment() missing ob_type problem.
This fixes the following problem: import _ped x = _ped.Alignment(10, 10) isinstance(x, _ped.Alignment) -> True y = x.duplicate() isinstance(y, _ped.Alignment) -> False The problem was because we have our PyTypeObject declarations in the header files to the project and have them all set to 'static', which means we have too many copies. Only the one seen in _pedmodule.c was getting ob_type set correctly, which is why x is an instance of _ped.Alignment, but not y. The solution is to remove the static part of the declaration and put it in the appropriate source file in the project, but then add this line to the header file: extern PyTypeObject _ped_Alignment_Type_obj; The extern line is so we can use that type object throughout the rest of the code. commit 0c1d36ede75ea0d7f70e9b73f1715611d53b10a7 Author: David Cantrell Date: Wed Dec 10 14:01:12 2008 -1000 Correct some format strings in pydisk.c The device path part of a PedDisk is buried in the PedDevice and finally the path member, so we should have an extra ->dev->path on all of the instances where we want the device path. Use %d for the partition number rather than %s. commit 4306d2b1ed2338718653e747b65a43eda4512f81 Author: David Cantrell Date: Wed Dec 10 13:53:30 2008 -1000 Remove unused tp_new functions. Removed the following tp_new functions: _ped_FileSystem_new(), _ped_Alignment_new(), _ped_Timer_new(). commit 010c4cf64b4eb3770d2fb6fb01a17ea500d7602b Author: David Cantrell Date: Wed Dec 10 13:50:21 2008 -1000 Remove tp_new functions in pydisk.c we aren't using anymore. Removed _ped_Partition_new() and _ped_Disk_new() since we are using the PyObject_GenericNew() function now. commit 7c1c9bf028b5c85a2bf42b3503c6f1d83a6a0c8d Author: David Cantrell Date: Wed Dec 10 13:50:03 2008 -1000 Removed unused variable 'o' in init_ped() commit 277eb92a565daa6e0789513759bc4e8c33d1dd89 Author: David Cantrell Date: Wed Dec 10 13:44:39 2008 -1000 Add prototype for init_ped() function. commit 80908532be80f23df933091ba5602b799155ca08 Author: David Cantrell Date: Wed Dec 10 13:37:25 2008 -1000 Add '-Wall -Wmissing-prototypes' to the CFLAGS. commit 2ed4756c5f01b9cb57d67de4c35d466c8c41fe8d Author: David Cantrell Date: Wed Dec 10 13:06:22 2008 -1000 Ignore autogenerated m4 files. commit db1e50a2ba62e39b919cca980cbfa50c08163d79 Author: Chris Lumens Date: Tue Nov 25 15:11:05 2008 -0500 Update bug list for the latest disaster. commit 39ba3d45851caab5e4c06460d65af7fff7dabd89 Author: Chris Lumens Date: Tue Nov 25 11:22:59 2008 -0500 Make _ped.Device documentation look like all the rest. commit a611d36a117425ee66fb6b81766db6339a41d792 Author: Chris Lumens Date: Tue Nov 25 11:12:06 2008 -0500 Add documentation for everything else in pydisk.h. We're not really going to use Timers in the future, so I'm removing them from the TODO list. Writing a bunch of documentation we're just going to throw away seems like a big waste to me. commit 1504cf1770215c56897a0a445386cb7f6d261f54 Author: Chris Lumens Date: Fri Nov 21 10:52:07 2008 -0500 PyType_GenericNew behaves correctly for GC-managed objects. commit 5036bad08d9fc283fce78f8f9277225d84c7daa0 Author: David Cantrell Date: Wed Nov 19 05:30:34 2008 -1000 Remove Py_TPFLAGS_DEFAULT from PyTypeObjects. We shouldn't be using Py_TPFLAGS_DEFAULT because it sets things that we don't necessarily have defined for our objects. Made sure ours are set to just the things we want. NOTE: We'll have to update the tp_flags when we define more things. 
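The dbd87ef entry above already names the fix; for clarity, here is a hedged sketch of the layout it describes. Exactly one definition of the type object lives in a .c file, and every other translation unit sees only an extern declaration, so isinstance() compares against the same PyTypeObject everywhere. The file and symbol names below are illustrative, not the real _ped ones.

/* ---- example.h ---- */
#include <Python.h>
extern PyTypeObject Example_Type_obj;   /* declaration only, no 'static' */

/* ---- example.c ---- */
PyTypeObject Example_Type_obj = {
    PyObject_HEAD_INIT(&PyType_Type)
    0,                              /* ob_size */
    "_example.Example",             /* tp_name */
    sizeof(PyObject),               /* tp_basicsize (placeholder) */
    /* remaining slots filled in as needed; unlisted members are zeroed */
};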
commit 60fbfc19b9a3ac923da48f9f0eac5069e9c3b1d5 Author: David Cantrell Date: Wed Nov 19 04:34:07 2008 -1000 Updated TODO list. commit 0508f79eb6ad9d173d856467324ade8f4a3498ae Author: David Cantrell Date: Wed Nov 19 04:29:08 2008 -1000 Use PyObject_GC_Del() for object destruction. Since we are using GC now, we should use PyObject_GC_Del() to destroy objects. Explained when we should and should not use the Py_XDECREF() and PyObject_GC_Del() functions. commit 4df04a561c350386e6166be9508048e69704e049 Author: David Cantrell Date: Wed Nov 19 04:17:08 2008 -1000 Add garbage collection for _ped.Timer That completes the basic layout for the GC support on our custom PyTypeObject definitions. commit cd1983d913989eb32c1d10420a146d79b308d9c9 Author: David Cantrell Date: Wed Nov 19 04:13:25 2008 -1000 Added garbage collection for _ped.Alignment commit 9bb650f0b53706a397d67e9b568afbb2fb53a9d7 Author: David Cantrell Date: Wed Nov 19 04:08:42 2008 -1000 Added garbage collection for _ped.Geometry commit 2973ea1163fe7241750361d6f79e87c4cab53ee6 Author: David Cantrell Date: Wed Nov 19 03:59:39 2008 -1000 Added garbage collection for _ped.FileSystem and _ped.FileSystemType commit 32ea2f9251ce425123f874d7872dd5c660dfeb73 Author: David Cantrell Date: Wed Nov 19 03:44:17 2008 -1000 Rename Device factory functions to more closely match libparted. The names I gave the Device factory functions at the _ped level do not follow the naming style we've been trying to use in _ped. Made the following method renames: _ped.get_device() -> _ped.device_get() _ped.probe_all_devices() -> _ped.device_probe_all() _ped.free_all_devices() -> _ped.device_free_all() commit 984ee859e3dd7676ad571e0dcf3e345d4b38593b Author: David Cantrell Date: Wed Nov 19 00:09:17 2008 -1000 Updated TODO list. commit 9a4350fc4e839bdb237738e65ce97c9df6a37830 Author: David Cantrell Date: Tue Nov 18 23:57:03 2008 -1000 Add garbage collection support for pydisk Add GC functions for _ped.Partition, _ped.Disk, and _ped.DiskType. commit 024c4afd6b838e60801cedaeb12722b6f056ef7a Author: David Cantrell Date: Tue Nov 18 23:26:27 2008 -1000 Add garbage collection support for _ped.Constraint commit d22816b44fb7dc03e5bc08eaf8d48ad720605c2f Author: David Cantrell Date: Tue Nov 18 23:12:51 2008 -1000 Forgot the Py_TPFLAGS_HAVE_GC in the _ped.CHSGeometry PyTypeObject commit 6a587ca361b4588c19f674a580e583b65cfe300c Author: David Cantrell Date: Tue Nov 18 23:09:53 2008 -1000 Add garbage collection support for _ped.CHSGeometry commit 3347d820465a9ac07df0d1ad2acb08312a99598e Author: David Cantrell Date: Tue Nov 18 22:43:15 2008 -1000 Add garbage collection support to pydevice Seems pretty straightforward. Use a different _New function, add traverse and clear functions, fix up the dealloc function to work with GC, set the GC flag in the PyTypeObject. Will be carrying this over to the other types. commit 2ff371dd7f5ac0c51b3036a548ed58b8640fed08 Author: Chris Lumens Date: Thu Nov 20 15:04:57 2008 -0500 Use PyType_GenericNew instead of our own function, where appropriate. On our object types where you can create them manually, the generic new method both allocates space for the type and sets up default values. With just using our own new functions, we'd have to set the defaults ourselves to prevent tracebacks. This new mechanism has the pleasant side effect of enforcing the number of arguments required to be passed to the __init__ method. 
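Several entries above add "garbage collection support" to individual types. As a hedged illustration of what that means in practice: a tp_traverse that visits every PyObject member, a matching tp_clear, and Py_TPFLAGS_HAVE_GC in tp_flags. The struct and names below are placeholders, not the actual pyparted definitions.

#include <Python.h>

typedef struct {
    PyObject_HEAD
    PyObject *hw_geom;      /* e.g. a CHSGeometry-like member */
    PyObject *bios_geom;    /* e.g. a CHSGeometry-like member */
} DeviceLike;

static int
DeviceLike_traverse(DeviceLike *self, visitproc visit, void *arg)
{
    /* Tell the collector about every PyObject this object owns. */
    Py_VISIT(self->hw_geom);
    Py_VISIT(self->bios_geom);
    return 0;
}

static int
DeviceLike_clear(DeviceLike *self)
{
    /* Break reference cycles by dropping owned references. */
    Py_CLEAR(self->hw_geom);
    Py_CLEAR(self->bios_geom);
    return 0;
}

/* In the PyTypeObject: tp_flags gains Py_TPFLAGS_HAVE_GC, tp_traverse and
 * tp_clear point at the functions above, and user-instantiable types can
 * use PyType_GenericNew for tp_new so default values are set up for free. */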
commit fa2e1debb6f58c599b16a133e3051b50db126b9c Author: Chris Lumens Date: Thu Nov 20 13:38:32 2008 -0500 Add test cases for a couple more things in _ped. commit d0dc99794457e72f5e740c3fd73df39062d9cbdc Author: Chris Lumens Date: Thu Nov 20 13:19:51 2008 -0500 PedPartitionType and PedPartitionFlag are enums, so treat them as longs. This fixes the same stack alignment problems as we were having with PedUnit. commit cc187567619a7aa29e01564bdd207a0665a84423 Author: Chris Lumens Date: Thu Nov 20 12:17:01 2008 -0500 Move flag and type methods into _ped. The following methods do not operate on an object, but instead return flags and types based on integers/strings: py_ped_partition_type_get_name py_ped_partition_flag_get_name py_ped_partition_flag_get_by_name py_ped_partition_flag_next commit c37be24f677a7d0b6f9347b8734d6b4c513b721c Author: Chris Lumens Date: Thu Nov 20 12:12:03 2008 -0500 Add documentation for _ped.Partition. commit 36d9c7cb6c370be9f13322aad6338abeeabcb21c Author: Chris Lumens Date: Thu Nov 20 10:36:46 2008 -0500 Fix dir() not working on all other object types, too. commit 87b477c1b032e139ea1fd692deb57e4a5d9594e4 Author: Chris Lumens Date: Thu Nov 20 10:21:06 2008 -0500 Use the right types when calling PyObject_New. I don't think this actually affects anything, but all examples I've seen use a real type as the first argument instead of PyObject, and it certainly does make things a little more clear. commit 0b25435d23f57609e6fd3e84d956c51a1c33a99c Author: Chris Lumens Date: Thu Nov 20 10:14:09 2008 -0500 Fix the problem of _ped.Device objects not having any attributes. What do you know, we needed to add a method to the type struct for how to get attributes. I suppose this now needs to be done for our other types, too. commit 2f1c2663419e77277e1dbba7b12029189f64e3ff Author: David Cantrell Date: Tue Nov 18 12:44:51 2008 -1000 Updated TODO list. commit 31b5810c0f89161d0dd23fdceff5b3422a00737c Author: David Cantrell Date: Tue Nov 18 12:44:18 2008 -1000 Add docstrings to include/pydevice.h commit 22c7bc670bd8f018e043129244d0904c064651af Author: David Cantrell Date: Tue Nov 18 05:50:52 2008 -1000 __init__() cleanups in _ped.Disk Throw CreateException if the user does not provide the required parameters. Remove the extra conversion checks since libparted will error out anyway if those are invalid. Lastly, delete self if we failed to create the PedDisk. commit d3360bc7d08058338dc3fabeb6498f7f38b70cea Author: David Cantrell Date: Tue Nov 18 05:50:17 2008 -1000 Delete self if __init__ fails in _ped.Constraint Make sure to delete self if __init__() fails in _ped.Constraint. commit bb9f2cbbe35bc4a5d8a5860ed2c5541b1f551d2e Author: David Cantrell Date: Tue Nov 18 05:49:34 2008 -1000 Convert newly created PedGeometry to _ped.Geometry in __init__ After we create the new PedGeometry, convert it to self before returning. commit 61e1fced555ea567e99ca330b8393f9c4e099761 Author: David Cantrell Date: Tue Nov 18 02:48:57 2008 -1000 Use PyObject_New() and PyObject_Del() Found out the difference between the _New/_NEW and _Del/_DEL functions. The all caps versions eliminate some safety checks and NULL tests in favor of speed. I figure we can use all the safety checks we can get, so use the safer versions. commit 3353848efb11e08b0a10f9ed973a599d86ddafe1 Author: David Cantrell Date: Tue Nov 18 02:45:19 2008 -1000 Expanded _ped_Disk_init() Removed the py_ped_disk_new() and py_ped_disk_new_fresh() functions. Several problems discovered. First, those functions weren't that correct.
More importantly, they should not be per-Disk methods. Since they deal with creation of Disks, merged their functionality into the __init__() function for _ped.Disk. The __init__() function will have one required parameter, the Device. If you also provide the optional DiskType parameter, the behavior changes. Providing just the Device causes us to call ped_disk_new() in libparted. If you provide a Device and a DiskType, we call ped_disk_new_fresh() in libparted. commit fcb736f0838695ca124a4c63ab27fc03c997acf8 Author: David Cantrell Date: Tue Nov 18 02:16:10 2008 -1000 In *_destroy() functions, destroy the Python object. In our py_ped_*_destroy() pass-through functions, we were converting the Python object to a libparted type, then destroying and returning None. While this works, it seems pointless. We don't need to create and destroy a libparted type, we only need to destroy our Python type, so call PyObject_DEL() and return None. The py_ped_file_system_clobber() function is special. We have to call the file system clobber function to actually destroy it on disk and then destroy the object. commit d3af4a4709284a9879ffa1dc1c4b12359044a3b6 Author: David Cantrell Date: Tue Nov 18 02:00:39 2008 -1000 Move py_ped_timer_new() functionality to init for _ped.Timer Timers are not entirely implemented, but keep the current code in line with the rest of the module. The ped_timer_new() function should not be a method on a Timer object. Moved its functionality to the __init__() function for _ped.Timer. commit 8eff9b3f561a9e3a3294a03ae82df79d13e80a71 Author: David Cantrell Date: Tue Nov 18 01:54:58 2008 -1000 Move py_ped_partition_new() functionality to init for _ped.Partition The py_ped_partition_new() function should not be a method on the _ped.Partition type. Moved the functionality it was providing to the __init__() function for a _ped.Partition. commit cbff35f5e82c0814468e9be29a9354d716f1ce94 Author: David Cantrell Date: Tue Nov 18 01:12:58 2008 -1000 Updated TODO list. Explain the bug where we create new objects with factory functions, but they appear empty until we just access them in the interactive environment. commit b9b030ba7950314943c540051d809acac3148ae5 Author: David Cantrell Date: Tue Nov 18 01:10:48 2008 -1000 Use PyObject_NEW() in convert.c The *_new() functions for non-instantiatable objects are no longer available, so use PyObject_NEW() to create them in convert.c commit d9f6850e130b83f8d3fd93bead73c6e1eff22e7b Author: David Cantrell Date: Tue Nov 18 11:02:30 2008 -1000 Make some types non-instantiatable by users, clean up tp_allocs. The following objects cannot be instantiated by users directly: _ped.Device, _ped.CHSGeometry, _ped.DeviceType, _ped.DiskType, _ped.FileSystemType, _ped.PartitionType These are valid objects with other methods and attributes, but _ped will hand the user that object when necessary. Remove all of the tp_alloc assignments from _pedmodule.c and put those in the PyTypeObject structs. commit 0de87041d8e72734b007105ba522d35704e70c9f Author: Chris Lumens Date: Tue Nov 18 15:34:07 2008 -0500 And there's more stuff to write test cases for in _ped. commit 77430fcf085841b4f863b0729d5ad5cab06f863c Author: Chris Lumens Date: Tue Nov 18 14:06:28 2008 -0500 Add documentation for FileSystem and FileSystemType. commit 7c7b7988e3540a4e7735e097830a1047ca9f5b2b Author: Chris Lumens Date: Tue Nov 18 11:58:36 2008 -0500 Use a much more simplified way to add constants to the _ped module.
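The "Expanded _ped_Disk_init()" entry above (continued at the top of this block) describes the Device-only versus Device-plus-DiskType branching. A hedged sketch of that decision, using the real libparted entry points named in the entry but an illustrative helper name and no error handling:

#include <parted/parted.h>

static PedDisk *
disk_from_args(PedDevice *device, const PedDiskType *type)
{
    if (type == NULL)
        return ped_disk_new(device);             /* read the label already on disk */
    return ped_disk_new_fresh(device, type);     /* lay down a brand new label     */
}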
commit 91310740c94b4ce0999565338191893d31ff719d Author: Chris Lumens Date: Tue Nov 18 11:34:41 2008 -0500 Add documentation for Alignment objects. commit ebe1ff4a1ef5ed7c7fc7ef2fb2896dcb337f3692 Author: Chris Lumens Date: Tue Nov 18 11:07:59 2008 -0500 Finish up Constraint documentation with an object-level blurb. commit 992501cd194a4e5d0772b8f114bfa72cb6d38cae Author: David Cantrell Date: Mon Nov 17 08:10:39 2008 -1000 Updated TODO list. Added a note to make sure that all of the destroy pass-through functions also destroy the corresponding Python object. commit 727f7e980683c48ae1e653399984cf8482aec52b Author: David Cantrell Date: Mon Nov 17 08:06:57 2008 -1000 Add docstrings for pyconstraint, remove py_ped_constraint_done. Added docstrings for the pyconstraint attributes and methods, based on what I found reading the libparted source. Saw that the ped_constraint_done() function is a helper function for ped_constraint_destroy() and we don't need a pass-through for it in Python, so I removed it. commit b95edbe306be325aafdc572dc858e44e5b14a691 Author: David Cantrell Date: Mon Nov 17 02:04:32 2008 -1000 Updated TODO list. Note the stupid git ref error we're getting and make a note to fix it at some point in the future. commit 01006816d5948172bd05368398bca646c9af18f8 Author: David Cantrell Date: Mon Nov 17 11:46:26 2008 -1000 Last try and then I just give up on git. commit c9922161280c751b014dbbba356081ad9d35128a Author: David Cantrell Date: Mon Nov 17 11:45:08 2008 -1000 One more test. commit e072231d39e058cb702057789accb7ffde98513e Author: David Cantrell Date: Mon Nov 17 11:42:37 2008 -1000 And do we have those annoying errors gone? commit 01a5336c282b4b92d75ed97c3154c50598410778 Author: David Cantrell Date: Mon Nov 17 11:37:37 2008 -1000 Testing the git gc fix. commit 9b476401989d216b5c31363a425477eee9573118 Author: Chris Lumens Date: Mon Nov 17 14:42:48 2008 -0500 Fix _ped.Device object creation so you can only create them from functions. The tp_alloc function we had was causing segfaults, so I replaced it with PyType_GenericAlloc which is what the python docs recommend we do anyway. Also we were setting a tp_new in _pedmodule.c which was overriding our NULL setting in the type object. That'd explain why a = _ped.Device() still worked. commit 6b597a0487ff10fc4dbd711a35c24ab73577d017 Author: David Cantrell Date: Sat Nov 15 07:29:38 2008 -1000 Move Constraint factory functions to _ped. The following Constraint functions are not specific to a single Constraint instance, they are factory functions. Move them to _ped and add docstrings for them: py_ped_constraint_new_from_min_max() py_ped_constraint_new_from_min() py_ped_constraint_new_from_max() py_ped_constraint_any() py_ped_constraint_exact() commit 5817ca6229bf492f16d177850ebe7b7cef56d6ed Author: David Cantrell Date: Sat Nov 15 07:15:08 2008 -1000 Remove init and new functions for Constraint, Geometry, & Alignment. The py_ped_OBJECT_init and py_ped_OBJECT_new functions for the Constraint, Geometry, and Alignment OBJECTS were sort of pointless. The object's tp_new and tp_init methods are sufficient. Remove the pass-through calls for the unnecessary functions. Added calls to the ped_OBJECT_new() function inside each type's tp_init function. We can use this to have libparted validate whether or not the user is creating a correct object. Catch exceptions from libparted like we were doing in the new and init function pass throughs. 
The __init__() functions for Constraint, Geometry, and Alignment now require all arguments since it's now like calling the ped_OBJECT_new() function in libparted. commit e7916a123e32241955f5e37ae8d281f6fea5a649 Author: Chris Lumens Date: Sun Nov 16 17:35:12 2008 -0500 And trim off the leading py_ped_ from everything too. commit 65ddcc8f82b1e791873c09966681720d4eef604b Author: Chris Lumens Date: Sun Nov 16 17:32:19 2008 -0500 Add documentation for pygeom.h. commit 138da9bcf549e170a2b24b798c11cb7611d15d3b Author: Chris Lumens Date: Sun Nov 16 13:47:59 2008 -0500 Update docs for pyparted_version_doc to match new return type. commit 9f7a43566c266cb33fa892c339f5cf213eba1a82 Author: David Cantrell Date: Sat Nov 15 17:48:14 2008 -1000 Updated TODO list. Figured the difference between the ped_TYPE_new() and ped_TYPE_init() functions in libparted. The former is a convenience wrapper for the latter. We really only need to call the _init() function in the corresponding Python type's tp_init function. commit 1981ce4726d60c67acef7e287d604644d481b632 Author: David Cantrell Date: Sat Nov 15 17:23:24 2008 -1000 Return pyparted version number as a 3-tuple. Returning the version number as a string will make it more difficult to do version enforcement in other programs (e.g., anaconda). Return the pyparted version number as a 3-tuple of major, minor, and update. commit 74cc37d10a6267779380a8fd38898bd2cf07fc82 Author: David Cantrell Date: Sat Nov 15 17:10:05 2008 -1000 Fix comments in header files. _partedmodule.h -> _pedmodule.h _exceptions.h -> exceptions.h commit 1671ace99bf1ea09ab752a483cdbe557ac33ab71 Author: David Cantrell Date: Sat Nov 15 02:11:25 2008 -1000 Update pyfilesys.c functions to use object members. For a number of methods in _ped.FileSystem, we were taking in the params in the method call rather than reading them from self. Fixed that in a number of functions. Take the value from self if we can, otherwise, assume it's a parameter to the method. commit ca47117a698643d6901cbeaa28a4ade6e1108b82 Author: David Cantrell Date: Sat Nov 15 02:00:25 2008 -1000 Move file system probing functions to _ped module. The py_ped_file_system_probe() and py_ped_file_system_probe_specific() functions are not tied to any particular FileSystem instance. They take in a Geometry (and in the case of the _specific function, also a FileSystemType), and return a FileSystem. Moved them up to the _ped module where the rest of the non-object-specific functions live. commit cb2377a82067741d776de7a8e0820b6b5d966e3e Author: David Cantrell Date: Sat Nov 15 01:15:08 2008 -1000 Updated TODO list. More brainstorming leads to more things on the TODO list. commit 04327b439f3938a5978685ad5eb7fe2cc83c7e5e Author: David Cantrell Date: Sat Nov 15 01:14:22 2008 -1000 Remove prototype for _ped_Device_new() function. The .tp_new function for _ped_Device does not exist anymore since we are not letting users instantiate the object; they have to call _ped.get_device() to create a _ped_Device object. commit d41af5f9c0fe1d0bc15b58d99502d98ee116686f Author: David Cantrell Date: Thu Nov 13 14:45:34 2008 -1000 Bring _ped.Device closer to the way we want it to function. That is, the type needs to exist and we need to be able to create it internally in _ped, but users cannot instantiate a new _ped.Device and build it up programmatically.
Here's where we are as of this commit: In [1]: import _ped In [2]: a = _ped.get_device("/dev/sda") In [3]: a.length --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) /home/dcantrel/pyparted/ in () AttributeError: '_ped.Device' object has no attribute 'length' In [4]: a Out[4]: <_ped.Device object at 0xb7b7bd40> In [5]: a.length Out[5]: 20480544L In [6]: b = _ped.Device() ./localtest: line 3: 13230 Segmentation fault ipython [dcantrel@electron pyparted]$ You can see we have quite a bit more work to do. commit a267589f8c3ca4bb14bb105e0b189cf5c5c6942b Author: David Cantrell Date: Thu Nov 13 13:43:52 2008 -1000 Add placeholders for the other PyTypeObject members. PyTypeObjects are complex structs. Fill out our structs with placeholders for everything we are not currently using. commit 218057c283de329c9640e991b014397c71b6799c Author: David Cantrell Date: Thu Nov 13 11:00:40 2008 -1000 Use Py_XDECREF() on PyMemberDef members that are PyObjects. For every type we have, there exists a PyMemberDef structure that defines the type members, which are attributes. Everything is ultimately a PyObject in the C API, which means that a PyMemberDef struct can contain C primitives or other PyObjects. Structure members that are PyObjects need to have a Py_XDECREF() call made on them in the type's dealloc function so that the Python machine will clean up properly. commit 217d8232bc6a316258ea7d237f12a91611f7cdc1 Author: David Cantrell Date: Thu Nov 13 10:51:15 2008 -1000 Renamed PyDoc_STRVAR names to match method names. commit 3373ec12a36b6c35336174ae65ee7c09f26367ba Author: David Cantrell Date: Thu Nov 13 10:48:57 2008 -1000 Use underscore naming convention for _ped members. _ped is a lowlevel module, meant to make writing the new parted module easier. Keep the functions named closely to what they are named in libparted for consistency. Updated TODO list as well. commit ee72fd75eb555ea63e09f183664ba5d3fa4cb263 Author: David Cantrell Date: Thu Nov 13 10:12:44 2008 -1000 Convert self to a PedDevice in py_ped_disk_clobber_exclude(). commit 102dff7a2f6cf912397d8a776f3d97fa8c13de49 Author: David Cantrell Date: Thu Nov 13 10:06:46 2008 -1000 Updated TODO list. In object types where some of the members are PyObjects, we need to make sure to Py_XDECREF() those members from the dealloc function. commit 9f938e49cc25a18b42329ebe319a89c32a62cd86 Author: David Cantrell Date: Thu Nov 13 10:06:06 2008 -1000 Reference self object correctly in remaining source files. Like the previous change to pydevice.c, do not read self through PyArg_ParseTuple(). Use the first argument to the function, which is PyObject *self. commit 3c1d0438ab2f9c282d90ca5792a8730619947901 Author: David Cantrell Date: Thu Nov 13 04:23:59 2008 -1000 Fix vim control comments at the end of source files. The vim in rawhide does not like the trailing */, so move that to a new line. commit 492c5d8c0586fb738b3b1fb54ee25780f0a81a38 Author: Chris Lumens Date: Fri Nov 14 15:32:05 2008 -0500 Require being root to import _ped. commit 5f9a8aaa6308a06728b4dbc3026d48185c554885 Author: Chris Lumens Date: Fri Nov 14 11:32:02 2008 -0500 Make sure I don't forget to make test cases for the new _ped stuff. commit 91de244235b4b64575dfa8f32d4068f0d80056c4 Author: Chris Lumens Date: Fri Nov 14 11:31:48 2008 -0500 Added another bug we need to think about. commit 497e50922c50db965c4f0da2bab4d66ecaf7ed89 Author: Chris Lumens Date: Fri Nov 14 11:29:05 2008 -0500 Fix build warnings. 
commit 0f76ab2b9b9f11493a3687b5a4355c2734ea56c1 Author: Chris Lumens Date: Fri Nov 14 11:20:04 2008 -0500 Update the _ped module documentation and reformat it in the source. This completes the _ped documentation for everything that's currently in the module and adds a description of the module itself. I also modified the formatting of the docs in the source so it's easier to tell where line breaks should go. commit 51ac17d8ea8a515345418edf60475699bc452347 Author: David Cantrell Date: Thu Nov 13 14:45:52 2008 -1000 In pydevice.c, reference self correctly. We were using PyArg_ParseTuple() and assuming that self would be the first argument in the args array. This is incorrect. Self is given to us as the first argument to the function, so use that any time we need to reference self. commit f02668abdc26cd4a1e403609e688f144c588fae6 Author: David Cantrell Date: Thu Nov 13 02:19:14 2008 -1000 Updated TODO list with non-instantiatable objects. These objects need to be able to exist with methods, attributes, and so on, but users should not be able to instantiate them. Setting .tp_new to NULL as stated in the Python documentation does not work. Might have to create a special .tp_new or .tp_init function that throws an exception if the user tries to instantiate the object. commit 74208a0d322385b8aae5e15c3c102d3bcfb76692 Author: David Cantrell Date: Thu Nov 13 02:13:39 2008 -1000 Move higher level Device methods to _ped module. With the Device type, we have determined that in order to match the functionality of libparted, we need to prevent the Device type from being instantiated, but allow the user to get a Device type when calling ped_device_get(). The following functions have moved to the _ped module: _ped.Device.get() -> _ped.getDevice() _ped.Device.probe_all() -> _ped.probeAllDevices() _ped.Device.free_all() -> _ped.freeAllDevices() Also fixed a double free that was occurring in py_ped_device_get(). commit 0444ce39b64490ccd24e7064c515f151b7b93d7c Author: Chris Lumens Date: Thu Nov 13 15:59:27 2008 -0500 Update TODO list for test suite. commit 09b3d24aedbf1a9da7964a65d11750cf5f6816fc Author: Chris Lumens Date: Thu Nov 13 15:42:38 2008 -0500 Add test cases for all the easy stuff in pyunit. The outstanding test cases to write all require PedDevice or PedGeometry, so we should make sure those are working first. commit 09a58f03c86b7af9de9f89308b3d0f3f96fffeec Author: Chris Lumens Date: Thu Nov 13 15:42:07 2008 -0500 Fix checking for whether ped_unit_get_by_name gives us a good value. commit 191587d6ebc7d46d49ffddd3fd6f6631b6225954 Author: Chris Lumens Date: Thu Nov 13 15:29:37 2008 -0500 Add more docstrings for all the unit stuff. commit c52602afa516d7e278cb0acb4d26e96da866a22b Author: David Cantrell Date: Thu Nov 13 10:08:06 2008 -1000 Do bounds checking for PedUnit input values. We need to make sure a user hands us a valid PedUnit value, which has to be within the PED_UNIT_FIRST and PED_UNIT_LAST bounds, inclusive. commit 8b523d4645c370d6eaeb85fadd233f3ecf3ac277 Author: David Cantrell Date: Thu Nov 13 10:02:50 2008 -1000 Use 'long' for PedUnit. PedUnit is an enum in libparted, which introduces size problems on 64-bit architectures. commit 94af346006e2dffd9c1995102ce47e509966838e Author: David Cantrell Date: Thu Nov 13 09:36:28 2008 -1000 Updated TODO list. commit b48b2bc0a243853d5a062d330c79bea5a50670d1 Author: Chris Lumens Date: Thu Nov 13 11:16:41 2008 -0500 The beginnings of a working test suite for pyparted. 
The test suite is all organized under tests/, one directory per major API in the package (one for _ped, one for the old API, one for the new), one file per module in each API. Then each file contains one test class per method with multiple tests in each. The entire test suite will use python's unittest framework and can easily be run with "make check" in the top level of the source directory. commit db938a876cd529d8cb3a4068c2d2a38865b1fefd Author: Chris Lumens Date: Thu Nov 13 11:14:37 2008 -0500 Raise ZeroDivisionError when the divisor is zero, instead of exploding. These were all found by writing the initial test suite. Seems like this is going to be time well spent. commit 4497cdb258d895363ef6d073c3bb8fbb1da2e99f Author: Chris Lumens Date: Thu Nov 13 10:54:27 2008 -0500 Add docs strings for the natmath methods in the _ped module. commit cd4a598842565bdbc682eb5f49a1f9aee891d437 Author: David Cantrell Date: Wed Nov 12 17:39:59 2008 -1000 Add vim settings to all of the .h and .c files. commit c3582e877680a2e2bd02331a035f8add2ed064a5 Author: David Cantrell Date: Wed Nov 12 17:14:26 2008 -1000 Initialize all pointers to NULL. Whenever we declare a new pointer, init it to NULL. commit f3453f26c15b68543fa63e0d3513b711b172eb8e Author: David Cantrell Date: Wed Nov 12 15:41:00 2008 -1000 Use 'z' in PyArg_ParseTuple() when reading strings. Use 'z' in the PyArg_ParseTuple() format string. The 'z' value will convert string values of None to NULL in C, which is what we want. commit a2c9998c1ed98f6be9483bf2c6dc77a831ecc27d Author: David Cantrell Date: Wed Nov 12 15:08:44 2008 -1000 Use 'z' in PyArg_ParseTuple and fix ped_device_*_all() functions. When reading strings with PyArg_ParseTuple(), use 'z' for the format string so we get NULL if the string value is None in Python. Fixed py_ped_device_probe_all() and py_ped_device_free_all() so they do not expect any input parameters and always return None. The functions in libparted take no parameters and are void. commit eb7e97bff7f336718901ec91607d0845bb24f458 Author: David Cantrell Date: Wed Nov 12 13:07:30 2008 -1000 Updated TODO list. Add entry for adding docstrings to _ped. Basically, every Py* struct needs docstrings. commit 2a3df9076398f7d7e718dac8c2858e2442339bec Author: David Cantrell Date: Wed Nov 12 12:54:23 2008 -1000 Updated TODO list. Moved all the exception things to be next to each other. Removed some items that I don't think apply anymore. Everything pretty much has a place now in the code. commit e6d0f402f71dfa1dd2e315560c3328ff8454bf97 Author: Chris Lumens Date: Wed Nov 12 16:44:13 2008 -0500 Raise ZeroDivisionError instead of letting C explode if divisor is 0. commit f85f2009ce9087d3a6cec8d74178c861e0906555 Author: David Cantrell Date: Wed Nov 12 10:28:28 2008 -1000 Wrote py_ped_unit_parse() and py_ped_unit_parse_custom() These were the last of the pyunit.c functions that needed to be written. commit 585d1d2f225be64228e589b4670c57bd5b9eb9a6 Author: Chris Lumens Date: Wed Nov 12 15:27:55 2008 -0500 Add a skeleton PedDisk file. commit f29ec18065f1a87a4db301bbf43288689128e799 Author: David Cantrell Date: Wed Apr 16 10:18:12 2008 -1000 Remove 'All rights reserved' from the boilerplates. Apparently we aren't supposed to have this in the standard license boilerplate. commit f4c99102bcb30fd9063c795e8b65190315718dd6 Author: David Cantrell Date: Wed Nov 21 15:50:59 2007 -0500 Add my email address. commit 9a69e5b3d0743fdeed9ec81b5851218b1bd015bc Author: David Cantrell Date: Wed Nov 21 15:45:56 2007 -0500 Sign the WHY document. 
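Two of the input-handling changes above, the 'z' format from commit f3453f26 and the PedUnit bounds check from commit c52602af, sketched together as a hypothetical wrapper body (variable names are made up):

    char *path = NULL;
    long unit;

    /* 'z' maps Python None to a C NULL pointer ('s' would reject None);
     * 'l' reads the PedUnit value as a plain long */
    if (!PyArg_ParseTuple(args, "zl", &path, &unit))
        return NULL;

    /* PED_UNIT_FIRST and PED_UNIT_LAST come from libparted's unit.h */
    if (unit < PED_UNIT_FIRST || unit > PED_UNIT_LAST) {
        PyErr_SetString(PyExc_ValueError, "invalid PED_UNIT_* value");
        return NULL;
    }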
commit b138cf73e79ada450833765d8f27ba78053b8e76 Author: Chris Lumens Date: Thu Nov 15 15:50:07 2007 -0500 Make sure the deprecated API raises the same exceptions as existing parted. commit ad0cb4c5438d9ce7de0daf76991c4f7b922356e2 Author: Chris Lumens Date: Thu Nov 15 13:55:57 2007 -0500 Make properties for accessing the underlying _ped.Device attributes. commit a021de4661dd6f24c1b8f7c6e8bb27246dc0005f Author: Chris Lumens Date: Thu Nov 15 13:31:15 2007 -0500 Make sure the deprecated API raises the same exceptions as existing parted. commit c107478bd8c4b5571902fd2c7886a348678f63a6 Author: Chris Lumens Date: Thu Nov 15 10:29:55 2007 -0500 Use the convenience functions for building return values throughout. commit 2790ac2f961e8dea3b8102f9cbededf8a55a3b61 Author: David Cantrell Date: Wed Nov 14 17:22:31 2007 -0500 Added missing PedDevice.py commit 8c91e5d1342bae3c58cdddff8da699da347a5b4e Author: David Cantrell Date: Wed Nov 14 17:17:16 2007 -0500 Updated the to do list! commit d09b6843dc6e1335590a746eb008b9dca9a9e3b2 Author: David Cantrell Date: Wed Nov 14 17:16:27 2007 -0500 Started PedDevice old-API class Moved PedDevice class to the PedDevice module. The old pyparted named the modules and classes the same, which makes it a bit confusing. This is the start of the implementation of the deprecated API for PedDevice. Visibility is key for these old modules, which will require some fun meta class tricks. commit 2a66fbe47f3e875799eb72f124a57b0f47a6f8cf Author: David Cantrell Date: Wed Nov 14 17:15:14 2007 -0500 Remove double free. commit 0061e8816334f081ac12c73bd312138657cb4c6d Author: David Cantrell Date: Wed Nov 14 16:04:40 2007 -0500 Fix a typo Py_DECRET -> Py_DECREF commit b58221e4fc853d587203727af5c11294fd8acb52 Author: Chris Lumens Date: Wed Nov 14 15:55:10 2007 -0500 Free previously allocated memory in error handlers. commit 0fc0992e98d6d45d28b4d950bb776178520fa656 Author: David Cantrell Date: Wed Nov 14 15:37:33 2007 -0500 Updated to do list. commit 3a760e1c77a42bf137c68efe584e76c9a145c5fd Author: David Cantrell Date: Wed Nov 14 15:33:55 2007 -0500 Add my email address to configure.ac commit d78575836e2caf3e2141e12a92fa558b4fc48022 Author: Chris Lumens Date: Tue Nov 13 18:13:37 2007 -0500 Add lots more exception raising and error handling code. commit 2851ef6bf357e0054101bf7b801fc57cf046f440 Author: David Cantrell Date: Tue Nov 13 16:41:56 2007 -0500 Use convenience function for handling strings. Change occurencens of Py_BuildValue() for 's' to PyString_FromString(). commit a2c49fe08804dc765bf69b3566b8d20552db17ba Author: David Cantrell Date: Tue Nov 13 16:35:24 2007 -0500 Moved methods to the object they correspond to Reduced the _ped methods struct and moved most of the methods out to the corresponding object. Removed the Py_InitModule3() calls in the init function for _ped because those were driving over our method struct. Updated the to do list with some new ideas. We have a problem with some of the natmath functions. Not sure where to put those. Also, we lack _ped_Unit now, but should we bring it back as a method container and nothing more? 
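The Py_BuildValue() to PyString_FromString() change in commit 2851ef6b above amounts to the following (the path value is just an example):

    const char *path = "/dev/sda";
    PyObject *old_way = Py_BuildValue("s", path);      /* goes through the format parser */
    PyObject *new_way = PyString_FromString(path);     /* builds the same string directly */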
commit 6b6431e169d1b056a02877923d05413c7ee1a191 Author: David Cantrell Date: Tue Nov 13 15:32:00 2007 -0500 Added set method for _ped_Timer and updated to do list commit 22f97bc90fa931ef10583e2b132e1e5637354894 Merge: bf945c3 5f53c45 Author: David Cantrell Date: Tue Nov 13 15:25:26 2007 -0500 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted * 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted: Use more exact exceptions and error messages. Use more exact exceptions and error messages. Add a CreateException for all those places where we can't create an object. commit bf945c3ef2c5c028117582f759491e3655ec8d98 Author: David Cantrell Date: Tue Nov 13 15:25:18 2007 -0500 Implemented set methods for a number of types in _ped The following set methods have been written: _ped_Device_set() _ped_Partition_set() _ped_DiskType_set() _ped_FileSystemType_set() _ped_FileSystem_set() _ped_Geometry_set() _ped_Alignment_set() commit 5f53c456d35741761b0171f3c453a79ce2b677e7 Author: Chris Lumens Date: Tue Nov 13 14:16:31 2007 -0500 Use more exact exceptions and error messages. commit 5bb98097e5fe45f25e7a53ded9875f7a15628d19 Author: Chris Lumens Date: Tue Nov 13 13:56:32 2007 -0500 Use more exact exceptions and error messages. commit 12b245a260a89f8829ee4c71834e043f081e9724 Author: Chris Lumens Date: Tue Nov 13 13:56:06 2007 -0500 Add a CreateException for all those places where we can't create an object. commit e22eef5203ceb26df6690810010614f0712cd6cd Merge: 4935d63 68acd57 Author: David Cantrell Date: Tue Nov 13 13:44:24 2007 -0500 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted * 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted: Add error handling and exception raising to pynatmath.c. Add an AlignmentException as well. Add error handling and exception raising code to pytimer.c. Add a new TimerException. Add error checking to the results of the conversion functions. Fix up some -Wall warnings. commit 4935d63525c873c8998858135073c01e0a9cef4c Author: David Cantrell Date: Tue Nov 13 13:44:16 2007 -0500 get and set method updates The following changes are encapsulated in this diff: - Update get method prototypes to use a void * as the last arg. We create the member variable in the function, casted from closure. - Add prototypes for set methods - Stub out all set method functions in the C files - Wrote the set method for _ped_CHSGeometry commit 68acd576a5bdee3847eace6923b80693509957f8 Author: Chris Lumens Date: Tue Nov 13 13:36:08 2007 -0500 Add error handling and exception raising to pynatmath.c. commit 161b9cbcca8ce9d3444341c8fd8267f60d6ded6c Author: Chris Lumens Date: Tue Nov 13 13:35:52 2007 -0500 Add an AlignmentException as well. commit 5eddc3b8f2a837aa92ebedb363e8f534024243e5 Author: Chris Lumens Date: Tue Nov 13 13:24:05 2007 -0500 Add error handling and exception raising code to pytimer.c. commit 1b328c2cdab6e62f703a82af320f97f5f9ea2d8c Author: Chris Lumens Date: Tue Nov 13 13:23:48 2007 -0500 Add a new TimerException. commit 28bc88d4c2eb6825978a9ae7cfd71f7167bb5e4e Author: Chris Lumens Date: Tue Nov 13 13:15:51 2007 -0500 Add error checking to the results of the conversion functions. commit dcb320a73b15c4f2b3d4101efc3ce86d91ac0025 Author: Chris Lumens Date: Tue Nov 13 13:03:59 2007 -0500 Fix up some -Wall warnings. 
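One of the _set() methods listed above might look roughly like this; the member names, struct layout, and error messages are assumptions, not the project's actual code (strcmp() needs <string.h>):

    static int _ped_Alignment_set(_ped_Alignment *self, PyObject *value, void *closure) {
        char *member = (char *) closure;   /* member name handed in as the closure */

        if (value == NULL) {
            PyErr_SetString(PyExc_TypeError, "attribute may not be deleted");
            return -1;
        }

        if (!strcmp(member, "offset"))
            self->offset = PyLong_AsLongLong(value);
        else if (!strcmp(member, "grain_size"))
            self->grain_size = PyLong_AsLongLong(value);
        else {
            PyErr_SetString(PyExc_AttributeError, member);
            return -1;
        }

        return PyErr_Occurred() ? -1 : 0;
    }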
commit 6f2aa642637a24e2347ac4b5f7a5792d7397258b Merge: e33639e 53be40b Author: David Cantrell Date: Tue Nov 13 13:02:48 2007 -0500 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted * 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted: Fill in the _ped_Whatever_init methods with kwarg processing code. Fix raising an exception at the end of the parted lists. commit e33639e70a094bced7afa0c64e66ded968e63092 Author: David Cantrell Date: Tue Nov 13 13:02:36 2007 -0500 Started set method for _ped_Device commit 6407f5a218e8a7b0d93547a61daf12ade9f7e8fe Author: David Cantrell Date: Tue Nov 13 13:01:33 2007 -0500 Wrote set method for _ped_Constraint The setter for _ped_Constraint handles setting the max_size and min_size structure members. commit 53be40b0e4264dbf9772b607521e1ea218d2aa1b Author: Chris Lumens Date: Tue Nov 13 12:58:29 2007 -0500 Fill in the _ped_Whatever_init methods with kwarg processing code. commit 2c5c5a61b1abe0815650abde5c98612106dc0b05 Author: Chris Lumens Date: Tue Nov 13 11:45:43 2007 -0500 Fix raising an exception at the end of the parted lists. commit 28a8765d464e57d9cc16ef64d6fb2c0db5039542 Author: David Cantrell Date: Tue Nov 13 11:43:19 2007 -0500 Pass string as 2nd arg to PyDict_SetItemString() For all of the ENUM() macro replacement code, pass a string as the second argument to PyDict_SetItemString(). commit 602e164737e3fd3e2980f6ff456730f8f9473d3c Author: David Cantrell Date: Tue Nov 13 11:34:54 2007 -0500 Removed ENUM() macro Just put the code in _pedmodule.c because I can't stand the ENUM() macro. I also updated the to do list and added some more fun things to work on. commit acd31c7273ceb43353d243bed1e24f3e0e7d5ef4 Author: David Cantrell Date: Tue Nov 13 11:13:25 2007 -0500 Updated to do list. commit 66fdeb6817e3660ebeb82f0f8665378c00c44c9f Author: David Cantrell Date: Tue Nov 13 11:12:47 2007 -0500 Remove tp_alloc() use in _ped_Disk_new() Use PyObject_New() rather than tp_alloc() when creating a new _ped_Disk object. commit fbb9444a2e0272afb9fd4ea158073859701042f6 Author: Chris Lumens Date: Tue Nov 13 01:04:02 2007 -0500 Pass the member name as the optional data to the getset method. Also, I cleaned up the structure definitions to only specify what we are overriding and updated the TODO list. commit 9b5a440d90eaf7de97e64bb141e46591a491a62f Author: Chris Lumens Date: Tue Nov 13 00:11:27 2007 -0500 Use _new functions instead of calling PyObject_New directly. This fixes a bug where the conversion functions were returning objects of the correct types, but the type checking on PyArg_ParseTuple was failing anyway. I don't quite know why this was happening, but not going through the type object functions must have confused Python's internals. commit 522ee692f3335fefa028e54ff83a3e261058fa24 Author: Chris Lumens Date: Mon Nov 12 17:51:35 2007 -0500 Added ipython sessions showing a couple weird bugs. commit 3c0b7a45aede993f228203f694e2c285a33de6d3 Author: Chris Lumens Date: Mon Nov 12 17:26:13 2007 -0500 Compare to NULL, not Py_None for optional arguments. commit 04d79a37a5164df4693bb1bb54c303f526a36769 Author: Chris Lumens Date: Mon Nov 12 17:25:51 2007 -0500 Fix the build. commit a0de38ac5a71258d8dca9a0e0be19f9173a44eee Author: David Cantrell Date: Mon Nov 12 17:16:58 2007 -0500 get() methods for _ped_Partition and _ped_Timer Wrote get methods for _ped_Partition and _ped_Timer to return the C primitives in the Python data type. 
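The kwarg processing mentioned in commit 53be40b0 above ("Fill in the _ped_Whatever_init methods with kwarg processing code") follows the usual tp_init shape; a sketch with assumed member names:

    static int _ped_Geometry_init(_ped_Geometry *self, PyObject *args, PyObject *kwds) {
        static char *kwlist[] = {"dev", "start", "length", NULL};
        PyObject *dev = NULL;

        /* "O" is any object, "L" is a long long (a PedSector-sized value) */
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "OLL", kwlist,
                                         &dev, &self->start, &self->length))
            return -1;

        Py_XINCREF(dev);          /* keep our own reference to the new value */
        Py_XDECREF(self->dev);    /* and drop the old one, if any */
        self->dev = dev;
        return 0;
    }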
commit ebf09297372f140f2359d97785f3a6da9f84824d Author: David Cantrell Date: Fri Nov 9 18:00:09 2007 -0500 Add get method for C primitives in _ped_Alignment commit 3f769cccb274634652657ab6fbdac388a46a439a Author: Chris Lumens Date: Mon Nov 12 17:14:18 2007 -0500 Check the return values for all conversion functions. commit e7a2fb7c8df37434d90a4b0d565bef8acc647075 Author: Chris Lumens Date: Mon Nov 12 17:01:48 2007 -0500 Add error handling and exception raising to pyconstraint.c. commit 7650f25992e3dc23c27aa08112d46e3228f91770 Author: Chris Lumens Date: Mon Nov 12 15:17:50 2007 -0500 Timers are usually optional arguments, so treat them as such. commit d79e69e26a4750e7d9bf77b6203aedc011f1f8eb Merge: e5b33b6 f1604be Author: Chris Lumens Date: Fri Nov 9 17:59:01 2007 -0500 Merge branch 'master' of ssh://clumens@git.fedoraproject.org/git/hosted/pyparted commit e5b33b694ddf1f96a04d0a24282a3fa9d5c5d9da Author: Chris Lumens Date: Fri Nov 9 17:58:50 2007 -0500 We have a lot more to work on now. commit f1604bedc76e2961002e865dd8cb104fec09974b Merge: c17adb8 258466a Author: David Cantrell Date: Fri Nov 9 17:56:50 2007 -0500 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted * 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted: Add getattr methods for FileSystem and FileSystemType. Add error checking. Create new ConstraintException and IOException. commit c17adb80fa6259fb381ba515d1e8c629225325d4 Author: David Cantrell Date: Fri Nov 9 17:56:41 2007 -0500 Add get method for C primitives in _ped_Geometry commit 258466a747c53408e4760d7e880eea6364fb322f Author: Chris Lumens Date: Fri Nov 9 17:50:54 2007 -0500 Add getattr methods for FileSystem and FileSystemType. Add error checking. commit 8ab545deabe1ff520f180faf87bd7c11f8695288 Author: Chris Lumens Date: Fri Nov 9 17:48:50 2007 -0500 Create new ConstraintException and IOException. commit 50fcced4affd8507d26d3c79aa576fe39e552af5 Author: David Cantrell Date: Fri Nov 9 17:48:29 2007 -0500 Add get method for C primitives in _ped_Device commit 81ea6822719d253b43ca88ee8150105c4fe90dc1 Author: David Cantrell Date: Fri Nov 9 17:38:00 2007 -0500 Add get method for C primitives in _ped_CHSGeometry commit 56d8ebf8c3c71445c43ffb03a1f7a2697d025640 Author: David Cantrell Date: Fri Nov 9 17:31:06 2007 -0500 Fix line wrapping Do not carry long lines for _ped_DiskType_getset[] commit 709d4f8db3d0a454ebf8bc069e53632eba5e9b01 Author: David Cantrell Date: Fri Nov 9 17:24:28 2007 -0500 Move C primitives to get/set method for _ped_Constraint commit 3fc411c04d1dd62dd7b7b79c64e1b4822516c6a0 Merge: ca66c57 761284d Author: David Cantrell Date: Fri Nov 9 14:55:16 2007 -0500 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted * 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted: Add error handling to the pyunit functions. commit ca66c57bfe3d3398d19254502162cfb549075f3e Author: David Cantrell Date: Fri Nov 9 14:55:02 2007 -0500 Fix problems with the _ped_DiskType type Removed C primitives from the members structure. Added a get method to return Python data types for the C primitives stored in the _ped_DiskType structure. Reduced the big type object structure so we don't have so many 0 lines. commit 39daf3900306c619517608a6c1ef107d7d7c74f9 Author: David Cantrell Date: Fri Nov 9 14:53:05 2007 -0500 Correct PYTHONPATH in localtest script. 
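Treating timers as optional arguments (commit 7650f259 above) might be handled like this; the type object and conversion helper names are assumptions in the spirit of the naming used elsewhere in this log:

    PyObject *in_timer = NULL;
    PedTimer *timer = NULL;

    /* '|' starts the optional arguments; 'O!' also type-checks the object */
    if (!PyArg_ParseTuple(args, "|O!", &_ped_Timer_Type_obj, &in_timer))
        return NULL;

    /* only convert when a timer was actually supplied; otherwise hand
     * libparted a NULL rather than a converted None */
    if (in_timer != NULL)
        timer = _ped_Timer2PedTimer(in_timer);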
commit fe199044fc6f5918b00901d47052501ad448e359 Author: David Cantrell Date: Fri Nov 9 14:52:39 2007 -0500 Create new version functions Changed version functions in _ped to libparted_version() and pyparted_version(). They return strings containing the version numbers of either libparted or pyparted. Added a function to parted called version() that returns a dict containing both the libparted and pyparted versions. commit 761284de2aeb952233b37f124a84d10d382724e7 Author: Chris Lumens Date: Fri Nov 9 13:45:57 2007 -0500 Add error handling to the pyunit functions. commit b2fdc8caba509a440696643417f63edacee9578c Author: Chris Lumens Date: Fri Nov 9 11:56:30 2007 -0500 Raise an exception when there are no more DiskTypes or flags. commit 9aab8d1746aea3ff1a69c1145755157b996b1036 Author: Chris Lumens Date: Fri Nov 9 11:54:04 2007 -0500 All the py_ped_partition_flag_* functions now work. commit ebe52af02e483625eaadf85915d99ea661893445 Author: Chris Lumens Date: Fri Nov 9 11:53:20 2007 -0500 If no optional argument is given, don't pass None to the C bindings. commit bed0e65716e1485c6ca69e7570bd637b1d9b549f Author: Chris Lumens Date: Fri Nov 9 11:06:52 2007 -0500 Handle the provided disk type not being found by raising an exception. commit a9223a6a9be49cd99e4dd83f7c7d427b00768b69 Author: Chris Lumens Date: Fri Nov 9 11:04:28 2007 -0500 Merge the two Unknown*TypeExceptions into a single one. We're probably going to have a lot of places where a parted function can get an unknown disk type, filesystem type, or whatever. It makes sense to just have one exception for all these events instead of a whole pile. The caller will know what sort of thing is Unknown. commit 05a13e154594fc5c22c69066ee0f6a806389d323 Author: Chris Lumens Date: Thu Nov 8 22:56:39 2007 -0500 The correct way to specify a type is with O!, not O&. commit 7c0ec2db8f0b3a839314c15578e6405a75459cde Author: Chris Lumens Date: Thu Nov 8 22:45:57 2007 -0500 Add type checking to PyArg_ParseTuple. This commit adds type references to most of the PyArg_ParseTuple calls so that passing the wrong kind of PyObject as an argument will result in a TypeError instead of unintended results. commit b9f74f3dcf24077a53b12cfe0506180f13125749 Author: Chris Lumens Date: Thu Nov 8 20:56:54 2007 -0500 No more need for a .cvsignore file. Ignore compiled Python. commit be83990f9f3147fc4594657c656ded280d3e4007 Author: David Cantrell Date: Wed Nov 7 18:02:14 2007 -0500 Make sure we return at the end of partedExnHandler() commit f33df4f803fb88f0b48e363b052ecb5f566637d6 Author: David Cantrell Date: Wed Nov 7 17:31:07 2007 -0500 Remove remaining type handling calls for the long long types. commit 98f9867bc7455a662a9aa5abfd2f30550e69f4be Author: Chris Lumens Date: Wed Nov 7 17:26:11 2007 -0500 Handle libparted exceptions as well. Added a libparted exception handler that catches those exceptions and then either immediately raises python exceptions for serious bugs or sets global flags so parted module methods can raise more specific exceptions with some useful information. This is untested. commit f2a773b3030500e907e82b16c235167a95bff6d4 Author: David Cantrell Date: Wed Nov 7 16:48:47 2007 -0500 Modify _ped types to account for long long changes. Modified _ped_Constraint, _ped_Device, _ped_Partition, _ped_Geometry, and _ped_Alignment to store long longs as long long and not their old _ped type. commit 08bcbd93b55be9b708a6163d1cebf899ea31740f Author: Chris Lumens Date: Wed Nov 7 16:24:39 2007 -0500 Add lots of error checking to the conversion funtions. 
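The libparted exception handler described in commit 98f9867b above could be sketched like this (the classification and flag handling are simplified assumptions):

    static unsigned int caught_exceptions = 0;

    static PedExceptionOption partedExnHandler(PedException *e) {
        switch (e->type) {
            /* serious problems become Python exceptions immediately */
            case PED_EXCEPTION_BUG:
            case PED_EXCEPTION_FATAL:
                PyErr_SetString(PyExc_RuntimeError, e->message);
                return PED_EXCEPTION_CANCEL;
            /* everything else just records that something happened so the
             * calling wrapper can raise a more specific exception later */
            default:
                caught_exceptions++;
                return PED_EXCEPTION_IGNORE;
        }
    }

    /* installed once at module initialization time:
     * ped_exception_set_handler(partedExnHandler); */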
commit abeadcc87a7807752a4cbfe46b9ac8b454c330ff Author: David Cantrell Date: Wed Nov 7 16:04:31 2007 -0500 Change simple Python _ped types back to just long longs. Change the following back to just long longs: _ped.PartitionType _ped.Sector _ped.PartitionFlag _ped.DiskTypeFeature commit 08483005be8e2c26d6394b000969f0486edd4998 Author: David Cantrell Date: Wed Nov 7 15:04:06 2007 -0500 Removed conversion functions for long long types Removed conversion functions for: _ped.PartitionType _ped.Sector _ped.PartitionFlag _ped.DiskTypeFeature commit b9f98f78af6a709cec1ffc3604fef089707a5bc4 Author: David Cantrell Date: Wed Nov 7 14:59:19 2007 -0500 Removed the _ped.Unit type from the _ped module Removed the _ped.Unit type and all calls to conversion routines. We are just storing the PedUnit as a long long in Python since that maps easily over to the C API. commit d174ae2e4706d93c100284317aa484f47a870366 Author: David Cantrell Date: Wed Nov 7 14:58:38 2007 -0500 Removed the _ped.Unit() and PedUnit conversion functions commit 3972cc74a27d06a8ec0dca48408f5372b395d61a Author: David Cantrell Date: Wed Nov 7 14:57:58 2007 -0500 Remove _ped.Unit() functions Removed _ped_Unit2PedUnit() and PedUnit2_ped_Unit() since those are just long longs in both C and Python. commit 88f1773c50df192384dcf0285c7afd6edf87f683 Author: David Cantrell Date: Wed Nov 7 14:30:09 2007 -0500 Use libparted functions to make libparted types in convert.c Rather than manually creating the libparted structures in convert.c, use the appropriate new function in libparted if it exists for that type. The following changes were made: in _ped_Disk2PedDisk(), call ped_disk_new() to make the return type in _ped_DiskType2PedDiskType(), use ped_disk_type_get() in _ped_FileSystem2PedFileSystem(), use ped_file_system_open() in _ped_Partition2PedPartition(), use ped_partition_new() commit 66a120a22a6a8428c22eda58d027cb3776e950c6 Author: David Cantrell Date: Wed Nov 7 14:26:39 2007 -0500 Updated to do list with old pyparted stuff Walked the old pyparted API and updated the to do list with what I found in all the MethodDef structures. We may or may not have to implement all of them, but I don't know right now. commit 19664a0c33f43659a525cbdc5b778d0da5a24dfb Author: Chris Lumens Date: Wed Nov 7 14:24:25 2007 -0500 Raise an exception if an unknown filesystem type is requested. commit 8d39ca5202df44fbe122c1e772b91fb4f1c9229b Author: Chris Lumens Date: Wed Nov 7 14:15:45 2007 -0500 Move exceptions into their own header file so they are visible in all sources. commit 0e4a6346d2f4de3a8a10b0de8d0fc9655a73c4fc Author: Chris Lumens Date: Wed Nov 7 13:41:48 2007 -0500 Never call ped_file_system_type_destroy. All the file_system_type code in libparted returns pointers to elements in an internal linked list, so we never want to free those things. commit e6c127f2a37959681b4d3c83cd7c20afd6de6724 Author: Chris Lumens Date: Wed Nov 7 10:24:29 2007 -0500 Fill in all the easy methods in the old API and add docs strings. commit 58f09f31f275f6d8c81df749d3dc7b8c9b8eb6ec Author: Chris Lumens Date: Tue Nov 6 17:23:39 2007 -0500 Fix syntax errors on deprecated API functions. commit 9952c820317d6209b7248d24d443b2751dc25ba8 Author: Chris Lumens Date: Tue Nov 6 17:17:25 2007 -0500 Define away functions that are not yet implemented in libparted. commit 235f3f5885fe8c30a3c4ff03cb21589122611d5b Author: Chris Lumens Date: Tue Nov 6 17:16:16 2007 -0500 Revert "ped_file_system_destroy isn't implemented in libparted, so define it away for now." 
This reverts commit fe6e58b9d2f6b3277793d38f22925181063d9548. commit 15746bef1a537a5cb740dd30747c86376bd5136c Author: Chris Lumens Date: Tue Nov 6 17:13:51 2007 -0500 ped_file_system_clobber_exclude is a static function. We can't implement it. commit e418868b2b5556172bc6397f8819b5c7ba5bd8dd Author: Chris Lumens Date: Tue Nov 6 17:10:49 2007 -0500 Fix typos. commit fe6e58b9d2f6b3277793d38f22925181063d9548 Author: Chris Lumens Date: Tue Nov 6 17:10:41 2007 -0500 ped_file_system_destroy isn't implemented in libparted, so define it away for now. commit 5d0c0fe7f2c3593d38ae4c8c5525c3b31c5fc8bf Author: Chris Lumens Date: Tue Nov 6 17:09:27 2007 -0500 Fix typo. commit b9acfcbd92887f9f51c1b8a63811c013df90ae19 Author: David Cantrell Date: Tue Nov 6 16:40:53 2007 -0500 Remove block_sizes code and implemented remaining functions. Implemented: _ped_FileSystemType2PedFileSystemType() _ped_PartitionFlag2PedPartitionFlag() _ped_PartitionType2PedPartitionType() PedPartitionType2_ped_PartitionType() Removed code referencing the block_sizes structure members. We're not storing those in the Python data types. commit 6a2018108d635bab346d7261d410c5e5e4607ec4 Author: David Cantrell Date: Tue Nov 6 16:39:31 2007 -0500 Remove block_sizes from the Python data types. We don't want to store the block_sizes array in the Python data type. Why? Several problems. First, it's not really used in libparted that I can find, so maybe it's a good candidate for removal. Second, it is stored as an array of ints, but with no NULL terminator and no count of how many elements are in the array. So how does that even work? My guess is it doesn't because it was never used elsewhere in the libparted code. commit cd05d80c863c242fe4ef45ffe589b9836206f5e6 Author: David Cantrell Date: Mon Nov 5 17:03:06 2007 -0500 More conversion functions. Implemented: PedFileSystemType2_ped_FileSystemType() PedPartition2_ped_Partition() PedPartitionFlag2_ped_PartitionFlag() Also made some other code cleanups for types and casting and stuff. commit e1f42c4c8a6901af756c7be34430a9481170b11f Author: David Cantrell Date: Mon Nov 5 16:16:34 2007 -0500 Remove unused members and add more conversion functions Implemented PedDisk2_ped_Disk(), _ped_DiskType2PedDiskType(), and _ped_Partition2PedPartition(). Removed assignments to structure members that no longer exist (next, prev, part_list, and so on). commit 191a20e543d767d9b4f17eea21f8ab356d470095 Author: David Cantrell Date: Mon Nov 5 16:14:38 2007 -0500 Clean up Python members structure Removed next, prev, part_list, and other members that have been removed from the Python _ped data structures. commit 189b64d3105b656bfc95ea32f97ed4eeda1a8d09 Author: David Cantrell Date: Mon Nov 5 16:05:38 2007 -0500 Removed libparted-specific data structure elements Removed libparted-specific data structure elements from the Python _ped data structures. commit 6850c95dbe9b3fb01a2b0d58f9b5beb067dc2661 Author: David Cantrell Date: Mon Nov 5 13:41:07 2007 -0500 Implemented convert functions for PedDiskTypeFeature Write _ped_DiskTypeFeature2PedDiskTypeFeature() and PedDiskTypeFeature2_ped_DiskTypeFeature(). Modified prototypes since we only deal with PedDiskTypeFeature as an enumerated value. commit 09457882d144cfc721d16a220c2da11d71b51bd3 Author: David Cantrell Date: Wed Oct 31 17:29:22 2007 -0400 Remove pyexception.c stuff from TODO. We are not reimplementing the code from exception.c in libparted in pyparted. We will use native Python exceptions. 
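The convert.c change from commit 88f1773c above (use libparted's own constructors and lookups) means a conversion function can shrink to a single call; a sketch with an assumed _ped_DiskType layout:

    PedDiskType *_ped_DiskType2PedDiskType(PyObject *s) {
        _ped_DiskType *type = (_ped_DiskType *) s;

        /* let libparted find its own internal PedDiskType by name
         * instead of building one by hand */
        return ped_disk_type_get(PyString_AsString(type->name));
    }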
commit 5c7cef0c17ca24ef2b480d484f51bd3cd3fa284b Author: David Cantrell Date: Wed Oct 31 17:22:13 2007 -0400 Started _ped_Disk2PedDisk() and more comments. Started the _ped_Disk2PedDisk() function. Need to figure out how to copy over the block_sizes and disk_specific struct members correctly. Added a more detailed comment at the top explaining the approach to memory management with these functions. commit 73eda002485e3b6a4811d3b888921f2168979a35 Author: David Cantrell Date: Wed Oct 31 16:10:21 2007 -0400 Remove unncessary return value checking code. commit f5894b1f4fe60a069e636eb4ec8f1cb0f8af5ebe Author: David Cantrell Date: Wed Oct 31 16:06:30 2007 -0400 Remove unnecessary return value checking code. Formatting fixes. commit defa8e5304228808eb611c9a026607aa59102341 Author: David Cantrell Date: Wed Oct 31 16:04:48 2007 -0400 Remove unnecessary return value checking code. commit e57d76e0f932658537e50e7162129c2814bfa242 Author: David Cantrell Date: Wed Oct 31 16:03:05 2007 -0400 free() -> ped_file_system_type_destroy() commit 70401be8e65ac48ff8255d87676db822910e09ff Author: David Cantrell Date: Wed Oct 31 16:01:11 2007 -0400 Free up libparted types that we create commit 7adea50e7318026331e5c4c185e1072ce70cbd48 Author: David Cantrell Date: Wed Oct 31 13:53:38 2007 -0400 Remove unnecessary return value checking. Formatting fixes. commit e91a592114a8622928aef3f6ecabef2ce8b3505e Author: David Cantrell Date: Wed Oct 31 13:47:28 2007 -0400 Remove unnecessary return value checking at the end of the functions. We want Python exceptions to be in force. commit 781accc1902a9aad4801f250545365bc74087ae3 Author: David Cantrell Date: Tue Oct 30 17:11:04 2007 -0400 Add the many many many methods definitions for the _ped module so we have ways to run the C functions we've been working on. commit 12c4dbc69b46d53167965b8e3957cb350bdb69dd Author: David Cantrell Date: Tue Oct 30 16:11:46 2007 -0400 Implemented the remaining functions in pydisk.c: py_ped_disk_add_partition() py_ped_disk_remove_partition() py_ped_disk_delete_partition() py_ped_disk_delete_all() py_ped_disk_set_partition_geom() py_ped_disk_maximize_partition() py_ped_disk_get_max_partition_geometry() py_ped_disk_minimize_extended_partition() py_ped_disk_next_partition() py_ped_disk_get_partition() py_ped_disk_get_partition_by_sector() py_ped_disk_extended_partition() Updated to do list (remove fdasd.h and vtoc.h stuff). Stubbed out new convert functions: _ped_PartitionFlag2PedPartitionFlag() PedPartitionFlag2_ped_PartitionFlag() _ped_PartitionType2PedPartitionType() PedPartitionType2_ped_PartitionType() commit 1d92887fae843c0d6189c5d0470d5e180f5a99e4 Merge: e9acf9b e497675 Author: David Cantrell Date: Tue Oct 30 11:50:57 2007 -0400 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted commit e9acf9b4398b6aea451826cad952550e93045758 Author: David Cantrell Date: Tue Oct 30 11:50:51 2007 -0400 Removed functions that have been implemented. commit cf946bb286c2cbbc60aa7a2286881b4699170333 Author: David Cantrell Date: Tue Oct 30 11:49:56 2007 -0400 Implemented the following functions in pydisk.c: py_ped_disk_duplicate() py_ped_disk_print() py_ped_disk_get_primary_partition_count() py_ped_disk_get_last_partition_num() py_ped_disk_get_max_primary_partition_count() py_ped_partition_new() py_ped_partition_destroy() py_ped_partition_is_active() commit e497675810e4f9266a37f2c47a00a890abca1f92 Author: Chris Lumens Date: Tue Oct 30 11:33:32 2007 -0400 Add more error checking to pynatmath.c and return booleans where appropriate. 
commit e5bd76c459970c9c8c5ecf273375469c7283ed7b Author: Chris Lumens Date: Tue Oct 30 11:27:20 2007 -0400 Fill in the remaining functions in pygeom.c. Also added more error checking and changed most functions to return booleans instead of integers where it makes sense. commit 9501e344cfa7742a7a060ff9fe61dbc8daf39183 Author: Chris Lumens Date: Tue Oct 30 11:00:37 2007 -0400 Finish the last of the pydevice.c functions, add more error checking. commit 600e37dbe808fa73d677a47477fe2db67aafda66 Merge: bba36c0 e6b346f Author: David Cantrell Date: Tue Oct 30 10:31:35 2007 -0400 Merge commit 'e6b346f40b9e9b9d3060f2ddbcf76455c5efd3c5' Conflicts: src/pytimer.c commit bba36c05450b29becbb8ad4fa4599f12512e5f42 Author: David Cantrell Date: Tue Oct 30 10:27:17 2007 -0400 Fuck git. commit 3552b977629885bd8c9e6fc8c4b77206661d4b5a Author: David Cantrell Date: Tue Oct 30 10:21:12 2007 -0400 Updated to do list. commit c2b6bdcedf82b711e098e381f2bd4b11b6611499 Author: David Cantrell Date: Tue Oct 30 10:20:46 2007 -0400 Formatting cleanups and change FIXME comment to XXX. commit 308298e2a47ed7ee0855d20cde711fe4e4a3c144 Author: David Cantrell Date: Tue Oct 30 10:20:18 2007 -0400 Change FIXME comments to XXX so we are consistent throughout the code. For 1:1 functions that have not been implemented yet, throw a NotImplementedError exception rather than just returning None. commit 18115bd6d06f11127e2b5258b818510bfc7f046e Author: David Cantrell Date: Tue Oct 30 10:17:47 2007 -0400 Define the NotNeededException exception. commit e6b346f40b9e9b9d3060f2ddbcf76455c5efd3c5 Author: Chris Lumens Date: Tue Oct 30 10:14:02 2007 -0400 Add all the simple pytimer.c functions. commit 7302df6f621efd902ac60e9332314b1901f5e617 Author: Chris Lumens Date: Tue Oct 30 10:13:52 2007 -0400 Build fixes. commit 4d12564f3cd4d511af3cacd482d9adacfa70aaf4 Author: Chris Lumens Date: Tue Oct 30 10:13:32 2007 -0400 Add more conversion function stubs. commit 31c3469f8af1dce0f47c82c9b729a2567d3b7dd3 Merge: 4d85ec7 e11b5ec Author: David Cantrell Date: Mon Oct 29 16:58:40 2007 -0400 Merge branch 'master' of ssh://dcantrel@git.fedoraproject.org/git/hosted/pyparted commit 4d85ec77604f9283752d2151e79edc16667fd60e Author: David Cantrell Date: Mon Oct 29 16:58:27 2007 -0400 Added the following functions to pydisk.c: py_ped_disk_type_register(), py_ped_disk_type_unregister(), py_ped_disk_type_get_next(), py_ped_disk_type_get(), py_ped_disk_type_check_feature(), py_ped_disk_probe(), py_ped_disk_clobber(), py_ped_disk_clobber_exclude(), py_ped_disk_new(), py_ped_disk_new_fresh(), py_ped_disk_destroy(), py_ped_disk_commit(), py_ped_disk_commit_to_dev(), py_ped_disk_commit_to_os(), py_ped_disk_check(), py_ped_partition_is_busy(), py_ped_partition_get_path() commit e11b5ec3fed017020ad6d84b3e98c85caf58d9dc Author: Chris Lumens Date: Mon Oct 29 16:53:08 2007 -0400 Filled in loads of the conversion functions. commit 43c037d9c9fb0e5f2146952a324859d9ed54d4d0 Author: David Cantrell Date: Mon Oct 29 15:39:38 2007 -0400 Clean up the AUTHORS file a bit. commit 87e5e98bbcac4bcf184415faf64cd27f335fe234 Author: Chris Lumens Date: Mon Oct 29 15:14:24 2007 -0400 Fill in all the functions in pyfilesys.c. commit c46614865f3c69500d91102604b32c2792e1ee3d Author: Chris Lumens Date: Mon Oct 29 15:14:10 2007 -0400 More stubs for conversion functions. commit c161e52d6e8d68717e97f8320803e753625bc543 Author: Chris Lumens Date: Mon Oct 29 14:27:50 2007 -0400 Fix a typo. 
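The stub convention from commit 308298e2 above (raise NotImplementedError rather than returning None) looks like this for any still-unwritten 1:1 wrapper (the function name here is made up):

    static PyObject *py_ped_not_written_yet(PyObject *self, PyObject *args) {
        /* XXX: implement this 1:1 libparted mapping */
        PyErr_SetString(PyExc_NotImplementedError, "not yet implemented");
        return NULL;
    }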
commit c95bb72e6675698a09acb325eaf2ad9a790d1d6a Author: Chris Lumens Date: Mon Oct 29 14:07:33 2007 -0400 Use "s" instead of "S". Return booleans from functions where it makes sense. commit 8c0e6a667ee4f19f96c742ddb44aee010cb3b019 Author: Chris Lumens Date: Mon Oct 29 13:48:36 2007 -0400 Forgot to add myself as an author. commit 6667c491cb9130e17e5afb2268a2740b62a95d2a Merge: 5cc1b13 3782a4f Author: Chris Lumens Date: Mon Oct 29 13:48:18 2007 -0400 Merge branch 'master' of ssh://clumens@git.fedoraproject.org/git/hosted/pyparted commit 5cc1b13be23dac97ae53dad78a1d4adedbcea60c Author: Chris Lumens Date: Mon Oct 29 13:46:13 2007 -0400 Fill in all the easy functions in pydevice.c. commit ea1c2b5d65fb87844061e7efea29c5c5eee59255 Author: Chris Lumens Date: Mon Oct 29 13:46:00 2007 -0400 Add another stub conversion function. commit 3782a4fe77e2ae487c3843e1a13cab6c8cf63538 Author: David Cantrell Date: Mon Oct 29 13:38:00 2007 -0400 Updates to the WHY document. commit 14b6432eba5a0186e874b8891dec1a8d36141efc Author: David Cantrell Date: Mon Oct 29 13:35:39 2007 -0400 More to do. commit a3415d7e2206620c78d7efc008ec8f69e3e90cb5 Author: Chris Lumens Date: Mon Oct 29 12:12:20 2007 -0400 Fix up a couple more of the redundant calls to PyObject_New. commit 9055ed7f42bea23dbc5b15ac067069ed31f329f2 Author: David Cantrell Date: Sun Oct 28 17:27:45 2007 -0400 Remove the pyparted.spec.in file. We don't need to ship a spec file. commit 0f7325fd1b7bb28d049b7b543ef8144030c39d48 Author: David Cantrell Date: Sun Oct 28 17:27:25 2007 -0400 Add email addresses to the boilerplates. commit 521116d8182829751666f7d176569284f650de60 Author: David Cantrell Date: Sun Oct 28 13:07:59 2007 -0400 Remove -Werror from the AM_INIT_AUTOMAKE macro. commit 7f18e8065648c3af5d6bf3218cb7f07789e579c7 Author: David Cantrell Date: Sun Oct 28 13:07:41 2007 -0400 Ignore *.o files and files that end with tilde. commit c36ee910aa138533ac172ab230670eb218ba3187 Author: David Cantrell Date: Mon Oct 22 14:34:32 2007 -0400 Remove WHY from the MAINTAINERCLEANFILES list. commit 18bf2a8ee41caa8d7cb4241097fab1d61559650b Author: Chris Lumens Date: Fri Oct 12 12:14:51 2007 -0400 Add my name to that file I did stuff in. commit db9e85b24d89e0e1ac245a61a3316d2a48ca1b54 Author: Chris Lumens Date: Fri Oct 12 12:13:11 2007 -0400 Remove redundant calls to PyObject_New. commit 8ad7da28af35e75a47659ddbf5e32c4ed7db1e7c Author: Chris Lumens Date: Fri Oct 12 11:56:09 2007 -0400 Fix the typos that were causing compile warnings in pynatmath.c commit dda8576289e3825af209c554e43125e570ac495b Author: Chris Lumens Date: Fri Oct 12 11:39:33 2007 -0400 And remove the completed functions from the list. commit 16cd8a879cf6c804edbcf37ef7763bef0c5c1e6b Author: Chris Lumens Date: Fri Oct 12 11:38:54 2007 -0400 Added all the easy functions in pygeom.c. commit 2801638d132148883a8f152adcee49dca4764d32 Author: Chris Lumens Date: Fri Oct 12 11:38:40 2007 -0400 Use the libparted functions for freeing objects instead of our own. commit 6cd28e7e499305358c0413aa1a7a9e8c85104090 Author: David Cantrell Date: Thu Oct 11 17:35:34 2007 -0400 Ignore the ChangeLog. commit c74f62e4261d2d8eb46c7bd8d16079a5d38953cd Merge: d36f433 a3e0281 Author: David Cantrell Date: Thu Oct 11 14:10:21 2007 -0400 Fixed merge conflicts. commit a3e0281a4aefc19fcc1fbdff61901e808aaa0d0c Author: Chris Lumens Date: Thu Oct 11 14:06:51 2007 -0400 Fix copy-and-paste errors. 
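Returning booleans where it makes sense (commit c95bb72e above) usually means mapping libparted's int success codes onto Python bools; a sketch, assuming device is an already-converted PedDevice pointer:

    int ret = ped_device_open(device);   /* libparted reports success as a nonzero int */

    if (ret == 0) {
        PyErr_SetString(PyExc_IOError, "could not open device");
        return NULL;
    }

    Py_RETURN_TRUE;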
commit 3bb296f0acad4a06b416a915570979345a2d4e20 Author: Chris Lumens Date: Thu Oct 11 14:00:10 2007 -0400 And remove everything I did from the TODO list. commit 89d0412796a11c1d58173e4382ba8bc136b2d4a4 Author: Chris Lumens Date: Thu Oct 11 13:59:28 2007 -0400 Add most of the functions for the pynatmath.c module. commit d36f43359c11cc50b491dc1fa8f9dee6582677a8 Author: David Cantrell Date: Thu Oct 11 13:53:44 2007 -0400 In the convert.c functions, use the ped_*_new() functions to create new libparted objects. Except for the conversion to PedDevice where we use ped_device_get() and hand it the device path. Removed all of the _free_*() functions since libparted provides those already. commit b0a4f50b29be24a5bd8c8aef8fabe8210005768b Author: David Cantrell Date: Thu Oct 11 13:20:11 2007 -0400 Remove calls to _free_Ped*() functions and just use the ped_*_destroy() functions to handle free'ing those objects. commit 7d882fef5f83f49bac0f01047bff788569be221d Author: Chris Lumens Date: Thu Oct 11 10:35:25 2007 -0400 Add funtions for converting to _ped_Sector, _ped_Unit, and a stub for _ped_Geometry. commit c0def772c5a4e6bb1bd63ea3e3043b832951946f Author: Chris Lumens Date: Thu Oct 11 10:34:20 2007 -0400 Add all the pynatmath.c functions to the module list. commit 7f2d9fcbb5a8dc95940242ec73bb341fcbda48a6 Author: Chris Lumens Date: Wed Oct 10 19:31:30 2007 -0400 Fix a couple typos that were breaking import. commit dc16d3827df5eae2764cf223df3fd349472e81c1 Author: David Cantrell Date: Wed Oct 10 17:22:20 2007 -0400 Added missing '_new' part of the libparted library calls. commit 7a00cc71fc2d589003a175690f26ea0dbd9e14ca Author: David Cantrell Date: Wed Oct 10 17:21:50 2007 -0400 Stub for PedConstraint2_ped_Constraint(). commit e45712ed08ccac72a50e5f78ba8741b1fbb497dd Author: David Cantrell Date: Wed Oct 10 17:21:27 2007 -0400 _ped_PartitionFlagType_Type_obj -> _ped_PartitionFlag_Type_obj commit 9667343ce42c353a2a337e9b6ec3b7e871544ae9 Author: David Cantrell Date: Wed Oct 10 16:54:38 2007 -0400 Call appropriate object destroy function in libparted to take care of objects allocated by libparted library calls and returned to us. commit 8592987fdd64144809ceb55f1a6b639dc8b83059 Author: David Cantrell Date: Wed Oct 10 16:39:10 2007 -0400 Correct some typos in pyconstraint.c. commit c0d12f107644ecc1c7dd52bc03a59709b1c5ada8 Author: David Cantrell Date: Wed Oct 10 15:18:00 2007 -0400 Updated the to do list. commit 7d33b2f5206d5671f0e3df5458f012afd6514af9 Author: David Cantrell Date: Wed Oct 10 15:17:35 2007 -0400 Wrote the pyconstraint.c 1:1 pass-thru functions. commit 318bedf0775d807ce71303cde2ce7225fa83f060 Author: David Cantrell Date: Wed Oct 10 15:17:12 2007 -0400 Ignore *.loT files. commit 1167b0c8e001f57e9e986b9b5490aa174ce3bb0c Author: David Cantrell Date: Tue Oct 9 15:29:26 2007 -0400 Use PyObject_New() in the _init() functions. commit c8073b4a3109956a986fe0a30570d9d878a720f5 Author: David Cantrell Date: Tue Oct 9 15:28:25 2007 -0400 Call _free_X() functions to free libparted types. commit 2af1314fa81e05597aba51583d9ee0123dbe3f0d Author: David Cantrell Date: Tue Oct 9 13:09:07 2007 -0400 Use PyObject_Del() in the dealloc functions. commit 90ca2d341c82403ddc54f53131d468d64d1dd381 Author: David Cantrell Date: Tue Oct 9 13:05:25 2007 -0400 New coding style in place for the 1:1 function mappings. Basically, the functions will call PyArg_ParseTuple() to get the input parameters. Any sanity checking will take place and exceptions raised as necessary. The object return will be allocated using PyObject_New(). 
The _ped_X2Y() functions will be used to convert input parameters to libparted types so that libparted can be called. After the libparted call, the appropriate _free_X() function will be called to free memory that was allocated for the libparted call. The conversion functions all take in PyObject pointers now and cast within, so when we call PyArg_ParseTuple(), we store directly to PyObjects rather than the specific _ped_X types. commit 6c0e3e3d4585d404adb95a1773a0b4bbc9f7d031 Author: David Cantrell Date: Tue Oct 9 13:02:11 2007 -0400 Can't return NULL in a void function, just return. commit c823f0ee730bd78c37d9a8072500631fd11b3d00 Author: David Cantrell Date: Tue Oct 9 13:01:46 2007 -0400 Include pyunit.h. commit e19c756a7aece0e97e27f6b93cf39f03e87252cf Author: David Cantrell Date: Tue Oct 9 11:21:48 2007 -0400 Remove bad #endif comment from the end of the file (copied prototypes over from the header to use to start functions). commit 77e3e26d435a07dd362815bea0eb7e5a1f43e12a Author: David Cantrell Date: Tue Oct 9 11:21:06 2007 -0400 Note that start_range and end_range are _ped.Geometry() objects. commit 87855382865fe831044e2180b9da44f9633f2504 Author: David Cantrell Date: Tue Oct 9 11:20:00 2007 -0400 Implemented remaining conversion functions and added a few more. commit 4ceee4db7286cf4911b3ba6888c93e7457cfcf14 Author: David Cantrell Date: Tue Oct 9 01:11:41 2007 -0400 Working on _ped_X object to libparted typedef conversion functions to easily handle converting between the Python and C datatypes. Not done yet (in fact, this won't compile, but I'm going to bed right now). commit d17da76f857e808e722a73c4145d5d44f6dd9e26 Author: David Cantrell Date: Tue Oct 9 00:58:07 2007 -0400 Documentation updates. commit 9e31ac6ef2fa430321921204e20a5115ca058e14 Author: David Cantrell Date: Tue Oct 9 00:57:46 2007 -0400 Stubs for: _ped_Alignment2PedAlignment() _ped_Constraint2PedConstraint() _ped_Geometry2PedGeometry() _ped_Sector2PedSector() commit 3bd42c271fb9de7d2d97bbfe223992586b85806a Author: David Cantrell Date: Tue Oct 9 00:56:45 2007 -0400 Implemented py_ped_constraint_init(). commit 860bc65faf0bbd0d9302bf833d17676bbbf8596c Author: David Cantrell Date: Tue Oct 9 00:56:23 2007 -0400 Minor style cleanups and added a note to remind myself to check into whether or not I need a particular incref. commit a41cceea24392ed9eb30528305e9fab17a6bf451 Author: David Cantrell Date: Mon Oct 8 15:31:34 2007 -0400 Some comments to make it a little more clear as to what _ped_Device2PedDevice() is doing. commit 588ff6b65d66332f60ce8568445ed0e55e1185d4 Author: David Cantrell Date: Mon Oct 8 15:31:05 2007 -0400 Upper bounds check with PED_UNIT_LAST rather than PED_UNIT_TEBIBYTE. commit 3c8220a060a4c0c7c352e98ff9dba406d60bf255 Author: David Cantrell Date: Mon Oct 8 15:20:43 2007 -0400 Stubbed out all remaining pass-thru functions that need to be written. All functions that have not been written yet have a FIXME comment and they return Py_None right now. commit 4447442e823e25b6a5f4592d27644d4e27582866 Author: David Cantrell Date: Mon Oct 8 14:37:09 2007 -0400 Ignore the generated pyparted.spec file. 
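Putting the coding style from commit 90ca2d34 just above together, a 1:1 pass-thru wrapper roughly follows this shape (every name below is illustrative rather than pyparted's actual code):

    static PyObject *py_ped_geometry_sync(PyObject *s, PyObject *args) {
        PyObject *in_geom = NULL;
        PedGeometry *geom = NULL;
        int ret;

        /* 1. read and type-check the Python-side arguments */
        if (!PyArg_ParseTuple(args, "O!", &_ped_Geometry_Type_obj, &in_geom))
            return NULL;

        /* 2. convert to the libparted type and make the 1:1 call */
        geom = _ped_Geometry2PedGeometry(in_geom);
        if (geom == NULL)
            return NULL;

        ret = ped_geometry_sync(geom);

        /* 3. free the temporary libparted object created for the call */
        ped_geometry_destroy(geom);

        /* 4. map the result back to Python, raising on failure */
        if (ret == 0) {
            PyErr_SetString(PyExc_IOError, "could not sync geometry");
            return NULL;
        }

        Py_RETURN_TRUE;
    }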
commit e7439b657ebbbf91a14c4c3d81175dc239127423 Author: David Cantrell Date: Mon Oct 8 14:36:40 2007 -0400 Implemented the following pass-thru functions for unit.h in libparted: py_ped_unit_set_default() py_ped_unit_get_default() py_ped_unit_get_size() py_ped_unit_get_name() py_ped_unit_get_by_name() py_ped_unit_format_custom_byte() py_ped_unit_format_byte() py_ped_unit_format_custom() py_ped_unit_format() commit 07975c95017f62ec4c335d4090e41f67133ea78e Author: David Cantrell Date: Mon Oct 8 14:35:16 2007 -0400 Store the next struct member of _ped_Device as a PedDevice pointer rather than a _ped_Device pointer. commit bdbeb4fa45369a3c70d8fc18f24bc2395e1f7f76 Author: David Cantrell Date: Mon Oct 8 14:34:33 2007 -0400 The convert.c file (and header file) define a set of convenience functions that convert to and from the Python types and libparted typedefs. For example, converting a _ped.Device(), which is a _ped_Unit pointer in C, to a PedDevice for libparted. commit 9b0feaa10f4c9112a2e7ce26484cd985ceb70b50 Author: David Cantrell Date: Mon Oct 8 14:32:39 2007 -0400 Add WHY to the list of files to include in the distribution. commit dca7a5476a813e776afff0dbf1591ba58fd5f2d6 Author: David Cantrell Date: Sat Oct 6 18:34:22 2007 -0400 Stubbed out the new __init__.py for the new parted module. For starters, we need to reimplement all of the existing pyparted functionality as-is and mark it as deprecated (with the exception of the many enums). commit ebdd22501530a9dc9623d7cb127b6ac1a926537f Author: David Cantrell Date: Fri Oct 5 17:20:19 2007 -0400 Stubbed out _ped.FileSystemType() and _ped.FileSystem() objects. commit d55690c16b2c4c035aaf32c15b00733ec55eb376 Author: David Cantrell Date: Fri Oct 5 16:31:37 2007 -0400 Stubbed out _ped.Disk(), _ped.DiskType(), and _ped.DiskTypeFeature() objects. commit 83819f97d6fee528052957c30b0641e898a5c300 Author: David Cantrell Date: Fri Oct 5 15:14:52 2007 -0400 Renamed all static PyTypeObject types to the format _ped_NAME_Type_obj where NAME would be Device, Disk, Partition, PartitionType, Unit, Sector, and so on. commit 6a58be32b2cf8836bc489c93e63863a8fbcc965d Author: David Cantrell Date: Fri Oct 5 14:57:41 2007 -0400 Ignore autom4te.cache. commit a949266055b4e2a8ce32da428887e987cd29b337 Author: David Cantrell Date: Fri Oct 5 14:57:10 2007 -0400 Stubbed out _ped.PartitionType(), _ped.PartitionFlag(), and _ped.Partition() objects. commit e23a6fa47689725c075a560972a9ba26a6e7e0c0 Author: David Cantrell Date: Fri Oct 5 14:13:30 2007 -0400 Explaining why I'm not using an automatic binding generator. commit 365a5084b406dc0bdcb9175509a258e53eebf477 Author: David Cantrell Date: Fri Oct 5 11:51:41 2007 -0400 Stubbed out _ped.Constraint() object. commit 66e355984344b058c521c2a97e309fc1e16db09d Author: David Cantrell Date: Fri Oct 5 11:37:22 2007 -0400 Stubbed out _ped.Alignment() object. commit 96210b61f9d28bafe1e1ea7c52b7df431ebdacbb Author: David Cantrell Date: Fri Oct 5 11:29:12 2007 -0400 Implemented stub for the _ped.Geometry() object type. commit 65061557e29760b0886a3567ac4700ce104011ea Author: David Cantrell Date: Thu Oct 4 22:56:41 2007 -0400 PyObject_HEAD_INIT(NULL) -> PyObject_HEAD_INIT(&PyType_Type) commit e61295131be5834e187e9307da8799d0b2ee1185 Author: David Cantrell Date: Thu Oct 4 22:56:18 2007 -0400 Added _ped.Timer() object. Stub mostly for now. commit ec0df169e57183bc726e8b8b11c9dd3fbd5d9d5c Author: David Cantrell Date: Thu Oct 4 22:55:06 2007 -0400 Move AC_PROG_LIBTOOL to after AC_HEADER_STDC. 
commit 1878c34ee71bd791fc0f3b576bdf7fc33a59817e Author: David Cantrell Date: Thu Oct 4 22:25:43 2007 -0400 Make sure Python module files are installed directly to site-packages and not a subdirectory in site-packages. commit 25c99a42d6f89aa97c5ac47f0b266f0442807c5e Author: David Cantrell Date: Thu Oct 4 21:51:05 2007 -0400 Return -1 instead of NULL on error for int functions. Fix py_ped_unit_get_default() so it returns a new _ped.Unit() object containing the PedUnit we get from libparted. commit fe0b19c4ddbe38e6ff3e5924be7c710a0869a3f8 Author: David Cantrell Date: Thu Oct 4 21:50:34 2007 -0400 Return -1 instead of NULL for error conditions on int functions. commit 2f71538cf7a3efd97f5719a362d62848584cc72c Author: David Cantrell Date: Thu Oct 4 21:49:54 2007 -0400 Change PyObject_HEAD_INIT(NULL) to PyObject_HEAD_INIT(&PyType_Type). commit b784ac7267ae166c16ee6a73123866af56b603c2 Author: David Cantrell Date: Thu Oct 4 19:31:26 2007 -0400 Ignore vim swap files. commit 3d673361db82774abda159686d23558d82a14358 Author: David Cantrell Date: Thu Oct 4 19:31:02 2007 -0400 Stubbed out pydevice.h types (PedSector, PedCHSGeometry, and PedDevice). Enough so that pyunit.c can be finished out later. commit eb0fbf3c7a7f109e54ec0666d2239031a496b1f6 Author: David Cantrell Date: Thu Oct 4 19:30:01 2007 -0400 Removed what was in pyconstraint.h for now since it'll all change shortly. commit e4b4427996bc5b6c116aa6f9b9de883f6b0fde43 Author: David Cantrell Date: Thu Oct 4 19:29:34 2007 -0400 Updated to do list (this happens a lot). commit 0436ae5e907348e3af3211f9986c33d9ed6a5a3f Author: David Cantrell Date: Thu Oct 4 16:24:49 2007 -0400 Yeah, I did this. commit a91ef22a105fd918c6e1c7fca5f3283eea55b66b Author: David Cantrell Date: Thu Oct 4 16:12:31 2007 -0400 Removed items that have been completed. commit 84b41ff53041d22d0e2d809490b7281cdeae1a3e Author: David Cantrell Date: Thu Oct 4 16:12:17 2007 -0400 Added the _ped.Sector and _ped.CHSGeometry types to the module initialization. commit 1a015b58e7ce8d785e63ccd8aa6ea3c951448eb6 Author: David Cantrell Date: Thu Oct 4 16:08:10 2007 -0400 Forgot to commit the pydevice.c changes for src/Makefile.am. commit ba76de0f8f2413cb9caccfd407cf2e12bb23bef6 Author: David Cantrell Date: Thu Oct 4 16:07:44 2007 -0400 Added pydevice.c and pydevice.h and made modifications to the automake files as necessary. pydevice defines the PedSector and PedCHSGeometry types for now, but will be the main 1:1 point for device.h in libparted. commit 715f74288aa0f0c908ddce2a3bdfd990a6683798 Author: David Cantrell Date: Thu Oct 4 16:01:14 2007 -0400 More detailed listing of the types and functions that need to be created for the _ped module. commit 9fd5648a5b06583b75be7be5c765e7651617ddab Author: David Cantrell Date: Thu Oct 4 16:00:47 2007 -0400 Simple script to fire up Python so I can find the _ped module just built here. commit f105bb05a2971de650da1e335ebda14c8e3e6ad8 Author: David Cantrell Date: Thu Oct 4 16:00:25 2007 -0400 Ignore even more junk. commit 28aabfc6268181ca0236bf16f9ec6c28df121401 Author: David Cantrell Date: Thu Oct 4 14:50:56 2007 -0400 Ignore more junk. commit f08e3305e63fb0ddf621dea6d71b49117e5ed126 Author: David Cantrell Date: Thu Oct 4 14:49:36 2007 -0400 Ignore junk. commit 1a4fcc693563c5b764ecde7b98df7935b3dd7bf0 Author: David Cantrell Date: Thu Oct 4 14:31:54 2007 -0400 Added get/set methods for the 'val' member of the _ped.Unit object. Also started filling in more functions in pyunit.c. 
commit 4f8f10eb904dfdcf77fc3319b0a6012b0fee3d81 Author: David Cantrell Date: Thu Oct 4 13:25:37 2007 -0400 Some docs. commit f3dc0fa50627f32270de58314feb54f64ec1e2a8 Author: David Cantrell Date: Thu Oct 4 13:25:16 2007 -0400 Define the ENUM macro for easily bringing over enumerated types from libparted. This is more or less the same as REGISTER_ENUM from the previous pyparted. commit 21db8d925033964d6946cfeb2c27fd5361e1f99b Author: David Cantrell Date: Thu Oct 4 13:24:47 2007 -0400 Renamed partedmodule.c to _pedmodule.c. Added pyunit stuff. commit 8768ea1934119f38594cf63bfc8f9180e9a9da44 Author: David Cantrell Date: Thu Oct 4 13:23:45 2007 -0400 automake/autoconf updates. commit f90a34efc5fd14939e8a3ff34612ea20c525b0c8 Author: David Cantrell Date: Thu Oct 4 13:23:02 2007 -0400 Added pyunit.h and pyunit.c, which are the Python 1:1 function mappings for libparted/unit.c. commit 9283a5de2bd5f3f5d74958a94a46fff1a54afc6e Author: David Cantrell Date: Wed Oct 3 11:36:01 2007 -0400 Updated to do list again. commit 23b0d1eb9745a93cb7dc69426d10126d02102b07 Author: David Cantrell Date: Tue Oct 2 15:06:31 2007 -0400 Forgot one other change to _pedmodule.c. commit aa3b79eef334088d96f3f31e031de9b5004e5e4c Author: David Cantrell Date: Tue Oct 2 15:02:32 2007 -0400 _partedmodule -> _pedmodule commit 0bdfd5b30954555a5f0ecb44e96d9c780fe5d3b7 Author: David Cantrell Date: Tue Oct 2 15:00:09 2007 -0400 Change the name of the _parted module to _ped. commit 10498deece42a4cf37d25b8abe9973119bbca4cc Author: David Cantrell Date: Tue Oct 2 14:59:51 2007 -0400 Change argument list to PyObject *s, PyObject *args. commit 8aae12f58a251692273d368f050a7eb10b369fd7 Author: David Cantrell Date: Tue Oct 2 14:48:00 2007 -0400 Removed old files. commit c484bc02c4e858fc3017d0ad04e1c512a012ad44 Author: David Cantrell Date: Tue Oct 2 14:47:53 2007 -0400 Added new pyconstraint.h and pygeom.h, which are in progress. commit f7d3488d1a82d378643df05000cf9dd6b558cd0e Author: David Cantrell Date: Tue Oct 2 14:47:17 2007 -0400 Started new _partedmodule.c in src directory. Shortened source file list in Makefile to only those sources that are valid now. commit 3706c385b36abc76adebb3f1979fc2ed656c08d9 Author: David Cantrell Date: Tue Oct 2 14:46:32 2007 -0400 Renamed partedmodule.h to _partedmodule.h, started over. Removed references to old include files. commit dd9f5eef5df52d7105060d7c3b696d9a832f4cf8 Author: David Cantrell Date: Tue Oct 2 14:20:11 2007 -0400 Removed old headers. commit 73d15059d5de348b58ea3a9a686c9195eb74ab24 Author: David Cantrell Date: Tue Oct 2 13:52:31 2007 -0400 Renamed mkrelease.sh to the more appropriate bootstrap name. commit 81a160c747ac3e35f64b898c66b69cdc6ace8271 Author: David Cantrell Date: Tue Oct 2 13:51:34 2007 -0400 In my projects, I let the revision control system track the ChangeLog. Removed the static ChangeLog. commit 1ba58164334fe40e36e7ee217b53bc8794accbd4 Author: David Cantrell Date: Tue Oct 2 13:47:27 2007 -0400 So I think we're getting somewhere. The automake checks are in place to find Python and libparted and we actually link the module correctly. commit a91c92d2c541a249e8e80c53b916544314f53798 Author: David Cantrell Date: Tue Oct 2 13:46:58 2007 -0400 More things on the to do list. commit a1700f1835a91f92d128443a0ca7b81b9a3e0e58 Author: David Cantrell Date: Tue Oct 2 13:46:36 2007 -0400 Make _partedmodule.so. commit f55054c657834eda229e0c7b12b3e3356df033cc Author: David Cantrell Date: Tue Oct 2 13:43:48 2007 -0400 Do not run make distclean. Also, the m4 directory will be there. 
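Commit f3dc0fa5 above defines the ENUM() macro (it is removed again in the Nov 13 2007 entries earlier in this log); one plausible shape for it, assuming d holds the module dictionary during init_ped():

    /* expose a libparted constant in the _ped module under its own name */
    #define ENUM(val)  PyDict_SetItemString(d, #val, PyInt_FromLong(PED_##val))

    /* for example, during module initialization: */
    ENUM(UNIT_SECTOR);     /* becomes, e.g., _ped.UNIT_SECTOR */
    ENUM(UNIT_BYTE);       /* becomes, e.g., _ped.UNIT_BYTE */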
commit b281109f8dce5bc0ef33391fe75d6a3c07bc55de Author: David Cantrell Date: Tue Oct 2 13:29:08 2007 -0400 Updates for libparted automake checks. commit 1ba56ea7102bf61f3c5bf8b84e60208d30652e16 Author: David Cantrell Date: Tue Oct 2 13:08:20 2007 -0400 Renamed partedmodule.c to _partedmodule.c. The C module is only going to be used by the higher level Python modules rather than directly by users. commit 2faea7bcf8ab42139deb3373de05191f55888061 Author: David Cantrell Date: Tue Oct 2 13:05:39 2007 -0400 More automake clean ups. commit 881e6564b6d1bce6ce7d04eab96a6ab7a31ee5ee Author: David Cantrell Date: Tue Oct 2 13:05:14 2007 -0400 Documentation updates. commit 676ec45f549bc9a8aa95bd199ae9a1bdb3e907b3 Author: David Cantrell Date: Mon Oct 1 22:48:40 2007 -0400 We won't be tracking the INSTALL file since the standard one will work fine. Expand the list of files for 'make maintainer-clean'. commit c21c275f479fa94b7210511b3242486be6b155f4 Author: David Cantrell Date: Mon Oct 1 16:10:57 2007 -0400 Shorten the MAINTAINERCLEANFILES list. commit f0d53d1c75c2f3a2dca2b4e418b58e9909d9fe36 Author: David Cantrell Date: Mon Oct 1 16:10:28 2007 -0400 LIBPARTED -> libparted in configure output. commit 4b6a944b7e2f07969f33c9febc89e76244cbd284 Author: David Cantrell Date: Mon Oct 1 16:09:13 2007 -0400 A to do list, like there's some sort of point to it. commit 5a0e625f6d577d02fd2fbd74cde463b9e59912b5 Author: David Cantrell Date: Mon Oct 1 15:52:30 2007 -0400 Now have it generating the autoconf/automake environment without any errors, but I'm not sure it actually works now. commit 078ffbc9eb2d4c711d0d689d2220be07c3607b2a Author: David Cantrell Date: Mon Oct 1 15:51:45 2007 -0400 Checks for Python on the system (m4 macros from gnome-python-2.20.0). commit 884f2971add70470640fab734dc4cb55396b479c Author: David Cantrell Date: Mon Oct 1 15:26:32 2007 -0400 Correct src and include makefile targets. commit 502a7f5147100d5c220b3308fabbaf82cb4fb54d Author: David Cantrell Date: Mon Oct 1 15:21:39 2007 -0400 More autoconf/automake changes. commit 521d9c330d15c5cb57dc9921c21b57745b4f2b9e Author: David Cantrell Date: Mon Oct 1 14:42:52 2007 -0400 Create the m4 subdirectory if it doesn't exist. commit 8b3955ef5be6ad6d4c37796e96866984d1fecad0 Author: David Cantrell Date: Mon Oct 1 14:40:00 2007 -0400 Beginnings of the autoconf/automake transition for the source code. commit c81068394e7bfda870e3bcdf704081ccb96e2219 Author: David Cantrell Date: Mon Oct 1 14:38:47 2007 -0400 Updated documentation (or at least indicated that I'm working on it). commit 5502f996cb593d02da523250d8fc1001915f25f1 Author: David Cantrell Date: Mon Oct 1 13:50:35 2007 -0400 Moved source files to src/ subdirectory. commit 4b98c68eede18659bf32946dbb666467f49606bc Author: David Cantrell Date: Mon Oct 1 13:49:56 2007 -0400 Moved header files to include/ subdirectory. commit 37130fbdbbd78bae98fccc637557bcfe00fdb7e9 Author: David Cantrell Date: Fri Aug 10 17:08:29 2007 -0400 tar is no longer a target, remove from .PHONY commit 9bd4c817d7036b242842b22b580231a607551c23 Author: David Cantrell Date: Fri Aug 10 16:55:37 2007 -0400 Modify the TAG variable so it doesn't replace . with _ in tag names. commit 5639e79bdf225a70ff5cd4c2e43aa81a7c4f7a92 Author: David Cantrell Date: Fri Aug 10 16:50:49 2007 -0400 New version. commit cc4ae2ac3aefdc21f9b111ed57cd9ecc10e27e5f Author: David Cantrell Date: Fri Aug 10 16:22:26 2007 -0400 List other authors of the software. 
commit 4e4c238352f34189d052c0754839a1f3f1c4bd7c Author: David Cantrell Date: Fri Aug 10 16:22:15 2007 -0400 Relicense pyparted as GPL version 2 or any later version. commit da593c8fc451b791134e04e00b2f035286e544d8 Author: David Cantrell Date: Thu Jun 14 19:12:54 2007 +0000 * pyparted.spec: Bump version. * pyparted.spec: Clean up wording in package description, BuildRequire pkgconfig (#226337). commit d227e6958fe6f82d6e554fae5c300da1796586bb Author: David Cantrell Date: Thu Jun 14 18:19:15 2007 +0000 * pyparted.spec: Bump version. * pyparted.spec: Remove buildroot as first step in install section. commit ae0f5cb13331d4a46001bb123e65244f69ba1abe Author: David Cantrell Date: Mon Apr 23 17:20:22 2007 +0000 * Makefile: Inherit CC and CFLAGS from calling environment. * pyparted.spec: Pass %{optflags} and %{__cc} to make process in %build. commit b6fe07a7cd87b0046b13d9838a32ce4821c9986a Author: David Cantrell Date: Fri Apr 20 16:31:54 2007 +0000 Forgot DESTDIR. commit fd258cdbe1bec03dbc9b06eddc287d9497252670 Author: David Cantrell Date: Fri Apr 20 15:51:03 2007 +0000 Use correct libdir (/usr/lib or /usr/lib64). commit 29118a97b22de57234b8c0ad9875bd299691af47 Author: David Cantrell Date: Fri Apr 20 15:43:59 2007 +0000 Add -fPIC to CFLAGS. commit 56289951e3a8ec24eaefb2a8bf3370e793b87b2c Author: David Cantrell Date: Fri Apr 20 15:35:05 2007 +0000 gz -> bz2 commit 4b8224de4d9036a4338b5fc63ed8fad2259be70c Author: David Cantrell Date: Fri Apr 20 15:31:58 2007 +0000 Correct changelog entry date. commit ed29069b02defe4ac34dd3208650e76216ec403a Author: David Cantrell Date: Fri Apr 20 15:29:56 2007 +0000 Remove stuff related to PedConstraint object (work in progress, post-F7). commit faffe420d84d4eb9c2c269c6ddbe768c0a83f806 Author: David Cantrell Date: Fri Apr 20 15:28:36 2007 +0000 Bump version. commit 7e8f8ae2b0e64b8dcaac24d2070bd669ac53c47f Author: David Cantrell Date: Fri Apr 20 15:28:07 2007 +0000 * partedmodule.c: Whitespace cleanups. * AUTHORS: Note where to file bugs. * pyparted.spec: Merge review cleanups. commit 9888d49cf576de822ec1dc974ac7ff7d6abcac82 Author: David Cantrell Date: Mon Mar 19 20:19:44 2007 +0000 * Makefile.am, configure.in, autogen.sh: Remove autotools stuff. * Makefile: Add simple Makefile. * pyparted.spec: Clean up for Fedora packaging guidelines, require parted >= 1.8.4. commit f718e19e40e3314b5add9d4c6016415500b685ba Author: David Cantrell Date: Fri Mar 16 21:13:12 2007 +0000 Bump release. commit 0feef9f07b91809ba4286ba73ed629769ebab150 Author: David Cantrell Date: Thu Feb 8 17:28:48 2007 +0000 * Thu Feb 08 2007 David Cantrell - 1.8.5-1 - Define and use python_sitearch rather than python_sitelib commit 1f2971923144a49ff8468d5c82c188f79a3fbbb4 Author: David Cantrell Date: Thu Feb 8 16:30:30 2007 +0000 * Thu Feb 08 2007 David Cantrell - 1.8.4-1 - Use preferred BuildRoot (package review) - Define and use python_sitelib macro (package review) commit 743cceb0ec72e78cabbc4850f3260125e9395f7a Author: David Cantrell Date: Fri Jan 12 19:45:33 2007 +0000 Required parted-1.8.2 or higher. commit 3efeffc3650dc5cfd51a96327b144e5e9c4c54d7 Author: Jeremy Katz Date: Wed Jan 10 20:38:29 2007 +0000 2007-01-10 Jeremy Katz * pyparted.spec: Bump version. * pygeometry.c (py_ped_geometry_dealloc): Use PyObject_DEL instead of PyMem_DEL * pyfilesystem.c (py_ped_file_system_type_dealloc): Likewise. * pyexception.c (py_ped_exception_dealloc): Likewise. * pydisk.c: Likewise. * pydevice.c: Likewise. * pyconstraint.c (py_ped_constraint_dealloc): Likewise. 
commit 9de99c4a28d0e51451e4ed1226668b6f3b979b98 Author: David Cantrell Date: Thu Dec 14 16:49:51 2006 +0000 Pass s->dev rather than s to ped_device_destroy(). commit 668e67a1f685af28aa492007d994d9bf8c04e242 Author: David Cantrell Date: Thu Dec 14 16:05:40 2006 +0000 Require parted-1.8.1. commit f142930628f72d28383f0f50a64752cfc1704d95 Author: David Cantrell Date: Thu Dec 14 16:04:55 2006 +0000 * pydevice.c: Add py_ped_device_destroy() function so we can call ped_device_destroy() in libparted. commit 99120149f55740c90fd98800091c51e895bf326c Author: David Cantrell Date: Thu Nov 30 20:50:10 2006 +0000 * pyparted.spec: Bump version. Determine Python version to enable in the %build section rather than assuming 2.4. Makes the source RPM more easy to move between distribution releases. commit 6ce55879a9b501b789c93bd3845acdaaecd04fe6 Author: David Cantrell Date: Fri Nov 17 21:13:05 2006 +0000 Bump version. commit 9e84031f77c63cd51d64b5b01e81828bf0cc7610 Author: David Cantrell Date: Wed Aug 30 18:02:06 2006 +0000 Reference source URL on people.redhat.com commit b3fc15cfabaeca55814bb049e042eedc7606c07c Author: David Cantrell Date: Wed Aug 30 17:59:18 2006 +0000 Tag for real. commit e720e4e56b112ca99e2589e3e57ee573573ce40d Author: David Cantrell Date: Wed Aug 30 17:57:38 2006 +0000 Make sure we don't get %{?dist} in CVS tags. commit 792cfdf062b054b7aec3b17bb42b7dc30dd41570 Author: David Cantrell Date: Wed Aug 30 17:54:58 2006 +0000 * Wed Aug 30 2006 David Cantrell - 1.7.3-1 - Include parted/constraint.h in required header files * Wed Aug 30 2006 David Cantrell - 1.7.2-2 - Require parted-1.7.1 or higher commit d2fa7dc73a072dc7a19ac69c202475329c393636 Author: David Cantrell Date: Tue Jul 25 17:22:53 2006 +0000 Add HPSERVICE, PALO, PREP, and MSFT_RESERVED to partition types list. commit ce7370e3e395c57acccaee1d900da65d63886460 Author: David Cantrell Date: Mon May 29 14:31:42 2006 +0000 Bump version. commit c5bb884efb45077f3cd89fa3329eb5124d7e6ef9 Author: David Cantrell Date: Fri May 19 20:44:16 2006 +0000 * pyparted.spec: Bump version. Require parted >= 1.7.0. commit 9f06f76055d61b037c95d80f65f9d0ae4a3e4ded Author: Chris Lumens Date: Mon Dec 12 14:54:17 2005 +0000 Resync with package cvs. commit b038c1d8cbae08e3796c0150b7b36a4da9f7e1b8 Author: Peter Jones Date: Fri Nov 11 22:04:01 2005 +0000 bump version to 1.6.10-1 commit 3b807d517143356096f3a3cf8fa3c217b4069406 Author: Peter Jones Date: Fri Nov 11 22:02:29 2005 +0000 update lots of classes to be new-style so I stop pulling my hair out. commit 180f7247974322839522da3e9a830fe7541c9a47 Author: Peter Jones Date: Fri Nov 11 22:00:51 2005 +0000 update cflags and changelog commit e8e58b2cb9d4850e6d02f1d2f33759f107526a97 Author: Peter Jones Date: Fri Nov 11 21:13:15 2005 +0000 - minor fixes for PedGeometry - convert PedPartition to a new-style class commit 0893a2464d1c26896386f690e1ebd1890017452b Author: Peter Jones Date: Fri Nov 11 20:41:01 2005 +0000 - make PedGeometry a new-style class - use descriptors for start/end/length commit db00f08f950d4d5ef60bd0496068bb9184308769 Author: Chris Lumens Date: Wed Aug 31 21:12:04 2005 +0000 Rebuilt for new parted library version. commit 63374432354c408e42f9cda8f4250d524ac778a3 Author: Chris Lumens Date: Wed Mar 16 20:07:55 2005 +0000 Doesn't hurt to update the changelog too. commit 807038a2e8aa78154c347ab38c8bc772cb2a9e06 Author: Chris Lumens Date: Wed Mar 16 20:02:58 2005 +0000 Updated for gcc4 and python2.4. Fixed build warnings. 
commit 8f66960a81453ff64cc3cf0aa19433ea5234d4f1 Author: Jeremy Katz Date: Tue Dec 14 20:55:48 2004 +0000 2004-12-14 Jeremy Katz * pyparted.spec: Bump version. * partedmodule.c: add support for sx8 devices commit be0aca3c9b03b334c44027fe8f212e551447ab99 Author: Jeremy Katz Date: Tue Aug 17 14:55:03 2004 +0000 2004-08-17 Jeremy Katz * pyparted.spec: Bump version and requirements * pydevice.c (py_ped_device_getattr): heads, sectors, cylinders are for the bios geometry form (update to work with parted >= 1.6.12) commit 2fedd8383aa642d6b1cc7c4f5de3c1126aa314fe Author: Jeremy Katz Date: Thu Jul 22 07:39:54 2004 +0000 already been built as -2. *sigh* commit 2d7718078ae4958949801a9b1ed6d43fc9682387 Author: Jeremy Katz Date: Thu Jul 22 07:38:09 2004 +0000 build on ppc64 again commit 3577fb18cb2d4422f7126a23ce1ca57419fb91bc Author: Jeremy Katz Date: Thu May 13 19:36:01 2004 +0000 fix build for newer versions of gcc (fix from Jeff Law) commit 45b9730ef48668a52d983bba80bc6af0ff2299f4 Author: Jeremy Katz Date: Tue Mar 16 21:32:34 2004 +0000 2004-03-16 Jeremy Katz * partedmodule.c: make sure PARTITION_PROTECTED gets defined (#118451) commit 6b054beac29b7927540214d6492208dfdfd70d0f Author: Jeremy Katz Date: Fri Mar 12 21:50:15 2004 +0000 actually do the tagging commit 9cfe9e5587805b771fcfa1bd2a781e6bd3c1c205 Author: Jeremy Katz Date: Fri Mar 12 21:49:53 2004 +0000 excludearch ppc64 for now commit 61a6870559b4ccce591ba164c4756586f45bd979 Author: Jeremy Katz Date: Fri Mar 12 21:48:18 2004 +0000 add .cvsignore commit 6c33ee6924ce8b82d14993c3b6b3856f967db2ad Author: Jeremy Katz Date: Fri Mar 12 21:46:05 2004 +0000 use that autofoo commit 168abb796d414f1d2538a60a6a4d3599c8cb2e94 Author: Jeremy Katz Date: Fri Mar 12 21:06:23 2004 +0000 versioned requires, buildrequire python-devel commit 5c2add338e2b9b722a37ed24d780de8f08f1c435 Author: Jeremy Katz Date: Fri Mar 12 21:02:27 2004 +0000 Initial revision

pyparted-3.6/BUGS0000664000076400007640000000215211170721127010627 00000000000000

List bugs in this file, numbered, with an example that can be run from
the Python interactive shell.

---------------------------------------------------------------------------

1) parted.Disk.getLogicalPartitions() crashes

   device = parted.getDevice("/dev/sda")
   disk = parted.Disk(device)
   disk.getLogicalPartitions()

2) pydisk.c:

   py_ped_disk_type_get() returns NULL without setting an exception
   when called with None argument
   # XXX: NEED REPRODUCER

   py_ped_disk_type_check_feature() returns NULL without setting an
   exception when called with 0 as argument
   # XXX: NEED REPRODUCER

   py_ped_geometry_read() and py_ped_device_read() do not work properly
   when the read data contains 0 bytes
   # XXX: NEED REPRODUCER

3) import parted
   # create a parted.Disk
   for part in disk.partitions:
       print part

   "argument 2 must be _ped.Geometry, not _ped.Geometry"

4) import parted
   device = parted.Device(path="/dev/dm-0")
   disk = parted.Disk(device=device)
   disk.partitions[0].getDeviceNodeName()

   expected: "unrecognized disk label"
   get: "/dev/dm-01"

pyparted-3.6/README0000644000076400007640000000512411137723233011027 00000000000000

pyparted
Python bindings for libparted
-----------------------------

OVERVIEW

pyparted is a set of native Python bindings for libparted.  libparted is
the library portion of the GNU parted project.  With pyparted, you can
write applications that interact with disk partition tables and
filesystems.

The Python bindings are implemented in two layers.
Since libparted itself is written in C without any real implementation of
objects, a simple 1:1 mapping of externally accessible libparted functions
was written.  This mapping is provided in the _ped Python module.  You can
use that module if you want to, but it's really just meant for the larger
parted module.

    _ped       libparted Python bindings, direct 1:1 function mapping
    parted     Native Python code building on _ped, complete with classes,
               exceptions, and advanced functionality.

The _ped module is written and maintained by hand.  I chose to do this
rather than rely on a tool like SWIG or Pyrex for several reasons.  Mostly
because I was the GNU parted maintainer, but also because libparted is
sort of complex.  It's a low-level system tool and I found it doesn't
translate well through the tools I tried.  This is nothing against those
tools; I just don't think libparted is ideal to go through SWIG or Pyrex.
By writing my own bindings, I can also find bugs in libparted that I may
have overlooked before.  See the WHY file for more explanation as to why
I wrote the bindings by hand.

HISTORY

pyparted started life at Red Hat and continues there today.  The main
reason for writing it was to let anaconda (the Red Hat installation
program, now used by RHEL, Fedora, and many other distributions) interact
with libparted.  Anaconda is written in Python, so native bindings made
sense.

pyparted went through many rewrites, but the end result was always the
same: an incomplete Python API with just enough provided for anaconda to
do its job.  The latest iteration of pyparted aims to be a complete API
mapping and even provide a nice set of classes, so that people might want
to integrate it into other installers or even write other applications
(maybe a Python-based alternative to parted(8) or fdisk(8)).

QUESTIONS

If you are reporting a pyparted failure in Fedora, it's most useful if you
file a bug at http://bugzilla.redhat.com/ against the appropriate Fedora
release you are using.  Alternatively, you can file bugs directly on the
project page:

    http://fedorahosted.org/pyparted/

If you just have questions about pyparted, you can email us directly using
the contact information in the AUTHORS file.  We will do our best to help
you.
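EXAMPLE

As a rough illustration of the two layers described above, here is a
minimal sketch (not part of the original README) that walks a disk with
the high-level parted module.  It only uses calls that already appear in
the BUGS file (parted.getDevice, parted.Disk, Partition.getDeviceNodeName)
and assumes a device at /dev/sda that carries a recognized disk label:

    # minimal sketch, Python 2 era: list the partition device nodes on /dev/sda
    import parted

    device = parted.getDevice("/dev/sda")    # probing happens in _ped underneath
    disk = parted.Disk(device)               # read the existing partition table
    for part in disk.partitions:             # high-level parted.Partition objects
        print part.getDeviceNodeName()       # e.g. /dev/sda1

The _ped module exposes the same operations as bare functions; the parted
package wraps them in the classes and exceptions shown here.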
pyparted-3.6/m4/0000775000076400007640000000000011542323614010546 500000000000000pyparted-3.6/m4/libparted.m40000664000076400007640000000406211540541734012703 00000000000000

dnl libparted.m4 - libparted checks
dnl
dnl Copyright (C) 2007 Red Hat, Inc.
dnl
dnl This copyrighted material is made available to anyone wishing to use,
dnl modify, copy, or redistribute it subject to the terms and conditions of
dnl the GNU General Public License v.2, or (at your option) any later version.
dnl This program is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY expressed or implied, including the implied warranties of
dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
dnl Public License for more details. You should have received a copy of the
dnl GNU General Public License along with this program; if not, write to the
dnl Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
dnl 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
dnl source code or documentation are not subject to the GNU General Public
dnl License and may only be used or replicated with the express permission of
dnl Red Hat, Inc.
dnl
dnl Red Hat Author(s): David Cantrell

dnl Check for GNU parted
dnl $1  Minimum version of libparted we require (e.g., 1.8.6)
AC_DEFUN([AM_CHECK_LIBPARTED],[
    PKG_CHECK_MODULES(libparted, libparted >= $1)
    AC_SUBST(LIBPARTED_LIBS)

    AC_CHECK_LIB([parted], [ped_get_version], [:],
                 [AC_MSG_FAILURE([*** Unable to find requested library libparted])])
    AC_CHECK_HEADERS([parted/parted.h], [],
                     [AC_MSG_FAILURE([*** Header file $ac_header not found.])])

    dnl Use pkg-config to gather compile flags
    LIBPARTED_LIBS="$(pkg-config --libs libparted)"
    LIBPARTED_VERSION=$1
])

dnl Check for PED_PARTITION_LEGACY_BOOT in parted header files
AC_DEFUN([AM_CHECK_PED_PARTITION_LEGACY_BOOT],[
    AC_COMPILE_IFELSE(
        [AC_LANG_PROGRAM([[#include &AC_FD_CC 2>&AC_FD_CC
then
  eval "py_cv_mod_$py_mod_var=yes"
else
  eval "py_cv_mod_$py_mod_var=no"
fi
])
py_val=`eval "echo \`echo '$py_cv_mod_'$py_mod_var\`"`
if test "x$py_val" != xno; then
  AC_MSG_RESULT(yes)
  ifelse([$3], [],, [$3
])dnl
else
  AC_MSG_RESULT(no)
  ifelse([$4], [],, [$4
])dnl
fi
])

dnl a macro to check for ability to create python extensions
dnl AM_CHECK_PYTHON_HEADERS([ACTION-IF-POSSIBLE], [ACTION-IF-NOT-POSSIBLE])
dnl function also defines PYTHON_INCLUDES
AC_DEFUN([AM_CHECK_PYTHON_HEADERS],
[AC_REQUIRE([AM_PATH_PYTHON])
AC_SUBST(PYTHON_INCLUDES)
AC_SUBST(PYTHON_LIBS)
AC_SUBST(PYTHON_EMBED_LIBS)
AC_SUBST(PYTHON_LDFLAGS)
AC_MSG_CHECKING(for headers required to compile python extensions)
dnl deduce PYTHON_INCLUDES
py_prefix=`$PYTHON -c "import sys; print sys.prefix"`
py_exec_prefix=`$PYTHON -c "import sys; print sys.exec_prefix"`
if test -x "$PYTHON-config"; then
  PYTHON_INCLUDES=`$PYTHON-config --includes 2>/dev/null`
else
  PYTHON_INCLUDES="-I${py_prefix}/include/python${PYTHON_VERSION}"
  if test "$py_prefix" != "$py_exec_prefix"; then
    PYTHON_INCLUDES="$PYTHON_INCLUDES -I${py_exec_prefix}/include/python${PYTHON_VERSION}"
  fi
fi
dnl check if the headers exist:
save_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS $PYTHON_INCLUDES"
AC_TRY_CPP([#include <Python.h>],dnl
[AC_MSG_RESULT(found)
AC_MSG_CHECKING(for python libraries)
dnl Check whether python was compiled as shared library
link_pymodules_libpython=false;
if egrep "^#define Py_ENABLE_SHARED" "${py_exec_prefix}/include/python${PYTHON_VERSION}/pyconfig.h" > /dev/null ; then
  if test x`uname -s` != xDarwin; then
    PYTHON_LDFLAGS="-no-undefined"
    link_pymodules_libpython=true;
  fi
fi
dnl use distutils to get some python configuration variables..
PYTHON_LIB_DEPS=`$PYTHON -c "from distutils import sysconfig; print sysconfig.get_config_var('SYSLIBS'), sysconfig.get_config_var('SHLIBS')"`
PYTHON_LIBDIR=`$PYTHON -c "from distutils import sysconfig; print sysconfig.get_config_var('LIBDIR')"`
PYTHON_LIBPL=`$PYTHON -c "from distutils import sysconfig; print sysconfig.get_config_var('LIBPL')"`
save_LIBS="$LIBS"
PYTHON_EMBED_LIBS="-L${PYTHON_LIBDIR} ${PYTHON_LIB_DEPS} -lpython${PYTHON_VERSION}"
LIBS="$LIBS $PYTHON_EMBED_LIBS"
AC_TRY_LINK_FUNC(Py_Initialize, dnl
[ LIBS="$save_LIBS";
  if $link_pymodules_libpython; then PYTHON_LIBS="$PYTHON_EMBED_LIBS"; fi
  AC_MSG_RESULT([$PYTHON_EMBED_LIBS]);
  $1], dnl
[ PYTHON_EMBED_LIBS="-L${PYTHON_LIBPL} ${PYTHON_LIB_DEPS} -lpython${PYTHON_VERSION}"
  LIBS="$save_LIBS $PYTHON_EMBED_LIBS";
  AC_TRY_LINK_FUNC(Py_Initialize, dnl
  [ LIBS="$save_LIBS";
    if $link_pymodules_libpython; then PYTHON_LIBS="$PYTHON_EMBED_LIBS"; fi
    AC_MSG_RESULT([$PYTHON_EMBED_LIBS]);
    $1], dnl
    AC_MSG_RESULT(not found); $2)
])
$1],dnl
[AC_MSG_RESULT(not found)
$2])
CPPFLAGS="$save_CPPFLAGS"
])
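The distutils lookups in AM_CHECK_PYTHON_HEADERS are easier to see in
isolation.  The following sketch (not part of the m4 file above) runs the
same sysconfig queries by hand, under whichever Python 2 interpreter
$PYTHON would point at during configure:

    # minimal sketch: the configuration values the macro turns into
    # PYTHON_INCLUDES, PYTHON_LIB_DEPS, PYTHON_LIBDIR, and PYTHON_LIBPL
    import sys
    from distutils import sysconfig

    print "prefix:      %s" % sys.prefix                            # basis for PYTHON_INCLUDES
    print "exec_prefix: %s" % sys.exec_prefix
    print "SYSLIBS:     %s" % sysconfig.get_config_var('SYSLIBS')   # -> PYTHON_LIB_DEPS
    print "SHLIBS:      %s" % sysconfig.get_config_var('SHLIBS')    # -> PYTHON_LIB_DEPS
    print "LIBDIR:      %s" % sysconfig.get_config_var('LIBDIR')    # -> PYTHON_LIBDIR
    print "LIBPL:       %s" % sysconfig.get_config_var('LIBPL')     # -> PYTHON_LIBPL

The macro stitches these values into PYTHON_EMBED_LIBS and falls back from
LIBDIR to LIBPL when linking against Py_Initialize fails the first time.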