pax_global_header00006660000000000000000000000064140067237320014516gustar00rootroot0000000000000052 comment=909a0636665ed96f97babc3b887f9badc88875c3 createrepo_c-0.17.0/000077500000000000000000000000001400672373200142365ustar00rootroot00000000000000createrepo_c-0.17.0/.git-commit-template000066400000000000000000000006521400672373200201240ustar00rootroot00000000000000 # In addition to regular commit message, you can uncomment and fill in the # following to include this change in the released RPM package changelog: # = changelog = # msg: # type: # resolves: # related: # msg = message to be included in the changelog # type = one of: bugfix/enhancement/security # resolves = URLs to bugs or issues resolved by this commit # related = URLs to any related bugs or issues createrepo_c-0.17.0/.gitignore000066400000000000000000000012261400672373200162270ustar00rootroot00000000000000# "Garbage" from compilation *.o *.so *.pyc *_wrap.c snippets/ # Packages and tarball createrepo*.rpm python-createrepo*.rpm createrepo_c-*.tar.xz # Makefile generated files Makefile cmake_install.cmake install_manifest.txt CMakeCache.txt CMakeFiles CPackConfig.cmake CPackSourceConfig.cmake _CPack_Packages/ src/version.h src/createrepo_c.pc src/deltarpms.h src/python/createrepo_c/ _skbuild/ # Python distribution stuff dist/ MANIFEST *.egg-info/ # Devel stuff notes build/ tags *_ # Deltarepo acceptance tests deltarepo/acceptance_tests/repos/repo*/repodata # Bug fixing and feature request temp directories BUG*/ JIRA*/ ISSUE*/ # vim swap files *.sw? createrepo_c-0.17.0/.tito/000077500000000000000000000000001400672373200152735ustar00rootroot00000000000000createrepo_c-0.17.0/.tito/packages/000077500000000000000000000000001400672373200170515ustar00rootroot00000000000000createrepo_c-0.17.0/.tito/packages/.readme000066400000000000000000000002371400672373200203110ustar00rootroot00000000000000the .tito/packages directory contains metadata files named after their packages. Each file has the latest tagged version and the project's relative directory. createrepo_c-0.17.0/.tito/tito.props000066400000000000000000000002231400672373200173340ustar00rootroot00000000000000[buildconfig] builder = tito.builder.Builder tagger = tito.tagger.VersionTagger changelog_do_not_remove_cherrypick = 0 changelog_format = %s (%ae) createrepo_c-0.17.0/AUTHORS000066400000000000000000000006561400672373200153150ustar00rootroot00000000000000Tomas Mlcoch Mathieu Bridon Tom Prince Scott K Logan Ville Skyttä Luke Macken Jarek Polok Neal Gompa Ralph Bean Frank Schreiner Daniel Alley createrepo_c-0.17.0/CMakeLists.txt000066400000000000000000000136311400672373200170020ustar00rootroot00000000000000PROJECT (createrepo_c C) CMAKE_MINIMUM_REQUIRED (VERSION 2.8.10) include(GNUInstallDirs) set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS} -ggdb -g -Wall -Wextra -Og") IF(NOT CMAKE_BUILD_TYPE) SET(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." FORCE) ENDIF(NOT CMAKE_BUILD_TYPE) IF(CMAKE_BUILD_TYPE AND CMAKE_BUILD_TYPE STREQUAL "DEBUG") MESSAGE("Build type is set do DEBUG! (Used flags: \"${CMAKE_C_FLAGS_DEBUG}\")") ENDIF() option(BUILD_LIBCREATEREPO_C_SHARED "Build libcreaterepo_c as a shared library" ON) if(NOT BUILD_LIBCREATEREPO_C_SHARED) set(CMAKE_POSITION_INDEPENDENT_CODE 1) endif() option(CREATEREPO_C_INSTALL_DEVELOPMENT "Install createrepo_c development files." ON) option(CREATEREPO_C_INSTALL_MANPAGES "Install createrepo_c man-pages." 
ON) # Add path with own cmake modules INCLUDE_DIRECTORIES (${CMAKE_SOURCE_DIR}) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") SET(G_LOG_DOMAIN "C_CREATEREPOLIB") # Find necessary libraries find_package(BZip2 REQUIRED) find_package(CURL REQUIRED) find_package(GLIB2 REQUIRED) find_package(GTHREAD2 REQUIRED) find_package(Libmagic REQUIRED) find_package(LibXml2 REQUIRED) find_package(LZMA REQUIRED) find_package(OpenSSL REQUIRED) find_package(Sqlite3 REQUIRED) find_package(ZLIB REQUIRED) # Add include dirs include_directories(${BZIP2_INCLUDE_DIRS}) include_directories(${CURL_INCLUDE_DIRS}) include_directories(${GLIB2_INCLUDE_DIRS}) include_directories(${LIBXML2_INCLUDE_DIR}) include_directories(${OPENSSL_INCLUDE_DIR}) include_directories(${ZLIB_INCLUDE_DIR}) IF (RPM_PATH) SET (RPM_PATH "/home/tmlcoch/git/rpm") include_directories("${RPM_PATH}/include/") SET(RPMDB_LIBRARY "${RPM_PATH}/rpmio/.libs/librpmio.so" "${RPM_PATH}/lib/.libs/librpm.so") message("Using custom RPM: ${RPMDB_LIBRARY}") ELSE (RPM_PATH) # rpm: FIND_LIBRARY (RPMDB_LIBRARY NAMES rpmdb) IF (NOT RPMDB_LIBRARY) FIND_LIBRARY (RPMDB_LIBRARY NAMES rpm) IF (NOT RPMDB_LIBRARY) MESSAGE(FATAL_ERROR "No Rpm library installed") ENDIF (NOT RPMDB_LIBRARY) ENDIF (NOT RPMDB_LIBRARY) # rpmio: FIND_LIBRARY (RPMIO_LIBRARY NAMES rpmio) IF (RPMIO_LIBRARY) SET(RPMDB_LIBRARY ${RPMIO_LIBRARY} ${RPMDB_LIBRARY}) ELSE (RPMIO_LIBRARY) MESSAGE(FATAL_ERROR "No Rpmio library installed") ENDIF (RPMIO_LIBRARY) message("Using system RPM: ${RPMDB_LIBRARY}") ENDIF (RPM_PATH) # SuSE/Mageia/Mandriva legacy weak deps support OPTION (ENABLE_LEGACY_WEAKDEPS "Enable legacy SUSE/Mageia/Mandriva weakdeps support?" ON) IF (ENABLE_LEGACY_WEAKDEPS) ADD_DEFINITIONS("-DENABLE_LEGACY_WEAKDEPS=1") ENDIF (ENABLE_LEGACY_WEAKDEPS) # drpm OPTION (ENABLE_DRPM "Enable delta RPM support?" OFF) IF (ENABLE_DRPM) pkg_check_modules(DRPM REQUIRED drpm>=0.4.0) include_directories (${DRPM_INCLUDE_DIRS}) ADD_DEFINITIONS("-DCR_DELTA_RPM_SUPPORT") ENDIF (ENABLE_DRPM) # option to enable/disable python support OPTION (ENABLE_PYTHON "Enable python support?" ON) OPTION (WITH_ZCHUNK "Build with zchunk support" ON) IF (WITH_ZCHUNK) pkg_check_modules(ZCK REQUIRED zck) include_directories(${ZCK_INCLUDE_DIRS}) SET (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWITH_ZCHUNK") SET (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DWITH_ZCHUNK") ENDIF (WITH_ZCHUNK) OPTION (WITH_LIBMODULEMD "Build with libmodulemd support" ON) IF (WITH_LIBMODULEMD) find_package(LIBMODULEMD REQUIRED) include_directories(${LIBMODULEMD_INCLUDE_DIRS}) SET (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWITH_LIBMODULEMD") SET (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DWITH_LIBMODULEMD") ENDIF (WITH_LIBMODULEMD) # Threaded XZ Compression # Note: This option is disabled by default, because Createrepo_c # parallelize a lot of tasks (including compression) by default, this # only adds extra threads on XZ library level which causes thread bloat # and for most usecases doesn't bring any performance boost. # On regular hardware (e.g. less-or-equal 4 cores) this option may even # cause degradation of performance. OPTION(ENABLE_THREADED_XZ_ENCODER "Enable threaded XZ encoder?" OFF) IF (ENABLE_THREADED_XZ_ENCODER) ADD_DEFINITIONS("-DENABLE_THREADED_XZ_ENCODER=1") ENDIF (ENABLE_THREADED_XZ_ENCODER) # Get package version INCLUDE (${CMAKE_SOURCE_DIR}/VERSION.cmake) SET (VERSION "${CR_MAJOR}.${CR_MINOR}.${CR_PATCH}") message("Package version: ${VERSION}") # Other files OPTION(ENABLE_BASHCOMP "Install Bash autocompletions?" 
ON) IF (ENABLE_BASHCOMP) pkg_check_modules(BASHCOMP bash-completion) IF (BASHCOMP_FOUND) execute_process(COMMAND ${PKG_CONFIG_EXECUTABLE} --variable=completionsdir bash-completion OUTPUT_VARIABLE BASHCOMP_DIR OUTPUT_STRIP_TRAILING_WHITESPACE) message("Bash completion directory: ${BASHCOMP_DIR}") INSTALL(FILES createrepo_c.bash DESTINATION ${BASHCOMP_DIR} RENAME createrepo_c) INSTALL(CODE " execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink createrepo_c \$ENV{DESTDIR}${BASHCOMP_DIR}/mergerepo_c) execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink createrepo_c \$ENV{DESTDIR}${BASHCOMP_DIR}/modifyrepo_c) execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink createrepo_c \$ENV{DESTDIR}${BASHCOMP_DIR}/sqliterepo_c) ") ELSEIF (BASHCOMP_FOUND) INSTALL(FILES createrepo_c.bash DESTINATION "/etc/bash_completion.d") message("Bash completion directory: /etc/bash_completion.d") ENDIF (BASHCOMP_FOUND) ENDIF (ENABLE_BASHCOMP) # Gen manpage #message("Gen manpage") #execute_process(COMMAND bash gen_manpage.sh src/cmd_parser.c ../doc/ WORKING_DIRECTORY utils/) # Add custom target for tests ADD_CUSTOM_TARGET(tests) # Subdirs ADD_SUBDIRECTORY (src) ADD_SUBDIRECTORY (doc) ENABLE_TESTING() ADD_SUBDIRECTORY (tests EXCLUDE_FROM_ALL) createrepo_c-0.17.0/COPYING000066400000000000000000000432541400672373200153010ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. 
If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. createrepo_c-0.17.0/README.md000066400000000000000000000263671400672373200155330ustar00rootroot00000000000000# createrepo_c C implementation of createrepo Run `createrepo -h` for usage syntax. # Devel tips ## Building Package build requires - Pkg name in Fedora/Ubuntu: * bzip2 (http://bzip.org/) - bzip2-devel/libbz2-dev * cmake (http://www.cmake.org/) - cmake/cmake * drpm (https://github.com/rpm-software-management/drpm) - drpm-devel/ * file (http://www.darwinsys.com/file/) - file-devel/libmagic-dev * glib2 (http://developer.gnome.org/glib/) - glib2-devel/libglib2.0-dev * libcurl (http://curl.haxx.se/libcurl/) - libcurl-devel/libcurl4-openssl-dev * libmodulemd (https://github.com/fedora-modularity/libmodulemd/) - libmodulemd-devel/ * libxml2 (http://xmlsoft.org/) - libxml2-devel/libxml2-dev * python (http://python.org/) - python3-devel/libpython3-dev * rpm (http://www.rpm.org/) - rpm-devel/librpm-dev * openssl (http://www.openssl.org/) - openssl-devel/libssl-dev * sqlite3 (https://sqlite.org/) - sqlite-devel/libsqlite3-dev * xz (http://tukaani.org/xz/) - xz-devel/liblzma-dev * zchunk (https://github.com/zchunk/zchunk) - zchunk-devel/ * zlib (http://www.zlib.net/) - zlib-devel/zlib1g-dev * *Documentation:* doxygen (http://doxygen.org/) - doxygen/doxygen * *Documentation:* sphinx (http://sphinx-doc.org/) - python3-sphinx/python3-sphinx * **Test requires:** check (http://check.sourceforge.net/) - check-devel/check * **Test requires:** python3-nose (https://nose.readthedocs.org/) - python3-nose/python3-nose * **Test requires:** xz (http://tukaani.org/xz/) - xz/ * **Test requires:** zchunk (https://github.com/zchunk/zchunk) - zchunk/ From your checkout dir: mkdir build cd build/ cmake .. make To build the documentation, from the build/ directory: make doc **Note:** For build with debugging symbols you could use (from the build/ directory): cmake -DCMAKE_BUILD_TYPE:STRING=DEBUG .. 
&& make

## Building from an rpm checkout

E.g. when you want to try weak and rich dependencies.

    cmake .. && make

**Note:** The RPM must be built in that directory

Commands I am using for building the RPM:

    cd /home/tmlcoch/git/rpm
    CPPFLAGS='-I/usr/include/nss3/ -I/usr/include/nspr4/' ./autogen.sh --rpmconfigure --with-vendor=redhat --with-external-db --with-lua --with-selinux --with-cap --with-acl --enable-python
    make clean && make

## Other build options

### ``-DENABLE_LEGACY_WEAKDEPS=ON``

Enable legacy SUSE/Mageia/Mandriva weakdeps support (Default: ON)

### ``-DENABLE_THREADED_XZ_ENCODER=ON``

Threaded XZ encoding (Default: OFF)

Note: This option is disabled by default, because Createrepo_c parallelizes
a lot of tasks (including compression) by default; this only adds extra
threads at the XZ library level, which causes thread bloat and for most use
cases doesn't bring any performance boost. On regular hardware
(e.g. less-or-equal 4 cores) this option may even cause degradation of performance.

### ``-DENABLE_DRPM=ON``

Enable DeltaRPM support using the drpm library (Default: ON)

Adds support for creating DeltaRPMs and incorporating them into the repository.

### ``-DWITH_ZCHUNK=ON``

Build with zchunk support (Default: ON)

### ``-DWITH_LIBMODULEMD=ON``

Build with libmodulemd support (Default: ON)

Adds support for working with repos containing [Fedora Modularity](https://docs.fedoraproject.org/en-US/modularity/) metadata.
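The options above are passed to ``cmake`` as ``-D`` definitions and can be combined freely. As an illustration only (not a recommended configuration), a build that enables DeltaRPM support and disables the zchunk and libmodulemd integrations could be configured like this from the build/ directory:

    cmake -DENABLE_DRPM=ON -DWITH_ZCHUNK=OFF -DWITH_LIBMODULEMD=OFF ..
    make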
## Build tarball

    utils/make_tarball.sh [git revision]

Without a git revision specified, HEAD is used.

## Build Python package

To create a binary "wheel" distribution, use:

    python setup.py bdist_wheel

To create a source distribution, use:

    python setup.py sdist

Installing a source distribution requires the installer of the package to have
all of the build dependencies installed on their system, since the code is
compiled during installation. Binary distributions are pre-compiled, but they
are likely not portable between substantially different systems, e.g. Fedora and Ubuntu.

Note: if you are building a bdist or installing the sdist on a system with an
older version of Pip, you may need to install the ``scikit-build`` Python package first.

To install either of these packages, use:

    pip install dist/{{ package name }}

To create an "editable" install of createrepo_c, use:

    python setup.py develop

Note: To recompile the libraries and binaries, you must re-run this command.

## Build RPM package

Modify createrepo_c.spec and run:

    utils/make_rpm.sh

Note: [Current .spec for Fedora rawhide](http://pkgs.fedoraproject.org/cgit/createrepo_c.git/plain/createrepo_c.spec)

## Testing

All unit tests run from the createrepo_c checkout dir

### Build C tests && run c and python tests

    make tests && make test

Note: For a verbose output of testing use: ``make ARGS="-V" test``

### Run only C unittests (from your checkout dir):

    build/tests/run_gtester.sh

Note: The C tests have to be built by ``make tests``!

### Run only Python unittests (from your checkout dir):

    PYTHONPATH=`readlink -f ./build/src/python/` nosetests -s tests/python/tests/

Note: When compiling createrepo_c without libmodulemd support add ``WITH_LIBMODULEMD=OFF``

### Links

[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=MODIFIED&bug_status=VERIFIED&component=createrepo_c&query_format=advanced)

### Important notes

In original createrepo ``sha`` is a nickname for the ``sha1`` checksum. Createrepo_c mimics this behaviour.

## Contribution

Here's the most direct way to get your work merged into the project.

1. Fork the project
1. Clone down your fork
1. Implement your feature or bug fix and commit changes
1. If the change fixes a bug at [Red Hat bugzilla](https://bugzilla.redhat.com/), or if it is important to the end user, add the following block to the commit message:

       = changelog =
       msg:       message to be included in the changelog
       type:      one of: bugfix/enhancement/security (this field is required when message is present)
       resolves:  URLs to bugs or issues resolved by this commit (can be specified multiple times)
       related:   URLs to any related bugs or issues (can be specified multiple times)

   * For example:

         = changelog =
         msg: Enhance error handling when locating repositories
         type: bugfix
         resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1762697

   * For your convenience, you can also use the git commit template by running the following command in the top-level directory of this project:

         git config commit.template ./.git-commit-template

1. In a separate commit, add your name and email into the [authors file](https://github.com/rpm-software-management/createrepo_c/blob/master/AUTHORS) as a reward for your generosity
1. Push the branch to your fork
1. Send a pull request for your branch

---------------------------------------------------

# Differences in behavior between createrepo_c and createrepo

## Checksums after update

### Use case:
- Repodata in repo/ has checksum xxx
- Params: --update --checksum=yyy repo/

### createrepo_c result:
- All package checksums are recalculated into yyy

### original createrepo result:
- Only new and changed packages have yyy checksums; other packages still have xxx checksums

## Skip symlinks param

### Use case:
- Some packages in repo/ are symlinks
- Params: --skip-symlinks repo/

### createrepo_c result:
- Symlinked packages are ignored

### original createrepo result:
- Symlinked packages are processed (https://bugzilla.redhat.com/show_bug.cgi?id=828848)

## Base path from update-md-path repo

### Use case:
- Somebody else's repo exists somewhere
- The repo items have a base path set to http://foo.com/
- We want to create metadata for our repo
- Some packages in our repo are the same as packages in somebody else's repo
- We want to speed up creation of our repodata with the combination of --update and --update-md-path=somebody_else's_repo
- Params: --update --update-md-path=ftp://somebody.else/repo our_repo/

### createrepo_c results:
- None of our packages have a base path set (if we don't set --baseurl explicitly)

### original createrepo result:
- Some packages in the metadata (those that were the same in our repo and in somebody else's repo) have the base path set to http://foo.com/
- (https://bugzilla.redhat.com/show_bug.cgi?id=875029)

## Crippled paths in filelists.xml after update

### Use case:
- A repo with old metadata exists
- We want to update the metadata
- Params: --update repo/

### createrepo_c results:
- All is fine

### original createrepo result:
- Some paths in filelists.xml are crippled (https://bugzilla.redhat.com/show_bug.cgi?id=835565)

## --update leaves behind some old repodata files

### Use case:
- A repo with repodata created with --simple-md-filenames exists
- We want to update the repodata to have checksums in the filenames
- Params: --update repo/

### createrepo_c results:
- All repodata contain the checksum in the name

### original createrepo result:
- All repodata contain the checksum in the name
- There are old metadata without the checksum in the name too
- (https://bugzilla.redhat.com/show_bug.cgi?id=836917)

## Mergerepo_c

### Default merge method

- Original mergerepo included even packages with the same NVR by default
- Mergerepo_c can be configured with the --method option to specify how repositories should be merged.
- Additionally, it's possible to use the --all option to replicate the original mergerepo behavior (see the example below).
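For instance, a merge that keeps all packages (replicating the original mergerepo behavior) might look like the following; the ``--repo`` option name and the repository paths here are illustrative assumptions, so check ``mergerepo_c --help`` for the exact syntax:

    mergerepo_c --all --repo repo_a/ --repo repo_b/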
## Modifyrepo_c

Modifyrepo_c is compatible with the classical Modifyrepo except for some misbehaviour:

* TODO: Report bugs and add reference here

### Batch file

When there is a need to make several modifications to a repository (``repomd.xml``), a batch file can be used.

> Batch file is Modifyrepo_c specific. It is not supported by the classical Modifyrepo - at least not yet.

#### Example

    # Add:
    # []
    #
    # Metadata that use a bunch of config options
    [some/path/comps.xml]
    type=group
    compress=true
    compress-type=gz
    unique-md-filenames=true
    checksum=sha256
    new-name=group.xml

    # Metadata that use default settings
    [some/path/bar.xml]

    # Remove:
    # []
    # remove=true
    [updateinfo]
    remove=true

#### Supported options

| Option name | Description | Supported value(s) | Default |
|---------------|-------------|--------------------|---------|
| path | Path to the file. When specified, it overrides the path given in the group name (the name between the '[' ']' brackets) | Any string | group name (string between '[' ']') |
| type | Type of the metadata | Any string | Based on filename |
| remove | Remove the specified file/type from repodata | ``true`` or ``false`` | ``false`` |
| compress | Compress the new metadata before adding it to the repo | ``true`` or ``false`` | ``true`` |
| compress-type | Compression format to use | ``gz``, ``bz2``, ``xz`` | ``gz`` |
| checksum | Checksum type to use | ``md5``, ``sha``, ``sha1``, ``sha224``, ``sha256``, ``sha384``, ``sha512`` | ``sha256`` |
| unique-md-filenames | Include the file's checksum in the filename | ``true`` or ``false`` | ``true`` |
| new-name | New name for the file. If ``compress`` is ``true``, then a compression suffix will be appended. If ``unique-md-filenames`` is ``true``, then the checksum will be prepended. | Any string | Original source filename |

#### Notes

* Lines beginning with a '#' and blank lines are considered comments.
* If ``remove=true`` is used, no other config options should be used
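A batch file is applied by pointing modifyrepo_c at it together with the target repodata/ directory. The sketch below assumes the option is named ``--batchfile`` and uses placeholder paths; verify the exact option name with ``modifyrepo_c --help``:

    modifyrepo_c --batchfile batch.ini /path/to/repo/repodata/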
createrepo_c-0.17.0/VERSION.cmake000066400000000000000000000000671400672373200163700ustar00rootroot00000000000000SET(CR_MAJOR "0")
SET(CR_MINOR "17")
SET(CR_PATCH "0")
createrepo_c-0.17.0/acceptance_tests/000077500000000000000000000000001400672373200175465ustar00rootroot00000000000000createrepo_c-0.17.0/acceptance_tests/run_nosetests.sh000077500000000000000000000014431400672373200230220ustar00rootroot00000000000000#!/bin/bash

CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BUILDDIR="$( cd "$CURDIR/../build" && pwd )"

# All use cases
PATH="$BUILDDIR/src/:/home/tmlcoch/git/yum-metadata-diff/:$PATH" LD_LIBRARY_PATH=$BUILDDIR/src/ PYTHONPATH=$BUILDDIR/src/python/ nosetests -s -v ./tests/ --processes 4 --process-timeout=300

# Single module:
#PATH="$BUILDDIR/src/:/home/tmlcoch/git/repodiff/:$PATH" LD_LIBRARY_PATH=$BUILDDIR/src/ PYTHONPATH=$BUILDDIR/src/python/ nosetests -s -v --processes 4 --process-timeout=300 tests/test_sqliterepo.py

# Single test:
# PATH="$BUILDDIR/src/:/home/tmlcoch/git/repodiff/:$PATH" LD_LIBRARY_PATH=$BUILDDIR/src/ PYTHONPATH=$BUILDDIR/src/python/ nosetests -s -v --processes 4 --process-timeout=300 tests/test_createrepo.py:TestCaseCreaterepo_emptyrepo.test_01_createrepo
createrepo_c-0.17.0/acceptance_tests/testdata/000077500000000000000000000000001400672373200213575ustar00rootroot00000000000000createrepo_c-0.17.0/acceptance_tests/testdata/packages/000077500000000000000000000000001400672373200231355ustar00rootroot00000000000000createrepo_c-0.17.0/acceptance_tests/testdata/packages/Archer-3.4.5-6.x86_64.rpm [binary RPM test package, payload omitted]
createrepo_c-0.17.0/acceptance_tests/testdata/packages/fake_bash-1.1.1-1.x86_64.rpm [binary RPM test package, payload omitted]
createrepo_c-0.17.0/acceptance_tests/testdata/packages/super_kernel-6.0.1-2.x86_64.rpm [binary RPM test package, payload omitted]
createrepo_c-0.17.0/acceptance_tests/tests/000077500000000000000000000000001400672373200207105ustar00rootroot00000000000000createrepo_c-0.17.0/acceptance_tests/tests/__init__.py000066400000000000000000000000001400672373200230070ustar00rootroot00000000000000createrepo_c-0.17.0/acceptance_tests/tests/base.py000066400000000000000000000316731400672373200222040ustar00rootroot00000000000000import os
import re
import time
import shutil
import pprint
import gzip
import filecmp
import os.path
import tempfile
import unittest
import threading
import subprocess

from .fixtures import PACKAGESDIR

OUTPUTDIR = None
OUTPUTDIR_LOCK =
threading.Lock() def get_outputdir(): global OUTPUTDIR if OUTPUTDIR: return OUTPUTDIR OUTPUTDIR_LOCK.acquire() if not OUTPUTDIR: prefix = time.strftime("./testresults_%Y%m%d_%H%M%S_") OUTPUTDIR = tempfile.mkdtemp(prefix=prefix, dir="./") OUTPUTDIR_LOCK.release() return OUTPUTDIR class _Result(object): def __str__(self): return pprint.pformat(self.__dict__) class CrResult(_Result): def __init__(self): self.rc = None # Return code self.out = None # stdout + stderr self.dir = None # Directory that was processed self.prog = None # Program name self.cmd = None # Complete command self.outdir = None # Output directory self.logfile = None # Log file where the out was logged class RepoDiffResult(_Result): def __init__(self): self.rc = None self.out = None self.repo1 = None self.repo2 = None self.cmd = None # Complete command self.logfile = None # Log file where the out was logged class RepoSanityCheckResult(_Result): def __init__(self): self.rc = None self.out = None self.repo = None self.cmd = None self.logfile = None class BaseTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): self.outdir = get_outputdir() self.tcdir = os.path.join(self.outdir, self.__class__.__name__) if not os.path.exists(self.tcdir): os.mkdir(self.tcdir) if self.__class__.__doc__: description_fn = os.path.join(self.tcdir, "description") open(description_fn, "w").write(self.__class__.__doc__+'\n') unittest.TestCase.__init__(self, *args, **kwargs) self.tdir = None # Test dir for the current test self.indir = None # Input dir for the current test self._currentResult = None # Result of the current test # Prevent use of a first line from test docstring as its name in output self.shortDescription_orig = self.shortDescription self.shortDescription = self._shortDescription self.main_cwd = os.getcwd() def _shortDescription(self): return ".".join(self.id().split('.')[-2:]) def run(self, result=None): # Hacky self._currentResult = result # remember result for use in tearDown unittest.TestCase.run(self, result) def setUp(self): os.chdir(self.main_cwd) # In case of TimedOutException in Nose test... the tearDown is not called :-/ caller = self.id().split(".", 3)[-1] self.tdir = os.path.abspath(os.path.join(self.tcdir, caller)) os.mkdir(self.tdir) self.indir = os.path.join(self.tdir, "input") os.mkdir(self.indir) description = self.shortDescription_orig() if description: fn = os.path.join(self.tdir, "description") open(fn, "w").write(description+'\n') #self.log = # TODO os.chdir(self.tdir) self.setup() def setup(self): pass def tearDown(self): if self.tdir and self._currentResult: if not len(self._currentResult.errors) + len(self._currentResult.failures): self.set_success() self.teardown() os.chdir(self.main_cwd) def teardown(self): pass def runcmd(self, cmd, logfile=None, workdir=None, stdin_data=None): """Stolen from the kobo library. 
Author of the original function is dmach@redhat.com""" # TODO: Add time how long the command takes if type(cmd) in (list, tuple): import pipes cmd = " ".join(pipes.quote(i) for i in cmd) if logfile is not None: already_exists = False if os.path.exists(logfile): already_exists = True logfile = open(logfile, "a") if already_exists: logfile.write("\n{0}\n Another run\n{0}\n".format('='*79)) logfile.write("cd %s\n" % os.getcwd()) for var in ("PATH", "PYTHONPATH", "LD_LIBRARY_PATH"): logfile.write('export %s="%s"\n' % (var, os.environ.get(var,""))) logfile.write("\n") logfile.write(cmd+"\n") logfile.write("\n+%s+\n\n" % ('-'*77)) stdin = None if stdin_data is not None: stdin = subprocess.PIPE proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=workdir) if stdin_data is not None: class StdinThread(threading.Thread): def run(self): proc.stdin.write(stdin_data) proc.stdin.close() stdin_thread = StdinThread() stdin_thread.daemon = True stdin_thread.start() output = "" while True: lines = proc.stdout.readline().decode('utf-8') if lines == "": break if logfile: logfile.write(lines) output += lines proc.wait() if logfile: logfile.write("\n+%s+\n\n" % ('-'*77)) logfile.write("RC: %s (%s)\n" % (proc.returncode, "Success" if not proc.returncode else "Failure")) logfile.close() if stdin_data is not None: stdin_thread.join() return proc.returncode, output def set_success(self): """Create a SUCCESS file in directory of the current test""" fn = os.path.join(self.tdir, "SUCCESS") open(fn, "w") def copy_pkg(self, name, dst): """Copy package from testdata into specified destination""" src = os.path.join(PACKAGESDIR, name) shutil.copy(src, dst) return os.path.join(dst, name) def indir_addpkg(self, name): """Add package into input dir for the current test""" src = os.path.join(PACKAGESDIR, name) return self.copy_pkg(src, self.indir) def indir_makedirs(self, path): """Make a directory in input dir of the current test""" path = path.lstrip('/') final_path = os.path.join(self.indir, path) os.makedirs(final_path) return final_path def indir_mkfile(self, name, content=""): """Create a file in input dir of the current test""" fn = os.path.join(self.indir, name) with open(fn, "w") as f: f.write(content) return fn def tdir_makedirs(self, path): """Make a directory in test dir of the current test""" path = path.lstrip('/') final_path = os.path.join(self.tdir, path) os.makedirs(final_path) return final_path def run_prog(self, prog, dir, args=None, outdir=None): res = CrResult() res.dir = dir res.prog = prog res.outdir = outdir res.logfile = os.path.join(self.tdir, "out_%s" % res.prog) res.cmd = "%(prog)s --verbose %(args)s %(dir)s" % { "prog": res.prog, "dir": res.dir, "args": args or "", } res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile) return res def run_cr(self, dir, args=None, c=False, outdir=None): """Run createrepo and return CrResult object with results :returns: Result of the createrepo run :rtype: CrResult """ prog = "createrepo_c" if c else "createrepo" if not outdir: outdir = os.path.join(self.tdir, prog) else: outdir = os.path.join(self.tdir, outdir) if not os.path.exists(outdir): os.mkdir(outdir) args = " -o %s %s" % (outdir, args if args else "") res = self.run_prog(prog, dir, args, outdir) return res def run_sqlr(self, dir, args=None): """""" res = self.run_prog("sqliterepo_c", dir, args) return res def compare_repos(self, repo1, repo2): """Compare two repos :returns: Difference between two repositories :rtype: RepoDiffResult """ res = RepoDiffResult() 
res.repo1 = os.path.join(repo1, "repodata") res.repo2 = os.path.join(repo2, "repodata") res.logfile = os.path.join(self.tdir, "out_cmp") res.cmd = "yum-metadata-diff --verbose --compare %s %s" % (res.repo1, res.repo2) res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile) return res def check_repo_sanity(self, repo): """Check if a repository is sane :returns: Result of the sanity check :rtype: RepoSanityCheckResult """ res = RepoSanityCheckResult() res.repo = os.path.join(repo, "repodata") res.logfile = os.path.join(self.tdir, "out_sanity") res.cmd = "yum-metadata-diff --verbose --check %s" % res.repo res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile) return res def assert_run_cr(self, *args, **kwargs): """Run createrepo and assert that it finished with return code 0 :returns: Result of the createrepo run :rtype: CrResult """ res = self.run_cr(*args, **kwargs) self.assertFalse(res.rc) return res def assert_run_sqlr(self, *args, **kwargs): """Run sqliterepo and assert that it finished with return code 0 :returns: Result of the sqliterepo run :rtype: CrResult """ res = self.run_sqlr(*args, **kwargs) self.assertFalse(res.rc) return res def assert_same_results(self, indir, args=None): """Run both createrepo and createrepo_c and assert that results are same :returns: (result of comparison, createrepo result, createrepo_c result) :rtype: (RepoDiffResult, CrResult, CrResult) """ crres = self.run_cr(indir, args) crcres = self.run_cr(indir, args, c=True) self.assertFalse(crres.rc) # Error while running createrepo self.assertFalse(crcres.rc) # Error while running createrepo_c cmpres = self.compare_repos(crres.outdir, crcres.outdir) self.assertFalse(cmpres.rc) # Repos are not same return (cmpres, crres, crcres) def assert_repo_sanity(self, repo): """Run repo sanity check and assert it's sane :returns: Result of the sanity check :rtype: RepoSanityCheckResult """ res = self.check_repo_sanity(repo) self.assertFalse(res.rc) return res def assert_repo_files(self, repo, file_patterns, additional_files_allowed=True): """Assert that files (defined by re) are in the repo """ compiled_patterns = map(re.compile, file_patterns) fns = os.listdir(os.path.join(repo, "repodata/")) used_patterns = [] for pattern in compiled_patterns: for fn in fns[:]: if pattern.match(fn): fns.remove(fn) used_patterns.append(pattern) if not additional_files_allowed: self.assertEqual(fns, []) # Unexpected additional files unused_paterns = [x.pattern for x in (set(compiled_patterns) - set(used_patterns))] self.assertEqual(unused_paterns, []) # Some patterns weren't used def assert_same_dir_content(self, a, b): """Assert identical content of two directories (Not recursive yet) """ # TODO: Recursive self.assertTrue(os.path.isdir(a)) self.assertTrue(os.path.isdir(b)) _, logfn = tempfile.mkstemp(prefix="out_dircmp_%s_" % long(time.time()), dir=self.tdir) logfile = open(logfn, "w") logfile.write("A: %s\n" % a) logfile.write("B: %s\n" % b) logfile.write("\n") dircmpobj = filecmp.dircmp(a, b) if dircmpobj.left_only or dircmpobj.right_only or dircmpobj.diff_files: logfile.write("A only:\n%s\n\n" % dircmpobj.left_only) logfile.write("B only:\n%s\n\n" % dircmpobj.right_only) logfile.write("Diff files:\n%s\n\n" % dircmpobj.diff_files) logfile.close() self.assertTrue(False) logfile.write("OK\n") logfile.close() def assert_file_contains_line(self, path_to_file, line): contents = "" found = False if path_to_file.endswith(".gz"): with gzip.open(path_to_file, 'rb') as f: for l in f.readlines(): if (l.strip() == line.strip()): found = 
True self.assertTrue(found, "Line: " + line + " not found in file: " + path_to_file) return with open(path_to_file) as f: for l in f.readlines(): if (l.strip() == line.strip()): found = True self.assertTrue(found, "Line: " + line + " not found in file: " + path_to_file) createrepo_c-0.17.0/acceptance_tests/tests/fixtures.py000066400000000000000000000004061400672373200231330ustar00rootroot00000000000000import os.path TESTDATADIR = os.path.normpath(os.path.join(__file__, "../../testdata")) PACKAGESDIR=os.path.join(TESTDATADIR, "packages") PACKAGES = [ "Archer-3.4.5-6.x86_64.rpm", "fake_bash-1.1.1-1.x86_64.rpm", "super_kernel-6.0.1-2.x86_64.rpm", ]createrepo_c-0.17.0/acceptance_tests/tests/test_createrepo.py000066400000000000000000000413401400672373200244540ustar00rootroot00000000000000import os import os.path import glob from .fixtures import PACKAGES from .base import BaseTestCase class TestCaseCreaterepo_badparams(BaseTestCase): """Use case with bad commandline arguments""" def setup(self): self.indir_addpkg(PACKAGES[0]) def test_01_createrepo_noinputdir(self): """No directory to index was specified""" res = self.run_cr("", c=True) self.assertTrue(res.rc) def test_02_createrepo_badinputdir(self): """Directory specified to index doesn't exist""" res = self.run_cr("somenonexistingdirectorytoindex/", c=True) self.assertTrue(res.rc) def test_03_createrepo_unknownparam(self): """Unknown param is specified""" res = self.run_cr(self.indir, "--someunknownparam", c=True) self.assertTrue(res.rc) def test_04_createrepo_badchecksumtype(self): """Unknown checksum type is specified""" res = self.run_cr(self.indir, "--checksum foobarunknownchecksum", c=True) self.assertTrue(res.rc) def test_05_createrepo_badcompressiontype(self): """Unknown compressin type is specified""" res = self.run_cr(self.indir, "--compress-type foobarunknowncompression", c=True) self.assertTrue(res.rc) def test_06_createrepo_badgroupfile(self): """Bad groupfile file specified""" res = self.run_cr(self.indir, "--groupfile badgroupfile", c=True) self.assertTrue(res.rc) def test_07_createrepo_badpkglist(self): """Bad pkglist file specified""" res = self.run_cr(self.indir, "--pkglist badpkglist", c=True) self.assertTrue(res.rc) def test_08_createrepo_retainoldmdbyagetogetherwithretainoldmd(self): """Both --retain-old-md-by-age and --retain-old-md are specified""" res = self.run_cr(self.indir, "--retain-old-md-by-age 1 --retain-old-md", c=True) self.assertTrue(res.rc) def test_09_createrepo_retainoldmdbyagewithbadage(self): """Both --retain-old-md-by-age and --retain-old-md are specified""" res = self.run_cr(self.indir, "--retain-old-md-by-age 55Z", c=True) self.assertTrue(res.rc) class TestCaseCreaterepo_emptyrepo(BaseTestCase): """Empty input repository""" def setup(self): self.fn_comps = self.indir_mkfile("comps.xml", '') def test_01_createrepo(self): """Repo from empty directory""" res = self.assert_run_cr(self.indir, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{64}-primary.xml.gz$", "[a-z0-9]{64}-filelists.xml.gz$", "[a-z0-9]{64}-other.xml.gz$", "[a-z0-9]{64}-primary.sqlite.bz2$", "[a-z0-9]{64}-filelists.sqlite.bz2$", "[a-z0-9]{64}-other.sqlite.bz2$", ], additional_files_allowed=False) def test_02_createrepo_database(self): """--database""" res = self.assert_run_cr(self.indir, "--database", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]+-primary.xml.gz$", "[a-z0-9]+-filelists.xml.gz$", "[a-z0-9]+-other.xml.gz$", 
"[a-z0-9]+-primary.sqlite.bz2$", "[a-z0-9]+-filelists.sqlite.bz2$", "[a-z0-9]+-other.sqlite.bz2$", ], additional_files_allowed=False) def test_03_createrepo_nodatabase(self): """--database""" res = self.assert_run_cr(self.indir, "--no-database", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]+-primary.xml.gz$", "[a-z0-9]+-filelists.xml.gz$", "[a-z0-9]+-other.xml.gz$", ], additional_files_allowed=False) def test_04_createrepo_groupfile(self): """--groupfile""" res = self.assert_run_cr(self.indir, "--groupfile %s" % self.fn_comps, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]+-primary.xml.gz$", "[a-z0-9]+-filelists.xml.gz$", "[a-z0-9]+-other.xml.gz$", "[a-z0-9]+-primary.sqlite.bz2$", "[a-z0-9]+-filelists.sqlite.bz2$", "[a-z0-9]+-other.sqlite.bz2$", "[a-z0-9]+-comps.xml$", "[a-z0-9]+-comps.xml.gz$", ], additional_files_allowed=False) def test_05_createrepo_checksum(self): """--checksum sha and --groupfile""" res = self.assert_run_cr(self.indir, "--checksum %(checksum)s --groupfile %(groupfile)s" % { 'checksum': "sha1", 'groupfile': self.fn_comps }, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{40}-primary.xml.gz$", "[a-z0-9]{40}-filelists.xml.gz$", "[a-z0-9]{40}-other.xml.gz$", "[a-z0-9]{40}-primary.sqlite.bz2$", "[a-z0-9]{40}-filelists.sqlite.bz2$", "[a-z0-9]{40}-other.sqlite.bz2$", "[a-z0-9]{40}-comps.xml$", "[a-z0-9]{40}-comps.xml.gz$", ], additional_files_allowed=False) def test_06_createrepo_simplemdfilenames(self): """--simple-md-filenames and --groupfile""" res = self.assert_run_cr(self.indir, "--simple-md-filenames --groupfile %(groupfile)s" % { 'groupfile': self.fn_comps }, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "primary.xml.gz$", "filelists.xml.gz$", "other.xml.gz$", "primary.sqlite.bz2$", "filelists.sqlite.bz2$", "other.sqlite.bz2$", "comps.xml$", "comps.xml.gz$", ], additional_files_allowed=False) def test_07_createrepo_xz(self): """--xz and --groupfile""" res = self.assert_run_cr(self.indir, "--xz --groupfile %(groupfile)s" % { 'groupfile': self.fn_comps }, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{64}-primary.xml.gz$", "[a-z0-9]{64}-filelists.xml.gz$", "[a-z0-9]{64}-other.xml.gz$", "[a-z0-9]{64}-primary.sqlite.xz$", "[a-z0-9]{64}-filelists.sqlite.xz$", "[a-z0-9]{64}-other.sqlite.xz$", "[a-z0-9]{64}-comps.xml$", "[a-z0-9]{64}-comps.xml.xz$", ], additional_files_allowed=False) def test_08_createrepo_compresstype_bz2(self): """--compress-type bz2""" res = self.assert_run_cr(self.indir, "--compress-type bz2", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{64}-primary.xml.gz$", "[a-z0-9]{64}-filelists.xml.gz$", "[a-z0-9]{64}-other.xml.gz$", "[a-z0-9]{64}-primary.sqlite.bz2$", "[a-z0-9]{64}-filelists.sqlite.bz2$", "[a-z0-9]{64}-other.sqlite.bz2$", ], additional_files_allowed=False) def test_09_createrepo_compresstype_gz(self): """--compress-type bz2""" res = self.assert_run_cr(self.indir, "--compress-type gz", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{64}-primary.xml.gz$", "[a-z0-9]{64}-filelists.xml.gz$", "[a-z0-9]{64}-other.xml.gz$", "[a-z0-9]{64}-primary.sqlite.gz$", "[a-z0-9]{64}-filelists.sqlite.gz$", "[a-z0-9]{64}-other.sqlite.gz$", ], additional_files_allowed=False) def 
test_10_createrepo_compresstype_xz(self): """--compress-type bz2""" res = self.assert_run_cr(self.indir, "--compress-type xz", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{64}-primary.xml.gz$", "[a-z0-9]{64}-filelists.xml.gz$", "[a-z0-9]{64}-other.xml.gz$", "[a-z0-9]{64}-primary.sqlite.xz$", "[a-z0-9]{64}-filelists.sqlite.xz$", "[a-z0-9]{64}-other.sqlite.xz$", ], additional_files_allowed=False) def test_11_createrepo_repomd_checksum(self): """--checksum sha and --groupfile""" res = self.assert_run_cr(self.indir, "--repomd-checksum %(checksum)s --groupfile %(groupfile)s" % { 'checksum': "sha1", 'groupfile': self.fn_comps }, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{40}-primary.xml.gz$", "[a-z0-9]{40}-filelists.xml.gz$", "[a-z0-9]{40}-other.xml.gz$", "[a-z0-9]{40}-primary.sqlite.bz2$", "[a-z0-9]{40}-filelists.sqlite.bz2$", "[a-z0-9]{40}-other.sqlite.bz2$", "[a-z0-9]{40}-comps.xml$", "[a-z0-9]{40}-comps.xml.gz$", ], additional_files_allowed=False) def test_12_createrepo_repomd_checksum(self): """--checksum sha and --groupfile""" res = self.assert_run_cr(self.indir, "--checksum md5 --repomd-checksum %(checksum)s --groupfile %(groupfile)s" % { 'checksum': "sha1", 'groupfile': self.fn_comps }, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]{40}-primary.xml.gz$", "[a-z0-9]{40}-filelists.xml.gz$", "[a-z0-9]{40}-other.xml.gz$", "[a-z0-9]{40}-primary.sqlite.bz2$", "[a-z0-9]{40}-filelists.sqlite.bz2$", "[a-z0-9]{40}-other.sqlite.bz2$", "[a-z0-9]{40}-comps.xml$", "[a-z0-9]{40}-comps.xml.gz$", ], additional_files_allowed=False) def test_13_createrepo_general_compress_type(self): """--checksum sha and --groupfile""" res = self.assert_run_cr(self.indir, "--general-compress-type %(compress_type)s --groupfile %(groupfile)s" % { 'compress_type': "xz", 'groupfile': self.fn_comps }, c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]+-primary.xml.xz$", "[a-z0-9]+-filelists.xml.xz$", "[a-z0-9]+-other.xml.xz$", "[a-z0-9]+-primary.sqlite.xz$", "[a-z0-9]+-filelists.sqlite.xz$", "[a-z0-9]+-other.sqlite.xz$", "[a-z0-9]+-comps.xml$", "[a-z0-9]+-comps.xml.xz$", ], additional_files_allowed=False) class TestCaseCreaterepo_packagedirstructure(BaseTestCase): """Use case with various directory structure of packages""" def test_01_createrepo_check_directories_recursively(self): self.indir_makedirs("a") self.indir_makedirs("a/b") self.indir_makedirs("a/b/c") self.copy_pkg(PACKAGES[0], self.indir + "/a") self.copy_pkg(PACKAGES[1], self.indir + "/a/b") self.copy_pkg(PACKAGES[2], self.indir + "/a/b/c") res = self.assert_run_cr(self.indir, "", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]+-primary.xml.gz$", "[a-z0-9]+-filelists.xml.gz$", "[a-z0-9]+-other.xml.gz$", "[a-z0-9]+-primary.sqlite.bz2$", "[a-z0-9]+-filelists.sqlite.bz2$", "[a-z0-9]+-other.sqlite.bz2$", ], additional_files_allowed=False) primary_path = glob.glob(os.path.join(res.outdir, "repodata/*-primary.xml.gz"))[0] self.assert_file_contains_line(primary_path, "Archer") self.assert_file_contains_line(primary_path, "super_kernel") self.assert_file_contains_line(primary_path, "fake_bash") def test_02_createrepo_check_directories_recursively_with_rpm_suffix(self): self.indir_makedirs("a") self.indir_makedirs('a/b.rpm') self.indir_makedirs("a/b.rpm/c") self.copy_pkg(PACKAGES[0], 
self.indir + "/a") self.copy_pkg(PACKAGES[1], self.indir + "/a/b.rpm") self.copy_pkg(PACKAGES[2], self.indir + "/a/b.rpm/c") res = self.assert_run_cr(self.indir, "", c=True) self.assert_repo_sanity(res.outdir) self.assert_repo_files(res.outdir, ["repomd.xml$", "[a-z0-9]+-primary.xml.gz$", "[a-z0-9]+-filelists.xml.gz$", "[a-z0-9]+-other.xml.gz$", "[a-z0-9]+-primary.sqlite.bz2$", "[a-z0-9]+-filelists.sqlite.bz2$", "[a-z0-9]+-other.sqlite.bz2$", ], additional_files_allowed=False) primary_path = glob.glob(os.path.join(res.outdir, "repodata/*-primary.xml.gz"))[0] self.assert_file_contains_line(primary_path, "Archer") self.assert_file_contains_line(primary_path, "super_kernel") self.assert_file_contains_line(primary_path, "fake_bash") createrepo_c-0.17.0/acceptance_tests/tests/test_createrepo_comparative.py000066400000000000000000000167051400672373200270550ustar00rootroot00000000000000import os import os.path from .fixtures import PACKAGES from .base import BaseTestCase class TestCaseCreaterepoComparative_emptyrepo(BaseTestCase): """Empty input repository""" def test_01_createrepo(self): """Repo from empty directory""" self.assert_same_results(self.indir) def test_02_createrepo_relativepath(self): """Repo from empty directory - specified by relative path""" self.assert_same_results(os.path.relpath(self.indir)) def test_03_createrepo_distrotag(self): """--distro""" self.assert_same_results(self.indir, "--distro DISTRO-TAG") def test_04_createrepo_distrotag(self): """--distro""" self.assert_same_results(self.indir, "--distro CPEID,Footag") def test_05_createrepo_distrotag(self): """--distro""" self.assert_same_results(self.indir, "--distro cpeid,tag_a --distro tag_b") def test_06_createrepo_contenttag(self): """--content""" self.assert_same_results(self.indir, "--content contenttag_a") def test_07_createrepo_contenttag(self): """--content""" self.assert_same_results(self.indir, "--content contenttag_a --content contettag_b") def test_08_createrepo_repotag(self): """--repo""" self.assert_same_results(self.indir, "--repo repotag_a") def test_09_createrepo_repotag(self): """--repo""" self.assert_same_results(self.indir, "--repo repotag_a --repo repotag_b") def test_10_createrepo_nodatabase(self): """--no-database""" self.assert_same_results(self.indir, "--no-database") def test_11_createrepo_uniquemdfilenames(self): """--unique-md-filenames""" self.assert_same_results(self.indir, "--unique-md-filenames") def test_12_createrepo_simplemdfilenames(self): """--simple-md-filenames""" self.assert_same_results(self.indir, "--simple-md-filenames") def test_13_createrepo_revision(self): """--revision""" self.assert_same_results(self.indir, "--revision XYZ") def test_14_createrepo_skipsymlinks(self): """--skip-symlinks""" self.assert_same_results(self.indir, "--skip-symlinks") class TestCaseCreaterepoComparative_regularrepo(BaseTestCase): """Repo with 3 packages""" def setup(self): self.indir_addpkg(PACKAGES[0]) self.indir_addpkg(PACKAGES[1]) self.indir_addpkg(PACKAGES[2]) def test_01_createrepo(self): """Regular createrepo""" self.assert_same_results(self.indir) def test_02_createrepo_relativepath(self): """Regular createrepo""" self.assert_same_results(os.path.relpath(self.indir)) def test_03_createrepo_excludes(self): """--excludes * param""" self.assert_same_results(self.indir, "--excludes '*'") def test_04_createrepo_excludes(self): """--excludes""" self.assert_same_results(self.indir, "--excludes 'Archer-3.4.5-6.x86_64.rpm'") def test_05_createrepo_excludes(self): """--excludes""" 
self.assert_same_results(self.indir, "--excludes 'Archer-*.rpm'") def test_06_createrepo_skipsymlinks(self): """--skip-symlinks""" self.assert_same_results(self.indir, "--skip-symlinks") def test_07_createrepo_pkglist(self): """--pkglist""" fn_pkglist = self.indir_mkfile("pkglist", "%s\n" % PACKAGES[0]) self.assert_same_results(self.indir, "--pkglist %s" % fn_pkglist) def test_08_createrepo_pkglist(self): """--pkglist""" fn_pkglist = self.indir_mkfile("pkglist", "%s\n%s\n" % (PACKAGES[0], PACKAGES[1])) self.assert_same_results(self.indir, "--pkglist %s" % fn_pkglist) def test_09_createrepo_pkglist(self): """--pkglist""" fn_pkglist = self.indir_mkfile("pkglist", "%s\n\n%s\n\nfoobar.rpm\n\n" % (PACKAGES[0], PACKAGES[1])) self.assert_same_results(self.indir, "--pkglist %s" % fn_pkglist) class TestCaseCreaterepoComparative_regularrepowithsubdirs(BaseTestCase): """Repo with 3 packages, each in its own subdir""" def setup(self): subdir_a = self.indir_makedirs("a") self.copy_pkg(PACKAGES[0], subdir_a) subdir_b = self.indir_makedirs("b") self.copy_pkg(PACKAGES[1], subdir_b) subdir_c = self.indir_makedirs("c") self.copy_pkg(PACKAGES[2], subdir_c) def test_01_createrepo(self): """Regular createrepo""" self.assert_same_results(self.indir) def test_02_createrepo_skipsymlinks(self): """--skip-symlinks""" self.assert_same_results(self.indir, "--skip-symlinks") def test_03_createrepo_excludes(self): """--excludes""" self.assert_same_results(self.indir, "--excludes 'Archer-3.4.5-6.x86_64.rpm'") def test_04_createrepo_excludes(self): """--excludes""" self.assert_same_results(self.indir, "--excludes 'Archer-*.rpm'") class TestCaseCreaterepoComparative_repowithsymlinks(BaseTestCase): """Repo with 2 packages and 1 symlink to a package""" def setup(self): self.indir_addpkg(PACKAGES[0]) self.indir_addpkg(PACKAGES[1]) pkg_in_tdir = self.copy_pkg(PACKAGES[2], self.tdir) os.symlink(pkg_in_tdir, os.path.join(self.indir, os.path.basename(pkg_in_tdir))) def test_01_createrepo(self): """Regular createrepo""" self.assert_same_results(self.indir) def test_02_createrepo_skipsymlinks(self): """--skip-symlinks""" self.assert_same_results(self.indir, "--skip-symlinks") class TestCaseCreaterepoComparative_repowithbadpackages(BaseTestCase): """Repo with 1 regular package and few broken packages""" def setup(self): self.indir_addpkg(PACKAGES[0]) self.indir_makedirs("adirthatlookslike.rpm") self.indir_mkfile("emptyfilethatlookslike.rpm") self.indir_mkfile("afilethatlookslike.rpm", content="foobar") def test_01_createrepo(self): """Regular createrepo""" self.assert_same_results(self.indir) class TestCaseCreaterepoComparative_cachedir(BaseTestCase): """Repo with 3 packages and cachedir used""" def setup(self): self.indir_addpkg(PACKAGES[0]) self.indir_addpkg(PACKAGES[1]) self.indir_addpkg(PACKAGES[2]) def test_01_createrepo_owncachedir(self): """Each createrepo has its own cachedir""" # Gen cache self.assert_same_results(self.indir, "--cachedir cache") # Run again and use the cache _, crres, crcres = self.assert_same_results(self.indir, "--cachedir cache") # Compare files in the cache files (they should be identical) cr_cache = os.path.join(crres.outdir, "cache") crc_cache = os.path.join(crcres.outdir, "cache") self.assert_same_dir_content(cr_cache, crc_cache) def test_02_createrepo_sharedcachedir(self): """Use cache mutually""" cache_cr = os.path.abspath(os.path.join(self.indir, "cache_cr")) cache_crc = os.path.abspath(os.path.join(self.indir, "cache_crc")) # Gen cache by the cr then use it by cr_c self.assert_run_cr(self.indir, 
"--cachedir %s" % cache_cr) self.assert_run_cr(self.indir, "--cachedir %s" % cache_cr, c=True) # Gen cache by the cr then use it by cr_c self.assert_run_cr(self.indir, "--cachedir %s" % cache_crc, c=True) self.assert_run_cr(self.indir, "--cachedir %s" % cache_crc) # Compare files in the cache files (they should be identical) self.assert_same_dir_content(cache_cr, cache_crc) createrepo_c-0.17.0/acceptance_tests/tests/test_createrepo_update_comparative.py000066400000000000000000000071511400672373200304120ustar00rootroot00000000000000import os import os.path from .fixtures import PACKAGES from .base import BaseTestCase class TestCaseCreaterepoUpdateComparative_emptyrepo(BaseTestCase): """Empty input repository""" def test_01_createrepoupdate(self): """Repo from empty directory""" self.assert_same_results(self.indir) self.assert_same_results(self.indir, "--update") def test_02_createrepoupdate_double(self): """Repo from empty directory""" self.assert_same_results(self.indir) self.assert_same_results(self.indir, "--update") self.assert_same_results(self.indir, "--update") def test_03_createrepoupdate_relativepath(self): """Repo from empty directory - specified by relative path""" self.assert_same_results(os.path.relpath(self.indir)) self.assert_same_results(os.path.relpath(self.indir), "--update") def test_04_createrepoupdate_simplemdfilenames(self): """Repo from empty directory - specified by relative path""" self.assert_same_results(os.path.relpath(self.indir)) self.assert_same_results(os.path.relpath(self.indir), "--update --simple-md-filenames") class TestCaseCreaterepoUpdateComparative_regularrepo(BaseTestCase): """Repo with 3 packages""" def setup(self): self.indir_addpkg(PACKAGES[0]) self.indir_addpkg(PACKAGES[1]) self.indir_addpkg(PACKAGES[2]) def test_01_createrepoupdate(self): """Repo from empty directory""" self.assert_same_results(self.indir) self.assert_same_results(self.indir, "--update") def test_02_createrepoupdate_double(self): """Repo from empty directory""" self.assert_same_results(self.indir) self.assert_same_results(self.indir, "--update") self.assert_same_results(self.indir, "--update") def test_03_createrepoupdate_relativepath(self): """Repo from empty directory - specified by relative path""" self.assert_same_results(os.path.relpath(self.indir)) self.assert_same_results(os.path.relpath(self.indir), "--update") def test_04_createrepoupdate_simplemdfilenames(self): """Repo from empty directory - specified by relative path""" self.assert_same_results(os.path.relpath(self.indir)) self.assert_same_results(os.path.relpath(self.indir), "--update --simple-md-filenames") class TestCaseCreaterepoUpdateComparative_regularrepoandupdaterepo(BaseTestCase): """Repo with 3 packages and repo that will be used as --update-md-path""" def setup(self): self.indir_addpkg(PACKAGES[0]) self.indir_addpkg(PACKAGES[1]) self.indir_addpkg(PACKAGES[2]) res = self.assert_run_cr(self.indir) self.update_md_path = os.path.join(self.tdir, "update_md_path") os.rename(res.outdir, self.update_md_path) os.rename(res.logfile, os.path.join(self.tdir, "out_createrepo-update_md_path")) def test_01_createrepoupdate_updatemdpath(self): """Repo from empty directory""" self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path) def test_02_createrepoupdate_updatemdpathandupdate(self): """Repo from empty directory""" self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path) self.assert_same_results(self.indir, "--update") def 
test_03_createrepoupdate_updatemdpathdouble(self): """Repo from empty directory""" self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path) self.assert_same_results(self.indir, "--update --update-md-path %s" % self.update_md_path) createrepo_c-0.17.0/acceptance_tests/tests/test_sqliterepo.py000066400000000000000000000156641400672373200245240ustar00rootroot00000000000000import os import time import os.path from .fixtures import PACKAGES from .base import BaseTestCase BASE_XML_PATTERNS_SIMPLE = ["repomd\.xml", "primary\.xml\..+", "filelists\.xml\..+", "other\.xml\..+"] BASE_XML_PATTERNS_UNIQUE = ["repomd\.xml", ".*-primary\.xml\..+", ".*-filelists\.xml\..+", ".*-other\.xml\..+"] DBS_PATTERNS = [".*primary\.sqlite\..+", ".*filelists\.sqlite\..+", ".*other\.sqlite\..+"] DBS_PATTERNS_SIMPLE = ["primary\.sqlite\..+", "filelists\.sqlite\..+", "other\.sqlite\..+"] DBS_PATTERNS_UNIQUE = [".*-primary\.sqlite\..+", ".*-filelists\.sqlite\..+", ".*-other\.sqlite\..+"] DBS_PATTERNS_UNIQUE_MD5 = ["[0-9a-z]{32}-primary\.sqlite\..+", ".*-filelists\.sqlite\..+", ".*-other\.sqlite\..+"] DBS_PATTERNS_SIMPLE_GZ = ["primary\.sqlite\.gz", "filelists\.sqlite\.gz", "other\.sqlite\.gz"] DBS_PATTERNS_SIMPLE_BZ2 = ["primary\.sqlite\.bz2", "filelists\.sqlite\.bz2", "other\.sqlite\.bz2"] DBS_PATTERNS_SIMPLE_XZ = ["primary\.sqlite\.xz", "filelists\.sqlite\.xz", "other\.sqlite\.xz"] class TestCaseSqliterepo_badparams(BaseTestCase): """Use case with bad commandline arguments""" def setup(self): self.indir_addpkg(PACKAGES[0]) class TestCaseSqliterepo(BaseTestCase): """Base use cases""" def setup(self): self.indir_addpkg(PACKAGES[0]) def test_01_sqliterepo(self): """Sqlitedbs already exists, sqliterepo without --force should fail""" cr_res = self.run_cr(self.indir, c=True) sq_res = self.run_sqlr(cr_res.outdir) self.assertTrue(sq_res.rc) self.assertTrue("already has sqlitedb present" in sq_res.out) def test_02_sqliterepo(self): """Sqlitedbs should be created""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--no-database", c=True, outdir=outdir) self.assert_run_sqlr(outdir) # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS, additional_files_allowed=True) def test_03_sqliterepo(self): """Sqlitedbs with simple md filenames should be created""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--no-database --simple-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir) # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE, additional_files_allowed=True) def test_04_sqliterepo(self): """Sqlitedbs with unique md filenames should be created""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--no-database --unique-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir) # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS_UNIQUE, additional_files_allowed=True) def test_05_sqliterepo(self): """--xz is used (old bz2 DBs should be removed)""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir, args="--force --xz") # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_XZ+BASE_XML_PATTERNS_SIMPLE, additional_files_allowed=False) def test_06_sqliterepo(self): """DBs already exists but --force is used sqlitedbs should be created""" outdir = self.tdir_makedirs("repository") 
self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir) old_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2")) self.assert_run_sqlr(outdir, args="--force") new_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2")) # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS, additional_files_allowed=True) # Check that DBs are newer than the old ones self.assertTrue(old_primary_ts < new_primary_ts) def test_07_sqliterepo(self): """--force and --keep-old and --xz are used (old bz2 DBs should be keeped)""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir, args="--force --keep-old --xz") # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_BZ2, additional_files_allowed=True) self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_XZ, additional_files_allowed=True) def test_08_sqliterepo(self): """--local-sqlite is used and old DBs exists (--force is also used)""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir) old_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2")) self.assert_run_sqlr(outdir, args="--local-sqlite --force") new_primary_ts = os.path.getmtime(os.path.join(outdir, "repodata", "primary.sqlite.bz2")) # Check that DBs really exists expected_files = DBS_PATTERNS_SIMPLE_BZ2 + BASE_XML_PATTERNS_SIMPLE self.assert_repo_files(outdir, expected_files, additional_files_allowed=False) # Check that DBs are newer than the old ones self.assertTrue(old_primary_ts < new_primary_ts) def test_09_sqliterepo(self): """--local-sqlite is used with --xz and --force and --keep-old""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir, args="--local-sqlite --force --keep-old --xz") # Check that DBs really exists self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_BZ2, additional_files_allowed=True) self.assert_repo_files(outdir, DBS_PATTERNS_SIMPLE_XZ, additional_files_allowed=True) def test_10_sqliterepo(self): """--compress-type used""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--database --simple-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir, args="--compress-type gz --force") # Check that DBs really exists expected_files = DBS_PATTERNS_SIMPLE_GZ + BASE_XML_PATTERNS_SIMPLE self.assert_repo_files(outdir, expected_files, additional_files_allowed=False) def test_11_sqliterepo(self): """--checksum used""" outdir = self.tdir_makedirs("repository") self.assert_run_cr(self.indir, args="--database --unique-md-filenames", c=True, outdir=outdir) self.assert_run_sqlr(outdir, args="--checksum md5 --force") # Check that DBs really exists expected_files = DBS_PATTERNS_UNIQUE_MD5 + BASE_XML_PATTERNS_UNIQUE self.assert_repo_files(outdir, expected_files, additional_files_allowed=False)createrepo_c-0.17.0/cmake/000077500000000000000000000000001400672373200153165ustar00rootroot00000000000000createrepo_c-0.17.0/cmake/Modules/000077500000000000000000000000001400672373200167265ustar00rootroot00000000000000createrepo_c-0.17.0/cmake/Modules/FindGLIB2.cmake000066400000000000000000000072341400672373200213360ustar00rootroot00000000000000#.rst: # FindGLIB2 # --------- # # Try to locate 
the GLib2 library. # If found, this will define the following variables: # # ``GLIB2_FOUND`` # True if the GLib2 library is available # ``GLIB2_INCLUDE_DIRS`` # The GLib2 include directories # ``GLIB2_LIBRARIES`` # The GLib2 libraries for linking # ``GLIB2_INCLUDE_DIR`` # Deprecated, use ``GLIB2_INCLUDE_DIRS`` # ``GLIB2_LIBRARY`` # Deprecated, use ``GLIB2_LIBRARIES`` # # If ``GLIB2_FOUND`` is TRUE, it will also define the following # imported target: # # ``GLIB2::GLIB2`` # The GLIB2 library # # Since 5.41.0. #============================================================================= # Copyright (c) 2008 Laurent Montel, # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
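# Editorial aside, not part of this module: a minimal sketch of how a
# consuming CMakeLists.txt typically uses FindGLIB2.cmake, assuming the
# module directory is already on CMAKE_MODULE_PATH. Only the GLIB2 package
# name, the GLIB2::GLIB2 imported target and the GLIB2_* variables come from
# the module documented above; the executable name "my_tool" is hypothetical.
find_package(GLIB2 REQUIRED)
add_executable(my_tool main.c)
if(TARGET GLIB2::GLIB2)
  # Preferred: the imported target carries include dirs and link flags.
  target_link_libraries(my_tool GLIB2::GLIB2)
else()
  # Fallback to the deprecated variable-style interface.
  include_directories(${GLIB2_INCLUDE_DIRS})
  target_link_libraries(my_tool ${GLIB2_LIBRARIES})
endif()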
#============================================================================= find_package(PkgConfig) pkg_check_modules(PC_GLIB2 QUIET glib-2.0) find_path(GLIB2_INCLUDE_DIRS NAMES glib.h HINTS ${PC_GLIB2_INCLUDEDIR} PATH_SUFFIXES glib-2.0) find_library(GLIB2_LIBRARIES NAMES glib-2.0 HINTS ${PC_GLIB2_LIBDIR} ) # search the glibconfig.h include dir under the same root where the library is found get_filename_component(glib2LibDir "${GLIB2_LIBRARIES}" PATH) find_path(GLIB2_INTERNAL_INCLUDE_DIR glibconfig.h PATH_SUFFIXES glib-2.0/include HINTS ${PC_GLIB2_INCLUDEDIR} "${glib2LibDir}" ${CMAKE_SYSTEM_LIBRARY_PATH}) # not sure if this include dir is optional or required # for now it is optional if(GLIB2_INTERNAL_INCLUDE_DIR) list(APPEND GLIB2_INCLUDE_DIRS "${GLIB2_INTERNAL_INCLUDE_DIR}") endif() # Deprecated synonyms set(GLIB2_INCLUDE_DIR "${GLIB2_INCLUDE_DIRS}") set(GLIB2_LIBRARY "${GLIB2_LIBRARIES}") include(FindPackageHandleStandardArgs) find_package_handle_standard_args(GLIB2 DEFAULT_MSG GLIB2_LIBRARIES GLIB2_INCLUDE_DIRS) if(GLIB2_FOUND AND NOT TARGET GLIB2::GLIB2) add_library(GLIB2::GLIB2 UNKNOWN IMPORTED) set_target_properties(GLIB2::GLIB2 PROPERTIES IMPORTED_LOCATION "${GLIB2_LIBRARIES}" INTERFACE_INCLUDE_DIRECTORIES "${GLIB2_INCLUDE_DIRS}") endif() mark_as_advanced(GLIB2_INCLUDE_DIRS GLIB2_INCLUDE_DIR GLIB2_LIBRARIES GLIB2_LIBRARY) include(FeatureSummary) set_package_properties(GLIB2 PROPERTIES URL "https://wiki.gnome.org/Projects/GLib" DESCRIPTION "Event loop and utility library") createrepo_c-0.17.0/cmake/Modules/FindGTHREAD2.cmake000066400000000000000000000061151400672373200216740ustar00rootroot00000000000000#.rst: # FindGTHREAD2 # --------- # # Try to locate the GThread2 library. # If found, this will define the following variables: # # ``GTHREAD2_FOUND`` # True if the GThread2 library is available # ``GTHREAD2_INCLUDE_DIRS`` # The GThread2 include directories # ``GTHREAD2_LIBRARIES`` # The GThread2 libraries for linking # ``GTHREAD2_INCLUDE_DIR`` # Deprecated, use ``GTHREAD2_INCLUDE_DIRS`` # ``GTHREAD2_LIBRARY`` # Deprecated, use ``GTHREAD2_LIBRARIES`` # # If ``GTHREAD2_FOUND`` is TRUE, it will also define the following # imported target: # # ``GTHREAD2::GTHREAD2`` # The GTHREAD2 library #============================================================================= # Copyright (c) 2008 Laurent Montel, # Copyright (c) 2020 Dmitry Mikhirev, # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #============================================================================= find_package(PkgConfig) pkg_check_modules(PC_GTHREAD2 QUIET gthread-2.0) find_path(GTHREAD2_INCLUDE_DIRS NAMES gthread.h HINTS ${PC_GTHREAD2_INCLUDEDIR} PATH_SUFFIXES glib-2.0 glib-2.0/glib) find_library(GTHREAD2_LIBRARIES NAMES gthread-2.0 HINTS ${PC_GTHREAD2_LIBDIR} ) get_filename_component(gthread2LibDir "${GTHREAD2_LIBRARIES}" PATH) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(GTHREAD2 DEFAULT_MSG GTHREAD2_LIBRARIES GTHREAD2_INCLUDE_DIRS) if(GTHREAD2_FOUND AND NOT TARGET GTHREAD2::GTHREAD2) add_library(GTHREAD2::GTHREAD2 UNKNOWN IMPORTED) set_target_properties(GTHREAD2::GTHREAD2 PROPERTIES IMPORTED_LOCATION "${GTHREAD2_LIBRARIES}" INTERFACE_INCLUDE_DIRECTORIES "${GTHREAD2_INCLUDE_DIRS}") endif() mark_as_advanced(GTHREAD2_INCLUDE_DIRS GTHREAD2_LIBRARIES) createrepo_c-0.17.0/cmake/Modules/FindLIBMODULEMD.cmake000066400000000000000000000026151400672373200222720ustar00rootroot00000000000000# Try to find libmodulemd 2.3+ # # LIBMODULEMD_FOUND - system has libmodulemd # LIBMODULEMD_INCLUDE_DIRS - the libmodulemd include directory # LIBMODULEMD_LIBRARIES - Link these to use libmodulemd # # Copyright (c) 2019 Stephen Gallagher # Redistribution and use is allowed according to the terms of the New # BSD license. IF ( LIBMODULEMD_LIBRARIES AND LIBMODULEMD_INCLUDE_DIRS ) # in cache already SET(LIBMODULEMD_FOUND TRUE) ELSE ( LIBMODULEMD_LIBRARIES AND LIBMODULEMD_INCLUDE_DIRS ) INCLUDE(FindPkgConfig) IF ( LIBMODULEMD_FIND_REQUIRED ) SET ( _pkgconfig_REQUIRED "REQUIRED" ) ELSE ( LIBMODULEMD_FIND_REQUIRED ) SET ( _pkgconfig_REQUIRED "" ) endif ( LIBMODULEMD_FIND_REQUIRED ) PKG_SEARCH_MODULE ( LIBMODULEMD ${_pkgconfig_REQUIRED} modulemd-2.0>=2.3 ) IF (LIBMODULEMD_FOUND) IF (NOT LIBMODULEMD_FIND_QUIETLY) MESSAGE (STATUS "Found libmodulemd: ${LIBMODULEMD_LIBRARIES} ${LIBMODULEMD_INCLUDE_DIRS}") ENDIF (NOT LIBMODULEMD_FIND_QUIETLY) ELSE (LIBMODULEMD_FOUND) IF (LIBMODULEMD_FIND_REQUIRED) MESSAGE (SEND_ERROR "Could not find libmodulemd") ENDIF (LIBMODULEMD_FIND_REQUIRED) ENDIF (LIBMODULEMD_FOUND) # show the LIBMODULEMD_INCLUDE_DIRS and LIBMODULEMD_LIBRARIES variables only in the advanced view MARK_AS_ADVANCED(LIBMODULEMD_INCLUDE_DIRS LIBMODULEMD_LIBRARIES) ENDIF ( LIBMODULEMD_LIBRARIES AND LIBMODULEMD_INCLUDE_DIRS ) createrepo_c-0.17.0/cmake/Modules/FindLZMA.cmake000066400000000000000000000031501400672373200212730ustar00rootroot00000000000000# - Find lzma and lzmadec # Find the native LZMA includes and library # # LZMA_INCLUDE_DIR - where to find lzma.h, etc. # LZMA_LIBRARIES - List of libraries when using liblzma. # LZMA_FOUND - True if liblzma found. # LZMADEC_INCLUDE_DIR - where to find lzmadec.h, etc. # LZMADEC_LIBRARIES - List of libraries when using liblzmadec. # LZMADEC_FOUND - True if liblzmadec found. 
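# Editorial aside, not part of this module: a minimal sketch of how a caller
# might consume the two sets of results this module can produce (full liblzma,
# or the liblzmadec decoder-only fallback searched when liblzma is missing).
# The XZ_LINK_LIBS variable is hypothetical; the LZMA_*/LZMADEC_* names are the
# ones documented above.
find_package(LZMA)
if(LZMA_FOUND)
  include_directories(${LZMA_INCLUDE_DIR})
  set(XZ_LINK_LIBS ${LZMA_LIBRARIES})
elseif(LZMADEC_FOUND)
  # Decoder-only fallback located by the same module.
  include_directories(${LZMADEC_INCLUDE_DIR})
  set(XZ_LINK_LIBS ${LZMADEC_LIBRARIES})
else()
  message(FATAL_ERROR "Neither liblzma nor liblzmadec was found")
endif()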
IF (LZMA_INCLUDE_DIR) # Already in cache, be silent SET(LZMA_FIND_QUIETLY TRUE) ENDIF (LZMA_INCLUDE_DIR) FIND_PATH(LZMA_INCLUDE_DIR lzma.h) FIND_LIBRARY(LZMA_LIBRARY NAMES lzma ) # handle the QUIETLY and REQUIRED arguments and set LZMA_FOUND to TRUE if # all listed variables are TRUE INCLUDE(FindPackageHandleStandardArgs) FIND_PACKAGE_HANDLE_STANDARD_ARGS(LZMA DEFAULT_MSG LZMA_LIBRARY LZMA_INCLUDE_DIR) IF(LZMA_FOUND) SET( LZMA_LIBRARIES ${LZMA_LIBRARY} ) ELSE(LZMA_FOUND) SET( LZMA_LIBRARIES ) IF (LZMADEC_INCLUDE_DIR) # Already in cache, be silent SET(LZMADEC_FIND_QUIETLY TRUE) ENDIF (LZMADEC_INCLUDE_DIR) FIND_PATH(LZMADEC_INCLUDE_DIR lzmadec.h) FIND_LIBRARY(LZMADEC_LIBRARY NAMES lzmadec ) # handle the QUIETLY and REQUIRED arguments and set LZMADEC_FOUND to TRUE if # all listed variables are TRUE INCLUDE(FindPackageHandleStandardArgs) FIND_PACKAGE_HANDLE_STANDARD_ARGS(LZMADEC DEFAULT_MSG LZMADEC_LIBRARY LZMADEC_INCLUDE_DIR) IF(LZMADEC_FOUND) SET( LZMADEC_LIBRARIES ${LZMADEC_LIBRARY} ) ELSE(LZMADEC_FOUND) SET( LZMADEC_LIBRARIES ) ENDIF(LZMADEC_FOUND) ENDIF(LZMA_FOUND) MARK_AS_ADVANCED( LZMA_LIBRARY LZMA_INCLUDE_DIR LZMADEC_LIBRARY LZMADEC_INCLUDE_DIR ) createrepo_c-0.17.0/cmake/Modules/FindLibmagic.cmake000066400000000000000000000012501400672373200222360ustar00rootroot00000000000000# # Find libmagic.so (part of the 'file' package) # (C) 2009 by Lorenzo Villani. Licensed under LGPL license. # include(LibFindMacros) # Include dir find_path(Libmagic_INCLUDE_DIR NAMES magic.h PATHS ${CMAKE_INCLUDE_PATH} ) # Finally the library itself find_library(Libmagic_LIBRARY NAMES magic PATHS ${CMAKE_LIBRARY_PATH} ) # Set the include dir variables and the libraries and let libfind_process do the rest. # NOTE: Singular variables for this library, plural for libraries this this lib depends on. set(Libmagic_PROCESS_INCLUDES Libmagic_INCLUDE_DIR Libmagic_INCLUDE_DIRS) set(Libmagic_PROCESS_LIBS Libmagic_LIBRARY Libmagic_LIBRARIES) libfind_process(Libmagic) createrepo_c-0.17.0/cmake/Modules/FindSqlite3.cmake000066400000000000000000000047611400672373200220650ustar00rootroot00000000000000# - find Sqlite 3 # SQLITE3_INCLUDE_DIR - Where to find Sqlite 3 header files (directory) # SQLITE3_LIBRARIES - Sqlite 3 libraries # SQLITE3_LIBRARY_RELEASE - Where the release library is # SQLITE3_LIBRARY_DEBUG - Where the debug library is # SQLITE3_FOUND - Set to TRUE if we found everything (library, includes and executable) # Copyright (c) 2010 Pau Garcia i Quiles, # # Redistribution and use is allowed according to the terms of the BSD license. # For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
# # Generated by CModuler, a CMake Module Generator - http://gitorious.org/cmoduler IF( SQLITE3_INCLUDE_DIR AND SQLITE3_LIBRARY_RELEASE AND SQLITE3_LIBRARY_DEBUG ) SET(SQLITE3_FIND_QUIETLY TRUE) ENDIF( SQLITE3_INCLUDE_DIR AND SQLITE3_LIBRARY_RELEASE AND SQLITE3_LIBRARY_DEBUG ) FIND_PATH( SQLITE3_INCLUDE_DIR sqlite3.h ) FIND_LIBRARY(SQLITE3_LIBRARY_RELEASE NAMES sqlite3 ) FIND_LIBRARY(SQLITE3_LIBRARY_DEBUG NAMES sqlite3 sqlite3d HINTS /usr/lib/debug/usr/lib/ ) IF( SQLITE3_LIBRARY_RELEASE OR SQLITE3_LIBRARY_DEBUG AND SQLITE3_INCLUDE_DIR ) SET( SQLITE3_FOUND TRUE ) ENDIF( SQLITE3_LIBRARY_RELEASE OR SQLITE3_LIBRARY_DEBUG AND SQLITE3_INCLUDE_DIR ) IF( SQLITE3_LIBRARY_DEBUG AND SQLITE3_LIBRARY_RELEASE ) # if the generator supports configuration types then set # optimized and debug libraries, or if the CMAKE_BUILD_TYPE has a value IF( CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE ) SET( SQLITE3_LIBRARIES optimized ${SQLITE3_LIBRARY_RELEASE} debug ${SQLITE3_LIBRARY_DEBUG} ) ELSE( CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE ) # if there are no configuration types and CMAKE_BUILD_TYPE has no value # then just use the release libraries SET( SQLITE3_LIBRARIES ${SQLITE3_LIBRARY_RELEASE} ) ENDIF( CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE ) ELSEIF( SQLITE3_LIBRARY_RELEASE ) SET( SQLITE3_LIBRARIES ${SQLITE3_LIBRARY_RELEASE} ) ELSE( SQLITE3_LIBRARY_DEBUG AND SQLITE3_LIBRARY_RELEASE ) SET( SQLITE3_LIBRARIES ${SQLITE3_LIBRARY_DEBUG} ) ENDIF( SQLITE3_LIBRARY_DEBUG AND SQLITE3_LIBRARY_RELEASE ) IF( SQLITE3_FOUND ) IF( NOT SQLITE3_FIND_QUIETLY ) MESSAGE( STATUS "Found Sqlite3 header file in ${SQLITE3_INCLUDE_DIR}") MESSAGE( STATUS "Found Sqlite3 libraries: ${SQLITE3_LIBRARIES}") ENDIF( NOT SQLITE3_FIND_QUIETLY ) ELSE(SQLITE3_FOUND) IF( SQLITE3_FIND_REQUIRED) MESSAGE( FATAL_ERROR "Could not find Sqlite3" ) ELSE( SQLITE3_FIND_REQUIRED) MESSAGE( STATUS "Optional package Sqlite3 was not found" ) ENDIF( SQLITE3_FIND_REQUIRED) ENDIF(SQLITE3_FOUND) createrepo_c-0.17.0/cmake/Modules/LibFindMacros.cmake000066400000000000000000000100471400672373200224060ustar00rootroot00000000000000# Works the same as find_package, but forwards the "REQUIRED" and "QUIET" arguments # used for the current package. For this to work, the first parameter must be the # prefix of the current package, then the prefix of the new package etc, which are # passed to find_package. macro (libfind_package PREFIX) set (LIBFIND_PACKAGE_ARGS ${ARGN}) if (${PREFIX}_FIND_QUIETLY) set (LIBFIND_PACKAGE_ARGS ${LIBFIND_PACKAGE_ARGS} QUIET) endif (${PREFIX}_FIND_QUIETLY) if (${PREFIX}_FIND_REQUIRED) set (LIBFIND_PACKAGE_ARGS ${LIBFIND_PACKAGE_ARGS} REQUIRED) endif (${PREFIX}_FIND_REQUIRED) find_package(${LIBFIND_PACKAGE_ARGS}) endmacro (libfind_package) # Damn CMake developers made the UsePkgConfig system deprecated in the same release (2.6) # where they added pkg_check_modules. Consequently I need to support both in my scripts # to avoid those deprecated warnings. Here's a helper that does just that. # Works identically to pkg_check_modules, except that no checks are needed prior to use. 
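# Editorial aside, not part of this file: a minimal sketch of how a
# FindFoo.cmake module is typically written on top of these macros (this is
# the same pattern FindLibmagic.cmake in this directory follows). The "Foo"
# library, foo.h header and foo library name are hypothetical; the macro
# names and the _PROCESS_INCLUDES/_PROCESS_LIBS convention are the ones
# defined below. libfind_pkg_check_modules() queries pkg-config for hints,
# find_path()/find_library() locate the header and library, and
# libfind_process() assembles FOO_INCLUDE_DIRS / FOO_LIBRARIES and errors
# out for REQUIRED packages that were not found.
libfind_pkg_check_modules(Foo_PKGCONF foo)
find_path(Foo_INCLUDE_DIR NAMES foo.h HINTS ${Foo_PKGCONF_INCLUDE_DIRS})
find_library(Foo_LIBRARY NAMES foo HINTS ${Foo_PKGCONF_LIBRARY_DIRS})
set(Foo_PROCESS_INCLUDES Foo_INCLUDE_DIR)
set(Foo_PROCESS_LIBS Foo_LIBRARY)
libfind_process(Foo)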
macro (libfind_pkg_check_modules PREFIX PKGNAME) if (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4) include(UsePkgConfig) pkgconfig(${PKGNAME} ${PREFIX}_INCLUDE_DIRS ${PREFIX}_LIBRARY_DIRS ${PREFIX}_LDFLAGS ${PREFIX}_CFLAGS) else (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4) find_package(PkgConfig) if (PKG_CONFIG_FOUND) pkg_check_modules(${PREFIX} ${PKGNAME}) endif (PKG_CONFIG_FOUND) endif (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4) endmacro (libfind_pkg_check_modules) # Do the final processing once the paths have been detected. # If include dirs are needed, ${PREFIX}_PROCESS_INCLUDES should be set to contain # all the variables, each of which contain one include directory. # Ditto for ${PREFIX}_PROCESS_LIBS and library files. # Will set ${PREFIX}_FOUND, ${PREFIX}_INCLUDE_DIRS and ${PREFIX}_LIBRARIES. # Also handles errors in case library detection was required, etc. macro (libfind_process PREFIX) # Skip processing if already processed during this run if (NOT ${PREFIX}_FOUND) # Start with the assumption that the library was found set (${PREFIX}_FOUND TRUE) # Process all includes and set _FOUND to false if any are missing foreach (i ${${PREFIX}_PROCESS_INCLUDES}) if (${i}) set (${PREFIX}_INCLUDE_DIRS ${${PREFIX}_INCLUDE_DIRS} ${${i}}) mark_as_advanced(${i}) else (${i}) set (${PREFIX}_FOUND FALSE) endif (${i}) endforeach (i) # Process all libraries and set _FOUND to false if any are missing foreach (i ${${PREFIX}_PROCESS_LIBS}) if (${i}) set (${PREFIX}_LIBRARIES ${${PREFIX}_LIBRARIES} ${${i}}) mark_as_advanced(${i}) else (${i}) set (${PREFIX}_FOUND FALSE) endif (${i}) endforeach (i) # Print message and/or exit on fatal error if (${PREFIX}_FOUND) if (NOT ${PREFIX}_FIND_QUIETLY) message (STATUS "Found ${PREFIX} ${${PREFIX}_VERSION}") endif (NOT ${PREFIX}_FIND_QUIETLY) else (${PREFIX}_FOUND) if (${PREFIX}_FIND_REQUIRED) foreach (i ${${PREFIX}_PROCESS_INCLUDES} ${${PREFIX}_PROCESS_LIBS}) message("${i}=${${i}}") endforeach (i) message (FATAL_ERROR "Required library ${PREFIX} NOT FOUND.\nInstall the library (dev version) and try again. If the library is already installed, use ccmake to set the missing variables manually.") endif (${PREFIX}_FIND_REQUIRED) endif (${PREFIX}_FOUND) endif (NOT ${PREFIX}_FOUND) endmacro (libfind_process) macro(libfind_library PREFIX basename) set(TMP "") if(MSVC80) set(TMP -vc80) endif(MSVC80) if(MSVC90) set(TMP -vc90) endif(MSVC90) set(${PREFIX}_LIBNAMES ${basename}${TMP}) if(${ARGC} GREATER 2) set(${PREFIX}_LIBNAMES ${basename}${TMP}-${ARGV2}) string(REGEX REPLACE "\\." 
"_" TMP ${${PREFIX}_LIBNAMES}) set(${PREFIX}_LIBNAMES ${${PREFIX}_LIBNAMES} ${TMP}) endif(${ARGC} GREATER 2) find_library(${PREFIX}_LIBRARY NAMES ${${PREFIX}_LIBNAMES} PATHS ${${PREFIX}_PKGCONF_LIBRARY_DIRS} ) endmacro(libfind_library) createrepo_c-0.17.0/createrepo_c.bash000066400000000000000000000122421400672373200175310ustar00rootroot00000000000000# bash completion for createrepo and friends _cr_compress_type() { COMPREPLY=( $( compgen -W "bz2 gz xz" -- "$2" ) ) } _cr_checksum_type() { COMPREPLY=( $( compgen -W "md5 sha sha1 sha224 sha256 sha384 sha512" -- "$2" ) ) } _cr_createrepo() { COMPREPLY=() case $3 in -V|--version|-h|--help) return 0 ;; --update-md-path|-o|--outputdir|--oldpackagedirs) COMPREPLY=( $( compgen -d -- "$2" ) ) return 0 ;; -g|--groupfile) COMPREPLY=( $( compgen -f -o plusdirs -X '!*.xml' -- "$2" ) ) return 0 ;; -s|--checksum) _cr_checksum_type "$1" "$2" return 0 ;; -i|--pkglist|--read-pkgs-list) COMPREPLY=( $( compgen -f -o plusdirs -- "$2" ) ) return 0 ;; -n|--includepkg) COMPREPLY=( $( compgen -f -o plusdirs -X '!*.rpm' -- "$2" ) ) return 0 ;; # --retain-old-md) # COMPREPLY=( $( compgen -W '0 1 2 3 4 5 6 7 8 9' -- "$2" ) ) # return 0 # ;; # --num-deltas|--max-delta-rpm-size) # COMPREPLY=( $( compgen -W '1 2 3 4 5 6 7 8 9' -- "$2" ) ) # return 0 # ;; --workers) local min=2 max=$( getconf _NPROCESSORS_ONLN 2>/dev/null ) [[ -z $max || $max -lt $min ]] && max=$min COMPREPLY=( $( compgen -W "{1..$max}" -- "$2" ) ) return 0 ;; --compress-type) _cr_compress_type "$1" "$2" return 0 ;; esac if [[ $2 == -* ]] ; then COMPREPLY=( $( compgen -W '--help --version --quiet --verbose --excludes --basedir --baseurl --groupfile --checksum --pretty --database --no-database --update --update-md-path --skip-stat --pkglist --includepkg --outputdir --skip-symlinks --changelog-limit --unique-md-filenames --simple-md-filenames --retain-old-md --distro --content --repo --revision --read-pkgs-list --workers --xz --compress-type --keep-all-metadata --compatibility --retain-old-md-by-age --cachedir --local-sqlite --cut-dirs --location-prefix --deltas --oldpackagedirs --num-deltas --max-delta-rpm-size --recycle-pkglist' -- "$2" ) ) else COMPREPLY=( $( compgen -d -- "$2" ) ) fi } && complete -F _cr_createrepo -o filenames createrepo_c _cr_mergerepo() { COMPREPLY=() case $3 in --version|-h|--help) return 0 ;; -g|--groupfile|--blocked) COMPREPLY=( $( compgen -f -o plusdirs -- "$2" ) ) return 0 ;; -r|--repo|-o|--outputdir|--noarch-repo) COMPREPLY=( $( compgen -d -- "$2" ) ) return 0 ;; --compress-type) _cr_compress_type "" "$2" return 0 ;; --method) COMPREPLY=( $( compgen -W "repo ts nvr" -- "$2" ) ) return 0 ;; esac if [[ $2 == -* ]] ; then COMPREPLY=( $( compgen -W '--version --help --repo --archlist --database --no-database --verbose --outputdir --nogroups --noupdateinfo --compress-type --method --all --noarch-repo --unique-md-filenames --simple-md-filenames --omit-baseurl --koji --groupfile --blocked' -- "$2" ) ) else COMPREPLY=( $( compgen -d -- "$2" ) ) fi } && complete -F _cr_mergerepo -o filenames mergerepo_c _cr_modifyrepo() { COMPREPLY=() case $3 in --version|-h|--help) return 0 ;; -f|--batchfile) COMPREPLY=( $( compgen -f -o plusdirs -- "$2" ) ) return 0 ;; --compress-type) _cr_compress_type "" "$2" return 0 ;; -s|--checksum) _cr_checksum_type "$1" "$2" return 0 ;; esac if [[ $2 == -* ]] ; then COMPREPLY=( $( compgen -W '--version --help --mdtype --remove --compress --no-compress --compress-type --checksum --unique-md-filenames --simple-md-filenames --verbose --batchfile --new-name' -- 
"$2" ) ) else COMPREPLY=( $( compgen -f -- "$2" ) ) fi } && complete -F _cr_modifyrepo -o filenames modifyrepo_c _cr_sqliterepo() { COMPREPLY=() case $3 in -h|--help|-V|--version) return 0 ;; --compress-type) _cr_compress_type "" "$2" return 0 ;; -s|--checksum) _cr_checksum_type "$1" "$2" return 0 ;; esac if [[ $2 == -* ]] ; then COMPREPLY=( $( compgen -W '--help --version --quiet --verbose --force --keep-old --xz --compress-type --checksum --local-sqlite ' -- "$2" ) ) else COMPREPLY=( $( compgen -f -- "$2" ) ) fi } && complete -F _cr_sqliterepo -o filenames sqliterepo_c # Local variables: # mode: shell-script # sh-basic-offset: 4 # sh-indent-comment: t # indent-tabs-mode: nil # End: # ex: ts=4 sw=4 et filetype=sh createrepo_c-0.17.0/createrepo_c.spec000066400000000000000000000104661400672373200175540ustar00rootroot00000000000000%global libmodulemd_version 2.3.0 %define __cmake_in_source_build 1 %global bash_completion %{_datadir}/bash-completion/completions/* %if 0%{?rhel} && ( 0%{?rhel} <= 7 || 0%{?rhel} >= 9 ) %bcond_with drpm %else %bcond_without drpm %endif %if 0%{?rhel} || 0%{?fedora} < 29 %bcond_with zchunk %else %bcond_without zchunk %endif %if 0%{?rhel} || 0%{?fedora} < 29 %bcond_with libmodulemd %else %bcond_without libmodulemd %endif Summary: Creates a common metadata repository Name: createrepo_c Version: 0.17.0 Release: 1%{?dist} License: GPLv2+ URL: https://github.com/rpm-software-management/createrepo_c Source0: %{url}/archive/%{version}/%{name}-%{version}.tar.gz BuildRequires: cmake BuildRequires: gcc BuildRequires: bzip2-devel BuildRequires: doxygen BuildRequires: file-devel BuildRequires: glib2-devel >= 2.22.0 BuildRequires: libcurl-devel BuildRequires: libxml2-devel BuildRequires: openssl-devel BuildRequires: rpm-devel >= 4.8.0-28 BuildRequires: sqlite-devel BuildRequires: xz-devel BuildRequires: zlib-devel %if %{with zchunk} BuildRequires: pkgconfig(zck) >= 0.9.11 BuildRequires: zchunk %endif %if %{with libmodulemd} BuildRequires: pkgconfig(modulemd-2.0) >= %{libmodulemd_version} BuildRequires: libmodulemd Requires: libmodulemd%{?_isa} >= %{libmodulemd_version} %endif Requires: %{name}-libs = %{version}-%{release} BuildRequires: bash-completion Requires: rpm >= 4.9.0 %if %{with drpm} BuildRequires: drpm-devel >= 0.4.0 %endif %if 0%{?fedora} || 0%{?rhel} > 7 Obsoletes: createrepo < 0.11.0 Provides: createrepo = %{version}-%{release} %endif %description C implementation of Createrepo. A set of utilities (createrepo_c, mergerepo_c, modifyrepo_c) for generating a common metadata repository from a directory of rpm packages and maintaining it. %package libs Summary: Library for repodata manipulation %description libs Libraries for applications using the createrepo_c library for easy manipulation with a repodata. %package devel Summary: Library for repodata manipulation Requires: %{name}-libs%{?_isa} = %{version}-%{release} %description devel This package contains the createrepo_c C library and header files. These development files are for easy manipulation with a repodata. %package -n python3-%{name} Summary: Python 3 bindings for the createrepo_c library %{?python_provide:%python_provide python3-%{name}} BuildRequires: python3-devel BuildRequires: python3-nose BuildRequires: python3-sphinx Requires: %{name}-libs = %{version}-%{release} %description -n python3-%{name} Python 3 bindings for the createrepo_c library. %prep %autosetup -p1 mkdir build-py3 %build # Build createrepo_c with Pyhon 3 pushd build-py3 %cmake .. 
\ -DWITH_ZCHUNK=%{?with_zchunk:ON}%{!?with_zchunk:OFF} \ -DWITH_LIBMODULEMD=%{?with_libmodulemd:ON}%{!?with_libmodulemd:OFF} \ -DENABLE_DRPM=%{?with_drpm:ON}%{!?with_drpm:OFF} make %{?_smp_mflags} RPM_OPT_FLAGS="%{optflags}" # Build C documentation make doc-c popd %check # Run Python 3 tests pushd build-py3 # Compile C tests make tests # Run Python 3 tests make ARGS="-V" test popd %install pushd build-py3 # Install createrepo_c with Python 3 make install DESTDIR=%{buildroot} popd %if 0%{?fedora} || 0%{?rhel} > 7 ln -sr %{buildroot}%{_bindir}/createrepo_c %{buildroot}%{_bindir}/createrepo ln -sr %{buildroot}%{_bindir}/mergerepo_c %{buildroot}%{_bindir}/mergerepo ln -sr %{buildroot}%{_bindir}/modifyrepo_c %{buildroot}%{_bindir}/modifyrepo %endif %if 0%{?rhel} && 0%{?rhel} <= 7 %post libs -p /sbin/ldconfig %postun libs -p /sbin/ldconfig %else %ldconfig_scriptlets libs %endif %files %doc README.md %{_mandir}/man8/createrepo_c.8* %{_mandir}/man8/mergerepo_c.8* %{_mandir}/man8/modifyrepo_c.8* %{_mandir}/man8/sqliterepo_c.8* %{bash_completion} %{_bindir}/createrepo_c %{_bindir}/mergerepo_c %{_bindir}/modifyrepo_c %{_bindir}/sqliterepo_c %if 0%{?fedora} || 0%{?rhel} > 7 %{_bindir}/createrepo %{_bindir}/mergerepo %{_bindir}/modifyrepo %endif %files libs %license COPYING %{_libdir}/lib%{name}.so.* %files devel %doc build-py3/doc/html %{_libdir}/lib%{name}.so %{_libdir}/pkgconfig/%{name}.pc %{_includedir}/%{name}/ %files -n python3-%{name} %{python3_sitearch}/%{name}/ %{python3_sitearch}/%{name}-%{version}-py%{python3_version}.egg-info %changelog createrepo_c-0.17.0/doc/000077500000000000000000000000001400672373200150035ustar00rootroot00000000000000createrepo_c-0.17.0/doc/CMakeLists.txt000066400000000000000000000011751400672373200175470ustar00rootroot00000000000000ADD_SUBDIRECTORY (python) find_package(Doxygen) if(DOXYGEN_FOUND) CONFIGURE_FILE("Doxyfile.in.in" "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.in" @ONLY) add_custom_target(doc-c ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.in COMMENT "Building C API documentation with Doxygen" VERBATIM) endif(DOXYGEN_FOUND) IF(CREATEREPO_C_INSTALL_MANPAGES) INSTALL(FILES createrepo_c.8 mergerepo_c.8 modifyrepo_c.8 sqliterepo_c.8 DESTINATION "${CMAKE_INSTALL_MANDIR}/man8" COMPONENT bin) ENDIF(CREATEREPO_C_INSTALL_MANPAGES) ADD_CUSTOM_TARGET (doc) ADD_DEPENDENCIES (doc doc-python doc-c) createrepo_c-0.17.0/doc/Doxyfile.in.in000066400000000000000000002261731400672373200175360ustar00rootroot00000000000000# Doxyfile 1.8.1.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. 
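# Editorial aside: the "@VERSION@" placeholder used for PROJECT_NUMBER a few
# lines below is not Doxygen syntax; it is filled in by doc/CMakeLists.txt
# above via CONFIGURE_FILE(... @ONLY) when the build is configured. A minimal,
# hypothetical sketch of that substitution step (CMake; file names and the
# hard-coded version are illustrative, the released version per the spec file
# is 0.17.0):
#
#   set(VERSION "0.17.0")
#   configure_file(Doxyfile.in.in "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.in" @ONLY)
#   # the generated file then reads: PROJECT_NUMBER = 0.17.0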
DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "createrepo_c library" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = @VERSION@ # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "C library for metadata manipulation" # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. 
If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). 
This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. 
# If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. 
HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = YES # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. 
SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option.
# You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @CMAKE_SOURCE_DIR@/src # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. 
INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.h # The RECURSIVE tag can be used to specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = src/createrepo_c.c \ src/mergerepo_c.c # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = *_wrap.* \ cmd_parser.* \ *_internal.h # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match.
# The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. 
# For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. 
DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. 
To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = YES # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. 
USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. 
EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. 
RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. 
GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. 
When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. 
CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). 
This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES createrepo_c-0.17.0/doc/createrepo_c.8000066400000000000000000000204001400672373200175230ustar00rootroot00000000000000.\" Man page generated from reStructuredText. . .TH CREATEREPO_C 8 "2020-07-02" "" "" .SH NAME createrepo_c \- Create rpm-md format (xml-rpm-metadata) repository . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" -*- coding: utf-8 -*- . .SH SYNOPSIS .sp createrepo_c [options] .SH DESCRIPTION .sp Uses rpm packages from to create repodata. .sp If compiled with libmodulemd support modular metadata inside identified by the patterns below are automatically collected, merged and added to the repodata. .sp The patterns are: .INDENT 0.0 .INDENT 3.5 .INDENT 0.0 .IP \(bu 2 *.modulemd.yaml (recommended file name: N:S:V:C:A.modulemd.yaml) .IP \(bu 2 *.modulemd\-defaults.yaml (recommended file name: N.modulemd\-defaults.yaml) .IP \(bu 2 modules.yaml (recommended way of importing multiple documents at once) .UNINDENT .UNINDENT .UNINDENT .SH OPTIONS .SS \-V \-\-version .sp Show program\(aqs version number and exit. .SS \-q \-\-quiet .sp Run quietly. .SS \-v \-\-verbose .sp Run verbosely. .SS \-x \-\-excludes PACKAGE_NAME_GLOB .sp Path patterns to exclude, can be specified multiple times. The patterns are matched against relative paths to RPMs. Note that the feature uses \fBg\_pattern\_match()\fR function, not \fBglob()\fR, for semantic differences see https://developer.gnome.org/glib/stable/glib-Glob-style-pattern-matching.html .SS \-\-basedir BASEDIR .sp Basedir for path to directories. .SS \-u \-\-baseurl URL .sp Optional base URL location for all files. .SS \-g \-\-groupfile GROUPFILE .sp Path to groupfile to include in metadata. .SS \-s \-\-checksum CHECKSUM_TYPE .sp Choose the checksum type used in repomd.xml and for packages in the metadata. The default is now sha256. .SS \-p \-\-pretty .sp Make sure all xml generated is formatted (default) .SS \-d \-\-database .sp Generate sqlite databases for use with yum. .SS \-\-no\-database .sp Do not generate sqlite databases in the repository. .SS \-\-update .sp If metadata already exists in the outputdir and an rpm is unchanged (based on file size and mtime) since the metadata was generated, reuse the existing metadata rather than recalculating it. In the case of a large repository with only a few new or modified rpms this can significantly reduce I/O and processing time. 
.SS \-\-update\-md\-path .sp Existing metadata from this path are loaded and reused in addition to those present in the outputdir (works only with \-\-update). Can be specified multiple times. .SS \-\-skip\-stat .sp Skip the stat() call on a \-\-update, assumes if the filename is the same then the file is still the same (only use this if you\(aqre fairly trusting or gullible). .SS \-\-split .sp Run in split media mode. Rather than pass a single directory, take a set of directories corresponding to different volumes in a media set. Meta data is created in the first given directory .SS \-i \-\-pkglist FILENAME .sp Specify a text file which contains the complete list of files to include in the repository from the set found in the directory. File format is one package per line, no wildcards or globs. .SS \-n \-\-includepkg PACKAGE .sp Specify pkgs to include on the command line. Takes urls as well as local paths. .SS \-\-recycle\-pkglist .sp Useful only with \fB\-\-update\fR. Read the list of packages from old metadata, and reuse it instead of (perhaps expensive) directory traversal. This doesn't collide with explicitly selected packages by \fB\-\-pkglist\fR or \fB\-\-includepkg\fR, such packages are appended to the recycled list. This option is useful for I/O optimal repo modifications (package removal by \fB\-\-exclude\fR, and additions with \fB\-\-pkglist\fR). .SS \-o \-\-outputdir URL .sp Optional output directory. .SS \-S \-\-skip\-symlinks .sp Ignore symlinks of packages. .SS \-\-changelog\-limit NUM .sp Only import the last N changelog entries, from each rpm, into the metadata. .SS \-\-unique\-md\-filenames .sp Include the file\(aqs checksum in the metadata filename, helps HTTP caching (default). .SS \-\-simple\-md\-filenames .sp Do not include the file\(aqs checksum in the metadata filename. .SS \-\-retain\-old\-md NUM .sp Specify NUM to 0 to remove all repodata present in old repomd.xml or any other positive number to keep all old repodata. Use \-\-compatibility flag to get the behavior of original createrepo: Keep around the latest (by timestamp) NUM copies of the old repodata (works only for primary, filelists, other and their DB variants). .SS \-\-distro DISTRO .sp Distro tag and optional cpeid: \-\-distro\(aqcpeid,textname\(aq. .SS \-\-content CONTENT_TAGS .sp Tags for the content in the repository. .SS \-\-repo REPO_TAGS .sp Tags to describe the repository itself. .SS \-\-revision REVISION .sp User\-specified revision for this repository. .SS \-\-set\-timestamp\-to\-revision .sp Set timestamp fields in repomd.xml and last modification times of created repodata to a value given with \-\-revision. This requires \-\-revision to be a timestamp formatted in \(aqdate +%s\(aq format. .SS \-\-read\-pkgs\-list READ_PKGS_LIST .sp Output the paths to the pkgs actually read useful with \-\-update. .SS \-\-workers .sp Number of workers to spawn to read rpms. .SS \-\-xz .sp Use xz for repodata compression. .SS \-\-compress\-type COMPRESSION_TYPE .sp Which compression type to use. .SS \-\-general\-compress\-type COMPRESSION_TYPE .sp Which compression type to use (even for primary, filelists and other xml). .SS \-\-zck .sp Generate zchunk files as well as the standard repodata. .SS \-\-zck\-dict\-dir ZCK_DICT_DIR .sp Directory containing compression dictionaries for use by zchunk .SS \-\-keep\-all\-metadata .sp Keep all additional metadata (not primary, filelists and other xml or sqlite files, nor their compressed variants) from source repository during update. 
.SS \-\-compatibility .sp Enforce maximal compatibility with classical createrepo (Affects only: \-\-retain\-old\-md). .SS \-\-retain\-old\-md\-by\-age AGE .sp During \-\-update, remove all files in repodata/ which are older than the specified period of time (e.g. \(aq2h\(aq, \(aq30d\(aq, ...). Available units: m \- minutes, h \- hours, d \- days. .SS \-c \-\-cachedir CACHEDIR .sp Set path to the cache directory. .SS \-\-deltas .sp Tells createrepo to generate deltarpms and the delta metadata. .SS \-\-oldpackagedirs PATH .sp Paths to look for older pkgs to delta against. Can be specified multiple times. .SS \-\-num\-deltas INT .sp The number of older versions to make deltas against. Defaults to 1. .SS \-\-max\-delta\-rpm\-size MAX_DELTA_RPM_SIZE .sp Max size of an rpm to run deltarpm against (in bytes). .SS \-\-local\-sqlite .sp Generate sqlite DBs locally (in a directory for temporary files). Sometimes sqlite has trouble generating DBs on an NFS mount; use this option in such cases. This option could lead to higher memory consumption if TMPDIR is set to /tmp or not set at all, because then /tmp is used and /tmp is often a ramdisk. .SS \-\-cut\-dirs NUM .sp Ignore NUM directory components in location_href during repodata generation. .SS \-\-location\-prefix PREFIX .sp Append this prefix before location_href in the output repodata. .SS \-\-repomd\-checksum CHECKSUM_TYPE .sp Checksum type to be used in repomd.xml. .SS \-\-error\-exit\-val .sp Exit with retval 2 if there were any errors during processing. .SS \-\-ignore\-lock .sp Expert (risky) option: Ignore an existing .repodata/. (Remove the existing .repodata/ and create an empty new one to serve as a lock for other createrepo instances. For the repodata generation, a different temporary dir with the name in format .repodata.time.microseconds.pid/ will be used). NOTE: Use this option at your own risk! If two createrepos run simultaneously, then the state of the generated metadata is not guaranteed \- it can be inconsistent and wrong. .\" Generated by docutils manpage writer. . createrepo_c-0.17.0/doc/logo.png [binary PNG image data omitted: the createrepo_c logo with the caption "C implementation of createrepo"]
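The \-\-unique\-md\-filenames behaviour documented earlier in this manual page (the file's checksum prefixed to each metadata file name) is also exposed through the project's Python bindings used in the examples further below. A minimal sketch; the updateinfo path here is only an illustration, not a file shipped in this tree:

import createrepo_c as cr

# Hypothetical metadata file; fill() computes checksum, size and timestamp,
# rename_file() then prefixes the checksum to the file name on disk
# (the --unique-md-filenames layout) and updates location_href accordingly.
rec = cr.RepomdRecord("updateinfo", "repodata/updateinfo.xml.gz")
rec.fill(cr.SHA256)
rec.rename_file()
print(rec.location_href)  # e.g. repodata/<sha256>-updateinfo.xml.gz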
createrepo_c-0.17.0/doc/mergerepo_c.8000066400000000000000000000066241400672373200173710ustar00rootroot00000000000000.\" Man page generated from reStructuredText. . .TH MERGEREPO_C 8 "2020-04-29" "" "" .SH NAME mergerepo_c \- Merge multiple rpm-md format repositories together . .nr rst2man-indent-level 0 .
.de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" -*- coding: utf-8 -*- . .SH SYNOPSIS .sp mergerepo_c [options] \-\-repo repo1 \-\-repo repo2 .SH OPTIONS .SS \-\-version .sp Show program\(aqs version number and exit .SS \-r \-\-repo REPOS .sp Repo url .SS \-\-repo\-prefix\-search OLD_PREFIX .sp Repository prefix to be replaced by NEW_PREFIX. .SS \-\-repo\-prefix\-replace NEW_PREFIX .sp Repository prefix URL by which the OLD_PREFIX is replaced. .SS \-a \-\-archlist ARCHLIST .sp Defaults to all arches \- otherwise specify arches .SS \-d \-\-database .SS \-\-no\-database .SS \-v \-\-verbose .SS \-o \-\-outputdir OUTPUTDIR .sp Location to create the repository .SS \-\-nogroups .sp Do not merge group (comps) metadata .SS \-\-noupdateinfo .sp Do not merge updateinfo metadata .SS \-\-compress\-type COMPRESS_TYPE .sp Which compression type to use .SS \-\-zck .sp Generate zchunk files as well as the standard repodata. .SS \-\-zck\-dict\-dir ZCK_DICT_DIR .sp Directory containing compression dictionaries for use by zchunk .SS \-\-method MERGE_METHOD .sp Specify merge method for packages with the same name and arch. .sp Available merge methods: .P .B repo .RS 3 (default) select package from the first specified repository. .RE .sp .B nvr .RS 3 select package with the higher version and release. .RE .sp .B ts .RS 3 select package with the newest timestamp. .RE .SS \-\-all .sp Include all packages with the same name and arch if version or release is different. If used \-\-method argument is ignored! .SS \-\-noarch\-repo URL .sp Packages with noarch architecture will be replaced by package from this repo if exists in it. .SS \-\-unique\-md\-filenames .sp Include the file\(aqs checksum in the metadata filename, helps HTTP caching (default). .SS \-\-simple\-md\-filenames .sp Do not include the file\(aqs checksum in the metadata filename. .SS \-\-omit\-baseurl .sp Don\(aqt add a baseurl to packages that don\(aqt have one before. .SS \-k \-\-koji .sp Enable koji mergerepos behaviour. (Optionally select simple mode with: \-\-simple) .SS \-\-simple .sp Enable koji specific simple merge mode where we keep even packages with identical NEVRAs. Only works with combination with \-\-koji/\-k. .SS \-\-pkgorigins .sp Enable standard mergerepos behavior while also providing the pkgorigins file for koji. .SS \-\-arch\-expand .sp Add multilib architectures for specified archlist and expand all of them. Only works with combination with \-\-archlist. .SS \-g \-\-groupfile GROUPFILE .sp Path to groupfile to include in metadata. .SS \-b \-\-blocked FILE .sp A file containing a list of srpm names to exclude from the merged repo. Only works with combination with \-\-koji/\-k. .\" Generated by docutils manpage writer. . createrepo_c-0.17.0/doc/modifyrepo_c.8000066400000000000000000000041641400672373200175600ustar00rootroot00000000000000.\" Man page generated from reStructuredText. . 
.TH MODIFYREPO_C 8 "2019-07-18" "" "" .SH NAME modifyrepo_c \- Modify a repomd.xml of rpm-md format repository . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" -*- coding: utf-8 -*- . .SH SYNOPSIS .sp modifyrepo_c [options] .sp modifyrepo_c \-\-remove .sp modifyrepo_c [options] \-\-batchfile .SH OPTIONS .SS \-\-version .sp Show program\(aqs version number and exit. .SS \-\-mdtype MDTYPE .sp Specific datatype of the metadata, will be derived from the filename if not specified. .SS \-\-remove .sp Remove specified file from repodata. .SS \-\-compress .sp Compress the new repodata before adding it to the repo. (default) .SS \-\-no\-compress .sp Do not compress the new repodata before adding it to the repo. .SS \-\-compress\-type COMPRESS_TYPE .sp Compression format to use. .SS \-s \-\-checksum SUMTYPE .sp Specify the checksum type to use. (default: sha256) .SS \-\-unique\-md\-filenames .sp Include the file\(aqs checksum in the filename, helps with proxies. (default) .SS \-\-simple\-md\-filenames .sp Do not include the file\(aqs checksum in the filename. .SS \-\-verbose .sp Verbose output. .SS \-f \-\-batchfile BATCHFILE .sp Batch file. .SS \-\-new\-name NEWFILENAME .sp New filename for the file .SS \-\-zck .sp Generate zchunk files as well as the standard repodata. .SS \-\-zck\-dict\-dir ZCK_DICT_DIR .sp Directory containing compression dictionaries for use by zchunk .\" Generated by docutils manpage writer. . createrepo_c-0.17.0/doc/python/000077500000000000000000000000001400672373200163245ustar00rootroot00000000000000createrepo_c-0.17.0/doc/python/CMakeLists.txt000066400000000000000000000003771400672373200210730ustar00rootroot00000000000000ADD_CUSTOM_TARGET (doc-python PYTHONPATH=${CMAKE_BINARY_DIR}/src/python sphinx-build -E -b html ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/html COMMENT "Building Python API documentation with Sphinx") createrepo_c-0.17.0/doc/python/conf.py000066400000000000000000000177001400672373200176300ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # createrepo_c documentation build configuration file, created by # sphinx-quickstart on Tue Jan 8 13:45:14 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os import createrepo_c # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'createrepo_c' copyright = u'2013, Red Hat' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = createrepo_c.VERSION # The full version, including alpha/beta/rc tags. release = '%s-1' % version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'createrepo_cdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'createrepo_c.tex', u'createrepo_c Documentation', u'Tomas Mlcoch', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'createrepo_c', u'createrepo_c Documentation', [u'Tomas Mlcoch'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'createrepo_c', u'createrepo_c Documentation', u'Tomas Mlcoch', 'createrepo_c', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote' def skip(app, what, name, obj, skip, options): if what == "module" and type(obj).__name__ == "builtin_function_or_method": return False if name == "__init__": return type(obj).__name__ == "wrapper_descriptor" return skip def setup(app): app.connect("autodoc-skip-member", skip) createrepo_c-0.17.0/doc/python/index.rst000066400000000000000000000004631400672373200201700ustar00rootroot00000000000000############ createrepo_c ############ A library providing C and Python API for reading headers of rpm packages and reading and writing yum repository metadata. Contents: .. toctree:: :maxdepth: 2 lib Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` createrepo_c-0.17.0/doc/python/lib.rst000066400000000000000000000010461400672373200176250ustar00rootroot00000000000000.. _lib: The Createrepo_c Library ======================== Createrepo_c includes several classes. .. automodule:: createrepo_c :members: :undoc-members: :inherited-members: :private-members: :member-order: bysource .. autofunction:: xml_dump_primary .. autofunction:: xml_dump_filelists .. autofunction:: xml_dump_other .. autofunction:: xml_dump .. autofunction:: checksum_name_str .. autofunction:: checksum_type .. autofunction:: compress_file_with_stat .. autofunction:: compression_suffix .. autofunction:: detect_compression createrepo_c-0.17.0/doc/sqliterepo_c.8000066400000000000000000000033541400672373200175720ustar00rootroot00000000000000.\" Man page generated from reStructuredText. . .TH SQLITEREPO_C 8 "2017-02-23" "" "" .SH NAME sqliterepo_c \- Generate sqlite db files for a repository in rpm-md format . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .\" -*- coding: utf-8 -*- . .SH SYNOPSIS .sp sqliterepo_c [options] .SH OPTIONS .SS \-V \-\-version .sp Show program\(aqs version number and exit. .SS \-q \-\-quiet .sp Run quietly. .SS \-v \-\-verbose .sp Run verbosely. .SS \-f \-\-force .sp Overwrite existing DBs. .SS \-\-keep\-old .sp Do not remove old DBs. Use only with combination with \-\-force. .SS \-\-xz .sp Use xz for repodata compression. .SS \-\-compress\-type .sp Which compression type to use. .SS \-\-checksum .sp Which checksum type to use in repomd.xml for sqlite DBs. .SS \-\-local\-sqlite .sp Gen sqlite DBs locally (into a directory for temporary files). Sometimes, sqlite has a trouble to gen DBs on a NFS mount, use this option in such cases. This option could lead to a higher memory consumption if TMPDIR is set to /tmp or not set at all, because then the /tmp is used and /tmp dir is often a ramdisk. .\" Generated by docutils manpage writer. . 
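The core job of sqliterepo_c described above, generating sqlite databases for an already existing repository, can be approximated with the Python bindings demonstrated by the examples that follow. A minimal sketch for the primary database only; compression, the filelists/other databases, dbinfo_update() and the repomd.xml update are omitted, and the repository path is an assumption:

import os
import createrepo_c as cr

def gen_primary_sqlite(repo_path):
    # Find primary.xml via repomd.xml
    repomd = cr.Repomd(os.path.join(repo_path, "repodata/repomd.xml"))
    primary_href = None
    for rec in repomd.records:
        if rec.type == "primary":
            primary_href = rec.location_href
            break
    # Stream packages from primary.xml straight into a new sqlite database
    pri_db = cr.PrimarySqlite(os.path.join(repo_path, "repodata/primary.sqlite"))
    cr.xml_parse_primary(os.path.join(repo_path, primary_href),
                         pkgcb=pri_db.add_pkg, do_files=True)
    pri_db.close()

if __name__ == "__main__":
    gen_primary_sqlite("repo/")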
createrepo_c-0.17.0/examples/000077500000000000000000000000001400672373200160545ustar00rootroot00000000000000createrepo_c-0.17.0/examples/python/000077500000000000000000000000001400672373200173755ustar00rootroot00000000000000createrepo_c-0.17.0/examples/python/contenthash_calculation.py000077500000000000000000000024501400672373200246470ustar00rootroot00000000000000#!/usr/bin/env python import sys import os import os.path import hashlib import createrepo_c as cr REPO_PATH = "repo/" class CalculationException(Exception): pass def calculate_contenthash(path): if not os.path.isdir(path) or \ not os.path.isdir(os.path.join(path, "repodata/")): raise AttributeError("Not a repo: {0}".format(path)) repomd_path = os.path.join(path, "repodata/repomd.xml") repomd = cr.Repomd(repomd_path) primary_path = None for rec in repomd.records: if rec.type == "primary": primary_path = rec.location_href break if not primary_path: raise CalculationException("primary metadata are missing") pkgids = [] def pkgcb(pkg): pkgids.append("{0}{1}{2}".format(pkg.pkgId, pkg.location_href, pkg.location_base or '')) cr.xml_parse_primary(os.path.join(path, primary_path), pkgcb=pkgcb) contenthash = hashlib.new("sha256") for pkgid in sorted(pkgids): contenthash.update(pkgid.encode('utf-8')) return contenthash.hexdigest() if __name__ == "__main__": path = REPO_PATH if len(sys.argv) == 2: path = sys.argv[1] print(calculate_contenthash(path)) createrepo_c-0.17.0/examples/python/repodata_parsing.py000077500000000000000000000166641400672373200233110ustar00rootroot00000000000000#!/usr/bin/env python import os import os.path import createrepo_c as cr REPO_PATH = "repo/" def print_package_info(pkg): def print_pcors(lst, requires=False): for item in lst: print(" Name: %s" % item[cr.PCOR_ENTRY_NAME]) print(" Flags: %s" % item[cr.PCOR_ENTRY_FLAGS]) print(" Epoch: %s" % item[cr.PCOR_ENTRY_EPOCH]) print(" Version: %s" % item[cr.PCOR_ENTRY_VERSION]) print(" Release: %s" % item[cr.PCOR_ENTRY_RELEASE]) if requires: print(" Pre: %s" % item[cr.PCOR_ENTRY_PRE]) print(" +-----------------------------------+") def print_files(lst): for item in lst: print(" Name: %s" % item[cr.FILE_ENTRY_NAME]) print(" Path: %s" % item[cr.FILE_ENTRY_PATH]) print(" Type: %s" % item[cr.FILE_ENTRY_TYPE]) print(" +-----------------------------------+") def print_changelogs(lst): for item in lst: print(" Author: %s" % item[cr.CHANGELOG_ENTRY_AUTHOR]) print(" Date: %s" % item[cr.CHANGELOG_ENTRY_DATE]) print(" Changelog: %s" % item[cr.CHANGELOG_ENTRY_CHANGELOG]) print(" +-----------------------------------+") print("+=======================================+") print(" %s" % pkg.name) print("+=======================================+") print("NEVRA: %s" % pkg.nevra()) print("NVRA: %s" % pkg.nvra()) print("Name: %s" % pkg.name) print("Checksum (pkgId): %s" % pkg.pkgId) print("Checksum type: %s" % pkg.checksum_type) print("Arch: %s" % pkg.arch) print("Version: %s" % pkg.version) print("Epoch: %s" % pkg.epoch) print("Release: %s" % pkg.release) print("Summary: %s" % pkg.summary) print("Description: %s" % pkg.description) print("URL: %s" % pkg.url) print("Time file: %s" % pkg.time_file) print("Time build: %s" % pkg.time_build) print("License: %s" % pkg.rpm_license) print("Vendor: %s" % pkg.rpm_vendor) print("Group: %s" % pkg.rpm_group) print("Buildhost: %s" % pkg.rpm_buildhost) print("Source RPM: %s" % pkg.rpm_sourcerpm) print("Header start: %s" % pkg.rpm_header_start) print("Header end: %s" % pkg.rpm_header_end) print("Packager: %s" % pkg.rpm_packager) print("Size package: %s" % 
pkg.size_package) print("Size installed: %s" % pkg.size_installed) print("Size archive: %s" % pkg.size_archive) print("Location href: %s" % pkg.location_href) print("Location base: %s" % pkg.location_base) print("Requires:") print_pcors(pkg.requires, requires=True) print("Provides:") print_pcors(pkg.provides) print("Conflicts:") print_pcors(pkg.conflicts) print("Obsoletes:") print_pcors(pkg.obsoletes) print("Files:") print_files(pkg.files) print("Changelogs:") print_changelogs(pkg.changelogs) def first_method(): """Use of this method is discouraged.""" md = cr.Metadata() md.locate_and_load_xml(REPO_PATH) for key in md.keys(): pkg = md.get(key) print_package_info(pkg) def second_method(): """Preferred method for repodata parsing. Important callbacks for repodata parsing: newpkgcb -------- Via newpkgcb (package callback) you can directly decide whether the current package element should be parsed or not. This decision can be based on three values that are available as attributes in the element. These values are: - pkgId (package checksum) - name (package name) - arch (package architecture) (Note: This is applicable only for filelists.xml and other.xml, primary.xml doesn't contain this information in the package element) If newpkgcb returns a package object, the parsed data will be loaded into this package object. If it returns None, the package element is skipped. This can help you reduce memory requirements, because unwanted packages can be skipped without having to be stored in memory. If no newpkgcb is specified, a default callback returning a new package object is used. pkgcb ----- Callback called when parsing of a package element is done. Its argument is a package object that has been previously returned by the newpkgcb. This function should return True if parsing should continue or False if parsing should be interrupted. Note: Both callbacks are optional, BUT at least one MUST be used (newpkgcb or pkgcb)! warningcb --------- The warning callback is called when a non-fatal oddity of the parsed XML is detected. If True is returned, parsing continues. If the return value is False, parsing is terminated. This callback is optional. """ primary_xml_path = None filelists_xml_path = None other_xml_path = None # # repomd.xml parsing # # Parse repomd.xml to get paths (1. Method - Repomd object based) # Pros: Easy to use repomd = cr.Repomd(os.path.join(REPO_PATH, "repodata/repomd.xml")) # Parse repomd.xml (2. Method - Parser based) # Pros: Warning callback could be specified def warningcb(warning_type, message): """Optional callback for warnings about weird stuff and formatting in XML. :param warning_type: Integer value. One from the XML_WARNING_* constants. :param message: String message.
""" print("PARSER WARNING: %s" % message) return True repomd2 = cr.Repomd() cr.xml_parse_repomd(os.path.join(REPO_PATH, "repodata/repomd.xml"), repomd2, warningcb) # Get stuff we need # (repomd or repomd2 could be used, both have the same values) for record in repomd.records: if record.type == "primary": primary_xml_path = record.location_href elif record.type == "filelists": filelists_xml_path = record.location_href elif record.type == "other": other_xml_path = record.location_href # # Main XML metadata parsing (primary, filelists, other) # packages = {} def pkgcb(pkg): # Called when whole package entry in xml is parsed packages[pkg.pkgId] = pkg def newpkgcb(pkgId, name, arch): # Called when new package entry is encountered # And only opening element is parsed # This function has to return a package to which # parsed data will be added or None if this package # should be skiped. return packages.get(pkgId, None) # Option do_files tells primary parser to skip element of package. # If you plan to parse filelists.xml after the primary.xml, always # set do_files to False. cr.xml_parse_primary(os.path.join(REPO_PATH, primary_xml_path), pkgcb=pkgcb, do_files=False, warningcb=warningcb) cr.xml_parse_filelists(os.path.join(REPO_PATH, filelists_xml_path), newpkgcb=newpkgcb, warningcb=warningcb) cr.xml_parse_other(os.path.join(REPO_PATH, other_xml_path), newpkgcb=newpkgcb, warningcb=warningcb) for pkg in packages.values(): print_package_info(pkg) if __name__ == "__main__": print('"All in one shot" method:') first_method() print() print("Callback based method:") second_method() createrepo_c-0.17.0/examples/python/repomd_parsing.py000077500000000000000000000022231400672373200227620ustar00rootroot00000000000000#!/usr/bin/env python import os import os.path import createrepo_c as cr REPO_PATH = "repo/" def parse_repomd(path): repomd = cr.Repomd(path) print("Revision:", repomd.revision) if repomd.contenthash: print("Contenthash:", repomd.contenthash) print("Contenthash type:", repomd.contenthash_type) print("Repo tags:", repomd.repo_tags) print("Content tags:", repomd.content_tags) print("Distro tags:", repomd.distro_tags) print() for rec in repomd.records: print("Type:", rec.type) print("Location href:", rec.location_href) print("Location base:", rec.location_base) print("Checksum:", rec.checksum) print("Checksum type:", rec.checksum_type) print("Checksum open:", rec.checksum_open) print("Checksum open type:", rec.checksum_open_type) print("Timestamp:", rec.timestamp) print("Size:", rec.size) print("Size open:", rec.size_open) if rec.db_ver: print("Db version:", rec.db_ver) print() if __name__ == "__main__": repomd_path = os.path.join(REPO_PATH, "repodata/repomd.xml") parse_repomd(repomd_path) createrepo_c-0.17.0/examples/python/simple_createrepo.py000077500000000000000000000063411400672373200234600ustar00rootroot00000000000000#!/usr/bin/env python import os import sys import shutil import os.path import createrepo_c as cr def do_repodata(path): # Prepare repodata/ directory repodata_path = os.path.join(path, "repodata") if os.path.exists(repodata_path): x = 0 while True: new_repodata_path = "%s_%s" % (repodata_path, x) if not os.path.exists(new_repodata_path): shutil.move(repodata_path, new_repodata_path) break x += 1 os.mkdir(repodata_path) # Prepare metadata files repomd_path = os.path.join(repodata_path, "repomd.xml") pri_xml_path = os.path.join(repodata_path, "primary.xml.gz") fil_xml_path = os.path.join(repodata_path, "filelists.xml.gz") oth_xml_path = os.path.join(repodata_path, "other.xml.gz") 
pri_db_path = os.path.join(repodata_path, "primary.sqlite") fil_db_path = os.path.join(repodata_path, "filelists.sqlite") oth_db_path = os.path.join(repodata_path, "other.sqlite") pri_xml = cr.PrimaryXmlFile(pri_xml_path) fil_xml = cr.FilelistsXmlFile(fil_xml_path) oth_xml = cr.OtherXmlFile(oth_xml_path) pri_db = cr.PrimarySqlite(pri_db_path) fil_db = cr.FilelistsSqlite(fil_db_path) oth_db = cr.OtherSqlite(oth_db_path) # List directory and prepare list of files to process pkg_list = [] for filename in os.listdir(path): filename = os.path.join(path, filename) if os.path.isfile(filename) and filename.endswith(".rpm"): pkg_list.append(filename) pri_xml.set_num_of_pkgs(len(pkg_list)) fil_xml.set_num_of_pkgs(len(pkg_list)) oth_xml.set_num_of_pkgs(len(pkg_list)) # Process all packages for filename in pkg_list: pkg = cr.package_from_rpm(filename) pkg.location_href = os.path.basename(filename) print("Processing: %s" % pkg.nevra()) pri_xml.add_pkg(pkg) fil_xml.add_pkg(pkg) oth_xml.add_pkg(pkg) pri_db.add_pkg(pkg) fil_db.add_pkg(pkg) oth_db.add_pkg(pkg) pri_xml.close() fil_xml.close() oth_xml.close() # Note: DBs are still open! We have to calculate checksums of xml files # and insert them to the databases first! # Prepare repomd.xml repomd = cr.Repomd() # Add records into the repomd.xml repomdrecords = (("primary", pri_xml_path, pri_db), ("filelists", fil_xml_path, fil_db), ("other", oth_xml_path, oth_db), ("primary_db", pri_db_path, None), ("filelists_db", fil_db_path, None), ("other_db", oth_db_path, None)) for name, path, db_to_update in repomdrecords: record = cr.RepomdRecord(name, path) record.fill(cr.SHA256) if (db_to_update): db_to_update.dbinfo_update(record.checksum) db_to_update.close() repomd.set_record(record) # Write repomd.xml open(repomd_path, "w").write(repomd.xml_dump()) # DONE! if __name__ == "__main__": if len(sys.argv) != 2 or not os.path.isdir(sys.argv[1]): print("Usage: %s " % (sys.argv[0])) sys.exit(1) do_repodata(sys.argv[1]) print("Repository created in %s" % sys.argv[1]) createrepo_c-0.17.0/examples/python/simple_modifyrepo.py000077500000000000000000000014761400672373200235100ustar00rootroot00000000000000#!/usr/bin/env python """ An example of inserting updateinfo.xml into repodata. 
""" import os import shutil import createrepo_c as cr REPO_PATH = "repo/" def modifyrepo(filename, repodata): repodata = os.path.join(repodata, 'repodata') uinfo_xml = os.path.join(repodata, os.path.basename(filename)) shutil.copyfile(filename, uinfo_xml) uinfo_rec = cr.RepomdRecord('updateinfo', uinfo_xml) uinfo_rec.fill(cr.SHA256) uinfo_rec.rename_file() repomd_xml = os.path.join(repodata, 'repomd.xml') repomd = cr.Repomd(repomd_xml) repomd.set_record(uinfo_rec) with open(repomd_xml, 'w') as repomd_file: repomd_file.write(repomd.xml_dump()) if __name__ == '__main__': # Generate the updateinfo.xml exec(open("./updateinfo_gen_02.py").read()) modifyrepo(OUT_FILE, REPO_PATH) createrepo_c-0.17.0/examples/python/updateinfo_gen_01.py000077500000000000000000000023331400672373200232420ustar00rootroot00000000000000#!/usr/bin/env python import datetime import createrepo_c as cr def generate(): pkg = cr.UpdateCollectionPackage() pkg.name = "Foo" pkg.version = "1.2.3" pkg.release = "1" pkg.epoch = "0" pkg.arch = "noarch" pkg.src = "foo.src.rpm" pkg.filename = "foo-1.2.3-1.rpm" pkg.sum = "123456789" pkg.sum_type = cr.MD5 pkg.reboot_suggested = False col = cr.UpdateCollection() col.shortname = "Bar-product" col.name = "Bar Product" col.append(pkg) ref = cr.UpdateReference() ref.href = "http://foo.bar/foobar" ref.id = "123" ref.type = "self" ref.title = "Foo Update" rec = cr.UpdateRecord() rec.fromstr = "security@foo.bar" rec.status = "final" rec.type = "enhancement" rec.version = "1" rec.id = "UPDATE-1" rec.title = "Bar Product Update" rec.issued_date = datetime.datetime(2014, 8, 14) rec.updated_date = datetime.datetime(2014, 8, 14) rec.rights = "Copyright 2014 Bar Inc" rec.summary = "An update for Bar" rec.description = "Fixes a bug" rec.append_collection(col) rec.append_reference(ref) ui = cr.UpdateInfo() ui.append(rec) print(ui.xml_dump()) if __name__ == "__main__": generate() createrepo_c-0.17.0/examples/python/updateinfo_gen_02.py000077500000000000000000000025201400672373200232410ustar00rootroot00000000000000#!/usr/bin/env python import datetime import createrepo_c as cr OUT_FILE = "updateinfo.xml.gz" def generate(): pkg = cr.UpdateCollectionPackage() pkg.name = "Foo" pkg.version = "1.2.3" pkg.release = "1" pkg.epoch = "0" pkg.arch = "noarch" pkg.src = "foo.src.rpm" pkg.filename = "foo-1.2.3-1.rpm" pkg.sum = "123456789" pkg.sum_type = cr.MD5 pkg.reboot_suggested = False col = cr.UpdateCollection() col.shortname = "Bar-product" col.name = "Bar Product" col.append(pkg) ref = cr.UpdateReference() ref.href = "http://foo.bar/foobar" ref.id = "123" ref.type = "self" ref.title = "Foo Update" rec = cr.UpdateRecord() rec.fromstr = "security@foo.bar" rec.status = "final" rec.type = "enhancement" rec.version = "1" rec.id = "UPDATE-1" rec.title = "Bar Product Update" rec.issued_date = datetime.datetime(2014, 8, 14) rec.updated_date = datetime.datetime(2014, 8, 14) rec.rights = "Copyright 2014 Bar Inc" rec.summary = "An update for Bar" rec.description = "Fixes a bug" rec.append_collection(col) rec.append_reference(ref) chunk = cr.xml_dump_updaterecord(rec) f = cr.UpdateInfoXmlFile(OUT_FILE) f.add_chunk(chunk) f.close() print("See the %s" % OUT_FILE) if __name__ == "__main__": generate() createrepo_c-0.17.0/examples/python/updateinfo_parsing.py000077500000000000000000000046311400672373200236370ustar00rootroot00000000000000#!/usr/bin/env python import os import os.path import optparse import createrepo_c as cr def parse_updateinfo(path): uinfo = cr.UpdateInfo(path) for update in uinfo.updates: print("From: %s" % 
update.fromstr) print("Status: %s" % update.status) print("Type: %s" % update.type) print("Version: %s" % update.version) print("Id: %s" % update.id) print("Title: %s" % update.title) print("Issued date: %s" % update.issued_date) print("Updated date: %s" % update.updated_date) print("Rights: %s" % update.rights) print("Release: %s" % update.release) print("Pushcount: %s" % update.pushcount) print("Severity: %s" % update.severity) print("Summary: %s" % update.summary) print("Description: %s" % update.description) print("Solution: %s" % update.solution) print("References:") for ref in update.references: print(" Href: %s" % ref.href) print(" Id: %s" % ref.id) print(" Type: %s" % ref.type) print(" Title: %s" % ref.title) print(" ----------------------------") print("Pkglist (collections):") for col in update.collections: print(" Short: %s" % col.shortname) print(" name: %s" % col.name) print(" Packages:") for pkg in col.packages: print(" Name: %s" % pkg.name) print(" Version: %s" % pkg.version) print(" Release: %s" % pkg.release) print(" Epoch: %s" % pkg.epoch) print(" Arch: %s" % pkg.arch) print(" Src: %s" % pkg.src) print(" Filename: %s" % pkg.filename) print(" Sum: %s" % pkg.sum) print(" Sum type: %s (%s)" % (pkg.sum_type, cr.checksum_name_str(pkg.sum_type))) print(" Reboot suggested: %s" % pkg.reboot_suggested) print(" ----------------------------") print("==============================") if __name__ == "__main__": parser = optparse.OptionParser(usage="%prog PATH_TO_UPDATEINFO") options, args = parser.parse_args() if len(args) != 1: parser.error("You have to specify exactly one update info") parse_updateinfo(args[0]) createrepo_c-0.17.0/pyproject.toml000066400000000000000000000002141400672373200171470ustar00rootroot00000000000000[build-system] requires = [ "setuptools >= 40.8.0", "wheel >= 0.29.0", "scikit-build" ] build-backend = 'setuptools.build_meta' createrepo_c-0.17.0/requirements-dev.txt000066400000000000000000000000361400672373200202750ustar00rootroot00000000000000setuptools wheel scikit-build createrepo_c-0.17.0/setup.py000066400000000000000000000036501400672373200157540ustar00rootroot00000000000000from skbuild import setup with open('VERSION.cmake', 'r+') as version_file: lines = version_file.read().splitlines() # parse out digit characters from the line, convert to int numbers = [int("".join(filter(str.isdigit, line))) for line in lines] # build version string version = '{major}.{minor}.{patch}'.format( major=numbers[0], minor=numbers[1], patch=numbers[2] ) setup( name='createrepo_c', description='C implementation of createrepo', version=version, license='GPLv2+', author='RPM Software Management', author_email='rpm-ecosystem@lists.rpm.org', url='https://github.com/rpm-software-management', classifiers=[ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 'Operating System :: POSIX :: Linux', 'Programming Language :: C', 'Topic :: System :: Software Distribution', 'Topic :: System :: Systems Administration', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], packages=['createrepo_c'], package_dir={ 'createrepo_c': 'src/python/createrepo_c' }, cmake_args=[ '-DBIN_INSTALL_DIR:PATH=src/python/createrepo_c/data/bin', '-DBUILD_LIBCREATEREPO_C_SHARED:BOOL=OFF', '-DCREATEREPO_C_INSTALL_DEVELOPMENT:BOOL=OFF', '-DCREATEREPO_C_INSTALL_MANPAGES:BOOL=OFF', '-DENABLE_BASHCOMP:BOOL=OFF', '-DENABLE_DRPM:BOOL=OFF', ], 
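        # The CMake flags above build the bundled C library statically and
        # skip installing development files, man pages, bash completion and
        # drpm support, which are not needed for the Python package.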
cmake_languages=['C'], entry_points={ 'console_scripts': [ 'createrepo_c=createrepo_c:createrepo_c', 'mergerepo_c=createrepo_c:mergerepo_c', 'modifyrepo_c=createrepo_c:modifyrepo_c', 'sqliterepo_c=createrepo_c:sqliterepo_c' ] }, ) createrepo_c-0.17.0/src/000077500000000000000000000000001400672373200150255ustar00rootroot00000000000000createrepo_c-0.17.0/src/CMakeLists.txt000066400000000000000000000106671400672373200175770ustar00rootroot00000000000000SET (createrepo_c_SRCS checksum.c compression_wrapper.c createrepo_shared.c deltarpms.c dumper_thread.c error.c helpers.c load_metadata.c locate_metadata.c misc.c modifyrepo_shared.c package.c parsehdr.c parsepkg.c repomd.c sqlite.c threads.c updateinfo.c xml_dump.c xml_dump_deltapackage.c xml_dump_filelists.c xml_dump_other.c xml_dump_primary.c xml_dump_repomd.c xml_dump_updateinfo.c xml_file.c xml_parser.c xml_parser_filelists.c xml_parser_other.c xml_parser_primary.c xml_parser_repomd.c xml_parser_updateinfo.c koji.c) SET(headers checksum.h compression_wrapper.h constants.h mergerepo_c.h createrepo_c.h deltarpms.h error.h helpers.h load_metadata.h locate_metadata.h misc.h modifyrepo_shared.h package.h parsehdr.h parsepkg.h repomd.h sqlite.h threads.h updateinfo.h version.h xml_dump.h xml_file.h koji.h xml_parser.h) IF (BUILD_LIBCREATEREPO_C_SHARED) SET (createrepo_c_library_type SHARED) ELSE () SET (createrepo_c_library_type STATIC) ENDIF () ADD_LIBRARY(libcreaterepo_c ${createrepo_c_library_type} ${createrepo_c_SRCS}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${BZIP2_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${CURL_LIBRARY}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${GLIB2_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${Libmagic_LIBRARY}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${LIBMODULEMD_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${LIBXML2_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${LZMA_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${OPENSSL_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${RPMDB_LIBRARY}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${SQLITE3_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${ZLIB_LIBRARY}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${ZCK_LIBRARIES}) TARGET_LINK_LIBRARIES(libcreaterepo_c ${DRPM_LIBRARIES}) SET_TARGET_PROPERTIES(libcreaterepo_c PROPERTIES OUTPUT_NAME "createrepo_c" SOVERSION ${CR_MAJOR} VERSION "${VERSION}" COMPILE_DEFINITIONS "G_LOG_DOMAIN=\"${G_LOG_DOMAIN}\"") ADD_EXECUTABLE(createrepo_c createrepo_c.c cmd_parser.c) TARGET_LINK_LIBRARIES(createrepo_c libcreaterepo_c ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES}) ADD_EXECUTABLE(mergerepo_c mergerepo_c.c) TARGET_LINK_LIBRARIES(mergerepo_c libcreaterepo_c ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES} ${LIBMODULEMD_LIBRARIES}) ADD_EXECUTABLE(modifyrepo_c modifyrepo_c.c) TARGET_LINK_LIBRARIES(modifyrepo_c libcreaterepo_c ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES}) ADD_EXECUTABLE(sqliterepo_c sqliterepo_c.c) TARGET_LINK_LIBRARIES(sqliterepo_c libcreaterepo_c ${GLIB2_LIBRARIES} ${GTHREAD2_LIBRARIES}) CONFIGURE_FILE("createrepo_c.pc.cmake" "${CMAKE_SOURCE_DIR}/src/createrepo_c.pc" @ONLY) CONFIGURE_FILE("version.h.in" "${CMAKE_CURRENT_SOURCE_DIR}/version.h" @ONLY) CONFIGURE_FILE("deltarpms.h.in" "${CMAKE_CURRENT_SOURCE_DIR}/deltarpms.h" @ONLY) IF (CREATEREPO_C_INSTALL_DEVELOPMENT OR "${createrepo_c_library_type}" STREQUAL "SHARED") INSTALL( TARGETS libcreaterepo_c RUNTIME DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT RuntimeLibraries LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT RuntimeLibraries ARCHIVE DESTINATION 
${CMAKE_INSTALL_LIBDIR} COMPONENT Development ) ENDIF (CREATEREPO_C_INSTALL_DEVELOPMENT OR "${createrepo_c_library_type}" STREQUAL "SHARED") IF (CREATEREPO_C_INSTALL_DEVELOPMENT) INSTALL(FILES ${headers} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/createrepo_c") INSTALL(FILES "createrepo_c.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") ENDIF (CREATEREPO_C_INSTALL_DEVELOPMENT) IF (NOT DEFINED BIN_INSTALL_DIR) SET(BIN_INSTALL_DIR "bin/") ENDIF (NOT DEFINED BIN_INSTALL_DIR) INSTALL( TARGETS createrepo_c mergerepo_c modifyrepo_c sqliterepo_c RUNTIME DESTINATION ${BIN_INSTALL_DIR} COMPONENT Runtime ) IF (ENABLE_PYTHON) ADD_SUBDIRECTORY(python) ENDIF (ENABLE_PYTHON) createrepo_c-0.17.0/src/checksum.c000066400000000000000000000172001400672373200167730ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include "error.h" #include "checksum.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define MAX_CHECKSUM_NAME_LEN 7 #define BUFFER_SIZE 2048 struct _cr_ChecksumCtx { EVP_MD_CTX *ctx; cr_ChecksumType type; }; cr_ChecksumType cr_checksum_type(const char *name) { size_t len; char name_lower[MAX_CHECKSUM_NAME_LEN+1]; if (!name) return CR_CHECKSUM_UNKNOWN; len = strlen(name); if (len > MAX_CHECKSUM_NAME_LEN) return CR_CHECKSUM_UNKNOWN; for (size_t x = 0; x <= len; x++) name_lower[x] = tolower(name[x]); if (!strncmp(name_lower, "md", 2)) { // MD* family // if (name_lower[2] == '2') // return CR_CHECKSUM_MD2; // else if (name_lower[2] == '5') if (name_lower[2] == '5') return CR_CHECKSUM_MD5; } else if (!strncmp(name_lower, "sha", 3)) { // SHA* family char *sha_type = name_lower + 3; if (!strcmp(sha_type, "")) return CR_CHECKSUM_SHA; else if (!strcmp(sha_type, "1")) return CR_CHECKSUM_SHA1; else if (!strcmp(sha_type, "224")) return CR_CHECKSUM_SHA224; else if (!strcmp(sha_type, "256")) return CR_CHECKSUM_SHA256; else if (!strcmp(sha_type, "384")) return CR_CHECKSUM_SHA384; else if (!strcmp(sha_type, "512")) return CR_CHECKSUM_SHA512; } return CR_CHECKSUM_UNKNOWN; } const char * cr_checksum_name_str(cr_ChecksumType type) { switch (type) { case CR_CHECKSUM_UNKNOWN: return "Unknown checksum"; // case CR_CHECKSUM_MD2: // return "md2"; case CR_CHECKSUM_MD5: return "md5"; case CR_CHECKSUM_SHA: return "sha"; case CR_CHECKSUM_SHA1: return "sha1"; case CR_CHECKSUM_SHA224: return "sha224"; case CR_CHECKSUM_SHA256: return "sha256"; case CR_CHECKSUM_SHA384: return "sha384"; case CR_CHECKSUM_SHA512: return "sha512"; default: return NULL; } } char * cr_checksum_file(const char *filename, cr_ChecksumType type, GError **err) { FILE *f; int rc; unsigned int len; ssize_t readed; char buf[BUFFER_SIZE]; unsigned char raw_checksum[EVP_MAX_MD_SIZE]; char *checksum; EVP_MD_CTX *ctx; const 
EVP_MD *ctx_type; switch (type) { //case CR_CHECKSUM_MD2: ctx_type = EVP_md2(); break; case CR_CHECKSUM_MD5: ctx_type = EVP_md5(); break; case CR_CHECKSUM_SHA: ctx_type = EVP_sha1(); break; case CR_CHECKSUM_SHA1: ctx_type = EVP_sha1(); break; case CR_CHECKSUM_SHA224: ctx_type = EVP_sha224(); break; case CR_CHECKSUM_SHA256: ctx_type = EVP_sha256(); break; case CR_CHECKSUM_SHA384: ctx_type = EVP_sha384(); break; case CR_CHECKSUM_SHA512: ctx_type = EVP_sha512(); break; case CR_CHECKSUM_UNKNOWN: default: g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCHECKSUMTYPE, "Unknown checksum type"); return NULL; } f = fopen(filename, "rb"); if (!f) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open a file: %s", g_strerror(errno)); return NULL; } ctx = EVP_MD_CTX_create(); rc = EVP_DigestInit_ex(ctx, ctx_type, NULL); if (!rc) { g_set_error(err, ERR_DOMAIN, CRE_OPENSSL, "EVP_DigestInit_ex() failed"); EVP_MD_CTX_destroy(ctx); fclose(f); return NULL; } while ((readed = fread(buf, 1, BUFFER_SIZE, f)) == BUFFER_SIZE) EVP_DigestUpdate(ctx, buf, readed); if (feof(f)) { EVP_DigestUpdate(ctx, buf, readed); } else { g_set_error(err, ERR_DOMAIN, CRE_IO, "Error while reading a file: %s", g_strerror(errno)); EVP_MD_CTX_destroy(ctx); fclose(f); return NULL; } fclose(f); EVP_DigestFinal_ex(ctx, raw_checksum, &len); EVP_MD_CTX_destroy(ctx); checksum = g_malloc0(sizeof(char) * (len * 2 + 1)); for (size_t x = 0; x < len; x++) sprintf(checksum+(x*2), "%02x", raw_checksum[x]); return checksum; } cr_ChecksumCtx * cr_checksum_new(cr_ChecksumType type, GError **err) { EVP_MD_CTX *ctx; const EVP_MD *ctx_type; cr_ChecksumCtx *cr_ctx; assert(!err || *err == NULL); switch (type) { //case CR_CHECKSUM_MD2: ctx_type = EVP_md2(); break; case CR_CHECKSUM_MD5: ctx_type = EVP_md5(); break; case CR_CHECKSUM_SHA: ctx_type = EVP_sha1(); break; case CR_CHECKSUM_SHA1: ctx_type = EVP_sha1(); break; case CR_CHECKSUM_SHA224: ctx_type = EVP_sha224(); break; case CR_CHECKSUM_SHA256: ctx_type = EVP_sha256(); break; case CR_CHECKSUM_SHA384: ctx_type = EVP_sha384(); break; case CR_CHECKSUM_SHA512: ctx_type = EVP_sha512(); break; case CR_CHECKSUM_UNKNOWN: default: g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCHECKSUMTYPE, "Unknown checksum type"); return NULL; } ctx = EVP_MD_CTX_create(); if (!ctx) { g_set_error(err, ERR_DOMAIN, CRE_OPENSSL, "EVP_MD_CTX_create() failed"); return NULL; } if (!EVP_DigestInit_ex(ctx, ctx_type, NULL)) { g_set_error(err, ERR_DOMAIN, CRE_OPENSSL, "EVP_DigestInit_ex() failed"); EVP_MD_CTX_destroy(ctx); return NULL; } cr_ctx = g_malloc0(sizeof(cr_ChecksumCtx)); cr_ctx->ctx = ctx; cr_ctx->type = type; return cr_ctx; } int cr_checksum_update(cr_ChecksumCtx *ctx, const void *buf, size_t len, GError **err) { assert(ctx); assert(!err || *err == NULL); if (len == 0) return CRE_OK; if (!EVP_DigestUpdate(ctx->ctx, buf, len)) { g_set_error(err, ERR_DOMAIN, CRE_OPENSSL, "EVP_DigestUpdate() failed"); return CRE_OPENSSL; } return CRE_OK; } char * cr_checksum_final(cr_ChecksumCtx *ctx, GError **err) { unsigned int len; unsigned char raw_checksum[EVP_MAX_MD_SIZE]; char *checksum; assert(ctx); assert(!err || *err == NULL); if (!EVP_DigestFinal_ex(ctx->ctx, raw_checksum, &len)) { g_set_error(err, ERR_DOMAIN, CRE_OPENSSL, "EVP_DigestFinal_ex() failed"); EVP_MD_CTX_destroy(ctx->ctx); g_free(ctx); return NULL; } EVP_MD_CTX_destroy(ctx->ctx); checksum = g_malloc0(sizeof(char) * (len * 2 + 1)); for (size_t x = 0; x < len; x++) sprintf(checksum+(x*2), "%02x", raw_checksum[x]); g_free(ctx); return checksum; } 
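A minimal usage sketch of the checksum API implemented above, assuming it is compiled inside the createrepo_c source tree so that "checksum.h" and GLib are available; the path "example.dat" is a hypothetical placeholder and most error handling is omitted for brevity:

#include <stdio.h>
#include <glib.h>
#include "checksum.h"

int main(void)
{
    GError *err = NULL;

    /* One-shot variant: checksum a whole file.
     * ("example.dat" is a hypothetical path used only for illustration.) */
    char *file_sum = cr_checksum_file("example.dat", CR_CHECKSUM_SHA256, &err);
    if (!file_sum) {
        fprintf(stderr, "Checksum failed: %s\n", err->message);
        g_clear_error(&err);
        return 1;
    }
    printf("%s of file: %s\n", cr_checksum_name_str(CR_CHECKSUM_SHA256), file_sum);
    g_free(file_sum);

    /* Streaming variant: feed data in chunks. */
    cr_ChecksumCtx *ctx = cr_checksum_new(CR_CHECKSUM_SHA256, &err);
    if (!ctx)
        return 1;
    cr_checksum_update(ctx, "hello ", 6, &err);
    cr_checksum_update(ctx, "world", 5, &err);
    char *stream_sum = cr_checksum_final(ctx, &err);   /* also frees ctx */
    if (!stream_sum)
        return 1;
    printf("%s of buffer: %s\n", cr_checksum_name_str(CR_CHECKSUM_SHA256), stream_sum);
    g_free(stream_sum);
    return 0;
}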
createrepo_c-0.17.0/src/checksum.h000066400000000000000000000070721400672373200170060ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_CHECKSUM_H__ #define __C_CREATEREPOLIB_CHECKSUM_H__ #include #ifdef __cplusplus extern "C" { #endif /** \defgroup checksum API for checksum calculation. * \addtogroup checksum * @{ */ /** Checksum context. */ typedef struct _cr_ChecksumCtx cr_ChecksumCtx; /** * Enum of supported checksum types. * Note: SHA is just a "nickname" for the SHA1. This * is for the compatibility with original createrepo. */ typedef enum { CR_CHECKSUM_UNKNOWN, /*!< Unknown checksum */ // CR_CHECKSUM_MD2, /*!< MD2 checksum */ CR_CHECKSUM_MD5, /*!< MD5 checksum */ CR_CHECKSUM_SHA, /*!< SHA checksum */ CR_CHECKSUM_SHA1, /*!< SHA1 checksum */ CR_CHECKSUM_SHA224, /*!< SHA224 checksum */ CR_CHECKSUM_SHA256, /*!< SHA256 checksum */ CR_CHECKSUM_SHA384, /*!< SHA384 checksum */ CR_CHECKSUM_SHA512, /*!< SHA512 checksum */ CR_CHECKSUM_SENTINEL, /*!< sentinel of the list */ } cr_ChecksumType; /** Return checksum name. * @param type checksum type * @return constant null terminated string with checksum name * or NULL on error */ const char *cr_checksum_name_str(cr_ChecksumType type); /** Return checksum type. * @param name checksum name * @return checksum type */ cr_ChecksumType cr_checksum_type(const char *name); /** Compute file checksum. * @param filename filename * @param type type of checksum * @param err GError ** * @return malloced null terminated string with checksum * or NULL on error */ char *cr_checksum_file(const char *filename, cr_ChecksumType type, GError **err); /** Create new checksum context. * @param type Checksum algorithm of the new checksum context. * @param err GError ** * @return cr_ChecksumCtx or NULL on error */ cr_ChecksumCtx *cr_checksum_new(cr_ChecksumType type, GError **err); /** Feeds data into the checksum. * @param ctx Checksum context. * @param buf Pointer to the data. * @param len Length of the data. * @param err GError ** * @return cr_Error code. */ int cr_checksum_update(cr_ChecksumCtx *ctx, const void *buf, size_t len, GError **err); /** Finalize checksum calculation, return checksum string and frees * all checksum context resources. * @param ctx Checksum context. * @param err GError ** * @return Checksum string or NULL on error. */ char *cr_checksum_final(cr_ChecksumCtx *ctx, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_XML_PARSER_H__ */ createrepo_c-0.17.0/src/cleanup.h000066400000000000000000000076561400672373200166430ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * * Copyright (C) 2012 Colin Walters . 
* Copyright (C) 2014 Richard Hughes * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ /* This file was taken from libhif (https://github.com/hughsie/libhif) */ #ifndef __CR_CLEANUP_H__ #define __CR_CLEANUP_H__ #include #include #include G_BEGIN_DECLS static void my_close(int fildes) { if (fildes < 0) return; close(fildes); } #define CR_DEFINE_CLEANUP_FUNCTION(Type, name, func) \ static inline void name (void *v) \ { \ func (*(Type*)v); \ } #define CR_DEFINE_CLEANUP_FUNCTION0(Type, name, func) \ static inline void name (void *v) \ { \ if (*(Type*)v) \ func (*(Type*)v); \ } #define CR_DEFINE_CLEANUP_FUNCTIONt(Type, name, func) \ static inline void name (void *v) \ { \ if (*(Type*)v) \ func (*(Type*)v, TRUE); \ } CR_DEFINE_CLEANUP_FUNCTION0(FILE*, cr_local_file_fclose, fclose) CR_DEFINE_CLEANUP_FUNCTION0(GArray*, cr_local_array_unref, g_array_unref) CR_DEFINE_CLEANUP_FUNCTION0(GChecksum*, cr_local_checksum_free, g_checksum_free) CR_DEFINE_CLEANUP_FUNCTION0(GDir*, cr_local_dir_close, g_dir_close) CR_DEFINE_CLEANUP_FUNCTION0(GError*, cr_local_free_error, g_error_free) CR_DEFINE_CLEANUP_FUNCTION0(GHashTable*, cr_local_hashtable_unref, g_hash_table_unref) CR_DEFINE_CLEANUP_FUNCTION0(GKeyFile*, cr_local_keyfile_free, g_key_file_free) #if GLIB_CHECK_VERSION(2, 32, 0) CR_DEFINE_CLEANUP_FUNCTION0(GKeyFile*, cr_local_keyfile_unref, g_key_file_unref) #endif CR_DEFINE_CLEANUP_FUNCTION0(GPtrArray*, cr_local_ptrarray_unref, g_ptr_array_unref) CR_DEFINE_CLEANUP_FUNCTION0(GTimer*, cr_local_destroy_timer, g_timer_destroy) CR_DEFINE_CLEANUP_FUNCTIONt(GString*, cr_local_free_string, g_string_free) CR_DEFINE_CLEANUP_FUNCTION(char**, cr_local_strfreev, g_strfreev) CR_DEFINE_CLEANUP_FUNCTION(GList*, cr_local_free_list, g_list_free) CR_DEFINE_CLEANUP_FUNCTION(void*, cr_local_free, g_free) CR_DEFINE_CLEANUP_FUNCTION(int, cr_local_file_close, my_close) #define _cleanup_array_unref_ __attribute__ ((cleanup(cr_local_array_unref))) #define _cleanup_checksum_free_ __attribute__ ((cleanup(cr_local_checksum_free))) #define _cleanup_dir_close_ __attribute__ ((cleanup(cr_local_dir_close))) #define _cleanup_error_free_ __attribute__ ((cleanup(cr_local_free_error))) #define _cleanup_file_close_ __attribute__ ((cleanup(cr_local_file_close))) #define _cleanup_file_fclose_ __attribute__ ((cleanup(cr_local_file_fclose))) #define _cleanup_free_ __attribute__ ((cleanup(cr_local_free))) #define _cleanup_hashtable_unref_ __attribute__ ((cleanup(cr_local_hashtable_unref))) #define _cleanup_keyfile_free_ __attribute__ ((cleanup(cr_local_keyfile_free))) #define _cleanup_keyfile_unref_ __attribute__ ((cleanup(cr_local_keyfile_unref))) #define _cleanup_list_free_ __attribute__ ((cleanup(cr_local_free_list))) #define _cleanup_ptrarray_unref_ __attribute__ ((cleanup(cr_local_ptrarray_unref))) #define _cleanup_string_free_ __attribute__ 
((cleanup(cr_local_free_string))) #define _cleanup_strv_free_ __attribute__ ((cleanup(cr_local_strfreev))) #define _cleanup_timer_destroy_ __attribute__ ((cleanup(cr_local_destroy_timer))) G_END_DECLS #endif createrepo_c-0.17.0/src/cmd_parser.c000066400000000000000000000655071400672373200173250ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include "cmd_parser.h" #include "deltarpms.h" #include "error.h" #include "compression_wrapper.h" #include "misc.h" #include "cleanup.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define DEFAULT_CHECKSUM "sha256" #define DEFAULT_WORKERS 5 #define DEFAULT_UNIQUE_MD_FILENAMES TRUE #define DEFAULT_IGNORE_LOCK FALSE #define DEFAULT_LOCAL_SQLITE FALSE struct CmdOptions _cmd_options = { .changelog_limit = DEFAULT_CHANGELOG_LIMIT, .checksum = NULL, .workers = DEFAULT_WORKERS, .unique_md_filenames = DEFAULT_UNIQUE_MD_FILENAMES, .checksum_type = CR_CHECKSUM_SHA256, .retain_old = 0, .compression_type = CR_CW_UNKNOWN_COMPRESSION, .general_compression_type = CR_CW_UNKNOWN_COMPRESSION, .ignore_lock = DEFAULT_IGNORE_LOCK, .md_max_age = G_GINT64_CONSTANT(0), .cachedir = NULL, .local_sqlite = DEFAULT_LOCAL_SQLITE, .cut_dirs = 0, .location_prefix = NULL, .repomd_checksum = NULL, .deltas = FALSE, .oldpackagedirs = NULL, .num_deltas = 1, .max_delta_rpm_size = CR_DEFAULT_MAX_DELTA_RPM_SIZE, .checksum_cachedir = NULL, .repomd_checksum_type = CR_CHECKSUM_SHA256, .zck_compression = FALSE, .zck_dict_dir = NULL, .recycle_pkglist = FALSE, }; // Command line params static GOptionEntry cmd_entries[] = { { "version", 'V', 0, G_OPTION_ARG_NONE, &(_cmd_options.version), "Show program's version number and exit.", NULL}, { "quiet", 'q', 0, G_OPTION_ARG_NONE, &(_cmd_options.quiet), "Run quietly.", NULL }, { "verbose", 'v', 0, G_OPTION_ARG_NONE, &(_cmd_options.verbose), "Run verbosely.", NULL }, { "excludes", 'x', 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.excludes), "Path patterns to exclude, can be specified multiple times.", "PACKAGE_NAME_GLOB" }, { "basedir", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.basedir), "Basedir for path to directories.", "BASEDIR" }, { "baseurl", 'u', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.location_base), "Optional base URL location for all files.", "URL" }, { "groupfile", 'g', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.groupfile), "Path to groupfile to include in metadata.", "GROUPFILE" }, { "checksum", 's', 0, G_OPTION_ARG_STRING, &(_cmd_options.checksum), "Choose the checksum type used in repomd.xml and for packages in the " "metadata. 
The default is now \"sha256\".", "CHECKSUM_TYPE" }, { "pretty", 'p', 0, G_OPTION_ARG_NONE, &(_cmd_options.pretty), "Make sure all xml generated is formatted (default)", NULL }, { "database", 'd', 0, G_OPTION_ARG_NONE, &(_cmd_options.database), "Generate sqlite databases for use with yum.", NULL }, { "no-database", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.no_database), "Do not generate sqlite databases in the repository.", NULL }, { "update", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.update), "If metadata already exists in the outputdir and an rpm is unchanged " "(based on file size and mtime) since the metadata was generated, reuse " "the existing metadata rather than recalculating it. In the case of a " "large repository with only a few new or modified rpms " "this can significantly reduce I/O and processing time.", NULL }, { "update-md-path", 0, 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.update_md_paths), "Existing metadata from this path are loaded and reused in addition to those " "present in the outputdir (works only with --update). Can be specified multiple times.", NULL }, { "skip-stat", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.skip_stat), "Skip the stat() call on a --update, assumes if the filename is the same " "then the file is still the same (only use this if you're fairly " "trusting or gullible).", NULL }, { "split", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.split), "Run in split media mode. Rather than pass a single directory, take a set of" "directories corresponding to different volumes in a media set. " "Meta data is created in the first given directory", NULL }, { "pkglist", 'i', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.pkglist), "Specify a text file which contains the complete list of files to " "include in the repository from the set found in the directory. File " "format is one package per line, no wildcards or globs.", "FILENAME" }, { "includepkg", 'n', 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.includepkg), "Specify pkgs to include on the command line. Takes urls as well as local paths.", "PACKAGE" }, { "outputdir", 'o', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.outputdir), "Optional output directory.", "URL" }, { "skip-symlinks", 'S', 0, G_OPTION_ARG_NONE, &(_cmd_options.skip_symlinks), "Ignore symlinks of packages.", NULL}, { "changelog-limit", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.changelog_limit), "Only import the last N changelog entries, from each rpm, into the metadata.", "NUM" }, { "unique-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.unique_md_filenames), "Include the file's checksum in the metadata filename, helps HTTP caching (default).", NULL }, { "simple-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.simple_md_filenames), "Do not include the file's checksum in the metadata filename.", NULL }, { "retain-old-md", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.retain_old), "Specify NUM to 0 to remove all repodata present in old repomd.xml or any other positive number to keep all old repodata. 
" "Use --compatibility flag to get the behavior of original createrepo: " "Keep around the latest (by timestamp) NUM copies of the old repodata (works only for primary, filelists, other and their DB variants).", "NUM" }, { "distro", 0, 0, G_OPTION_ARG_STRING_ARRAY, &(_cmd_options.distro_tags), "Distro tag and optional cpeid: --distro'cpeid,textname'.", "DISTRO" }, { "content", 0, 0, G_OPTION_ARG_STRING_ARRAY, &(_cmd_options.content_tags), "Tags for the content in the repository.", "CONTENT_TAGS" }, { "repo", 0, 0, G_OPTION_ARG_STRING_ARRAY, &(_cmd_options.repo_tags), "Tags to describe the repository itself.", "REPO_TAGS" }, { "revision", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.revision), "User-specified revision for this repository.", "REVISION" }, { "set-timestamp-to-revision", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.set_timestamp_to_revision), "Set timestamp fields in repomd.xml and last modification times of created repodata to a value given with --revision. " "This requires --revision to be a timestamp formatted in 'date +%s' format.", NULL }, { "read-pkgs-list", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.read_pkgs_list), "Output the paths to the pkgs actually read useful with --update.", "READ_PKGS_LIST" }, { "workers", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.workers), "Number of workers to spawn to read rpms.", NULL }, { "xz", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.xz_compression), "Use xz for repodata compression.", NULL }, { "compress-type", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.compress_type), "Which compression type to use.", "COMPRESSION_TYPE" }, { "general-compress-type", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.general_compress_type), "Which compression type to use (even for primary, filelists and other xml).", "COMPRESSION_TYPE" }, #ifdef WITH_ZCHUNK { "zck", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.zck_compression), "Generate zchunk files as well as the standard repodata.", NULL }, { "zck-dict-dir", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.zck_dict_dir), "Directory containing compression dictionaries for use by zchunk", "ZCK_DICT_DIR" }, #endif { "keep-all-metadata", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.keep_all_metadata), "Keep all additional metadata (not primary, filelists and other xml or sqlite files, " "nor their compressed variants) from source repository during update.", NULL }, { "compatibility", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.compatibility), "Enforce maximal compatibility with classical createrepo (Affects only: --retain-old-md).", NULL }, { "retain-old-md-by-age", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.retain_old_md_by_age), "During --update, remove all files in repodata/ which are older " "then the specified period of time. (e.g. '2h', '30d', ...). " "Available units (m - minutes, h - hours, d - days)", "AGE" }, { "cachedir", 'c', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.cachedir), "Set path to cache dir", "CACHEDIR." }, #ifdef CR_DELTA_RPM_SUPPORT { "deltas", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.deltas), "Tells createrepo to generate deltarpms and the delta metadata.", NULL }, { "oldpackagedirs", 0, 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.oldpackagedirs), "Paths to look for older pkgs to delta against. Can be specified " "multiple times.", "PATH" }, { "num-deltas", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.num_deltas), "The number of older versions to make deltas against. 
Defaults to 1.", "INT" }, { "max-delta-rpm-size", 0, 0, G_OPTION_ARG_INT64, &(_cmd_options.max_delta_rpm_size), "Max size of an rpm that to run deltarpm against (in bytes).", "MAX_DELTA_RPM_SIZE" }, #endif { "local-sqlite", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.local_sqlite), "Gen sqlite DBs locally (into a directory for temporary files). " "Sometimes, sqlite has a trouble to gen DBs on a NFS mount, " "use this option in such cases. " "This option could lead to a higher memory consumption " "if TMPDIR is set to /tmp or not set at all, because then the /tmp is " "used and /tmp dir is often a ramdisk.", NULL }, { "cut-dirs", 0, 0, G_OPTION_ARG_INT, &(_cmd_options.cut_dirs), "Ignore NUM of directory components in location_href during repodata " "generation", "NUM" }, { "location-prefix", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.location_prefix), "Append this prefix before location_href in output repodata", "PREFIX" }, { "repomd-checksum", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.repomd_checksum), "Checksum type to be used in repomd.xml", "CHECKSUM_TYPE"}, { "error-exit-val", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.error_exit_val), "Exit with retval 2 if there were any errors during processing", NULL }, { "recycle-pkglist", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.recycle_pkglist), "Read the list of packages from old metadata directory and re-use it. This " "option is only useful with --update (complements --pkglist and friends).", NULL }, { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }, }; static GOptionEntry expert_entries[] = { { "ignore-lock", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.ignore_lock), "Expert (risky) option: Ignore an existing .repodata/. " "(Remove the existing .repodata/ and create an empty new one " "to serve as a lock for other createrepo instances. For the repodata " "generation, a different temporary dir with the name in format " "\".repodata.time.microseconds.pid/\" will be used). " "NOTE: Use this option on your " "own risk! If two createrepos run simultaneously, then the state of the " "generated metadata is not guaranteed - it can be inconsistent and wrong.", NULL }, { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }, }; struct CmdOptions *parse_arguments(int *argc, char ***argv, GError **err) { gboolean ret; GOptionContext *context; GOptionGroup *group_expert; assert(!err || *err == NULL); context = g_option_context_new(""); g_option_context_set_summary(context, "Program that creates a repomd " "(xml-based rpm metadata) repository from a set of rpms."); g_option_context_add_main_entries(context, cmd_entries, NULL); group_expert = g_option_group_new("expert", "Expert (risky) options", "Expert (risky) options", NULL, NULL); g_option_group_add_entries(group_expert, expert_entries); g_option_context_add_group(context, group_expert); ret = g_option_context_parse(context, argc, argv, err); g_option_context_free(context); if (!ret) return NULL; return &(_cmd_options); } /** Convert string to compression type set an error if failed. * @param type_str String with compression type (e.g. 
"gz") * @param type Pointer to cr_CompressionType variable * @param err Err that will be set in case of error */ static gboolean check_and_set_compression_type(const char *type_str, cr_CompressionType *type, GError **err) { assert(!err || *err == NULL); _cleanup_string_free_ GString *compress_str = NULL; compress_str = g_string_ascii_down(g_string_new(type_str)); if (!strcmp(compress_str->str, "gz")) { *type = CR_CW_GZ_COMPRESSION; } else if (!strcmp(compress_str->str, "bz2")) { *type = CR_CW_BZ2_COMPRESSION; } else if (!strcmp(compress_str->str, "xz")) { *type = CR_CW_XZ_COMPRESSION; } else { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Unknown/Unsupported compression type \"%s\"", type_str); return FALSE; } return TRUE; } /** Convert a time period to seconds (gint64 value) * Format: "[0-9]+[mhd]?" * Units: m - minutes, h - hours, d - days, ... * @param timeperiod Time period * @param time Time period converted to gint64 will be stored here */ static gboolean parse_period_of_time(const gchar *timeperiod, gint64 *time, GError **err) { assert(!err || *err == NULL); gchar *endptr = NULL; gint64 val = g_ascii_strtoll(timeperiod, &endptr, 0); *time = G_GINT64_CONSTANT(0); // Check the state of the conversion if (val == 0 && endptr == timeperiod) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad time period \"%s\"", timeperiod); return FALSE; } if (val == G_MAXINT64) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Time period \"%s\" is too high", timeperiod); return FALSE; } if (val == G_MININT64) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Time period \"%s\" is too low", timeperiod); return FALSE; } if (!endptr || endptr[0] == '\0') // Secs *time = (gint64) val; else if (!strcmp(endptr, "m")) // Minutes *time = (gint64) val*60; else if (!strcmp(endptr, "h")) // Hours *time = (gint64) val*60*60; else if (!strcmp(endptr, "d")) // Days *time = (gint64) val*24*60*60; else { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad time unit \"%s\"", endptr); return FALSE; } return TRUE; } gboolean check_arguments(struct CmdOptions *options, const char *input_dir, GError **err) { assert(!err || *err == NULL); // Check outputdir if (options->outputdir && !g_file_test(options->outputdir, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Specified outputdir \"%s\" doesn't exists", options->outputdir); return FALSE; } // Check workers if ((options->workers < 1) || (options->workers > 100)) { g_warning("Wrong number of workers - Using 5 workers."); options->workers = DEFAULT_WORKERS; } // Check changelog_limit if ((options->changelog_limit < -1)) { g_warning("Wrong changelog limit \"%d\" - Using 10", options->changelog_limit); options->changelog_limit = DEFAULT_CHANGELOG_LIMIT; } // Check simple filenames if (options->simple_md_filenames) { options->unique_md_filenames = FALSE; } // Check and set checksum type if (options->checksum) { cr_ChecksumType type; type = cr_checksum_type(options->checksum); if (type == CR_CHECKSUM_UNKNOWN) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Unknown/Unsupported checksum type \"%s\"", options->checksum); return FALSE; } options->checksum_type = type; } // Check and set checksum type for repomd if (options->repomd_checksum) { cr_ChecksumType type; type = cr_checksum_type(options->repomd_checksum); if (type == CR_CHECKSUM_UNKNOWN) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Unknown/Unsupported checksum type \"%s\"", options->repomd_checksum); return FALSE; } options->repomd_checksum_type = type; } else { options->repomd_checksum_type = options->checksum_type; 
} // Check and set compression type if (options->compress_type) { if (!check_and_set_compression_type(options->compress_type, &(options->compression_type), err)) { return FALSE; } } //options --xz has priority over compress_type, but not over general_compress_type if (options->xz_compression) { options->compression_type = CR_CW_XZ_COMPRESSION; } // Check and set general compression type if (options->general_compress_type) { if (!check_and_set_compression_type(options->general_compress_type, &(options->general_compression_type), err)) { return FALSE; } } int x; // Process exclude glob masks x = 0; while (options->excludes && options->excludes[x] != NULL) { GPatternSpec *pattern = g_pattern_spec_new(options->excludes[x]); options->exclude_masks = g_slist_prepend(options->exclude_masks, (gpointer) pattern); x++; } // Process includepkgs x = 0; while (options->includepkg && options->includepkg[x] != NULL) { options->include_pkgs = g_slist_prepend(options->include_pkgs, (gpointer) g_strdup(options->includepkg[x])); x++; } // Check groupfile options->groupfile_fullpath = NULL; if (options->groupfile) { gboolean remote = FALSE; if (g_str_has_prefix(options->groupfile, "/")) { // Absolute local path options->groupfile_fullpath = g_strdup(options->groupfile); } else if (strstr(options->groupfile, "://")) { // Remote groupfile remote = TRUE; options->groupfile_fullpath = g_strdup(options->groupfile); } else { // Relative path (from intput_dir) options->groupfile_fullpath = g_strconcat(input_dir, options->groupfile, NULL); } if (!remote && !g_file_test(options->groupfile_fullpath, G_FILE_TEST_IS_REGULAR)) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "groupfile %s doesn't exists", options->groupfile_fullpath); return FALSE; } } // Process pkglist file if (options->pkglist) { if (!g_file_test(options->pkglist, G_FILE_TEST_IS_REGULAR)) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "pkglist file \"%s\" doesn't exists", options->pkglist); return FALSE; } else { char *content = NULL; GError *tmp_err = NULL; if (!g_file_get_contents(options->pkglist, &content, NULL, &tmp_err)) { g_warning("Error while reading pkglist file: %s", tmp_err->message); g_error_free(tmp_err); g_free(content); } else { x = 0; char **pkgs = g_strsplit(content, "\n", 0); while (pkgs && pkgs[x] != NULL) { if (strlen(pkgs[x])) { options->include_pkgs = g_slist_prepend(options->include_pkgs, (gpointer) g_strdup(pkgs[x])); } x++; } g_strfreev(pkgs); g_free(content); } } } // Process update_md_paths if (options->update_md_paths && !options->update) g_warning("Usage of --update-md-path without --update has no effect!"); x = 0; while (options->update_md_paths && options->update_md_paths[x] != NULL) { char *path = options->update_md_paths[x]; options->l_update_md_paths = g_slist_prepend(options->l_update_md_paths, (gpointer) path); x++; } // Check keep-all-metadata if (options->keep_all_metadata && !options->update) { g_warning("--keep-all-metadata has no effect (--update is not used)"); } // Process --distro tags x = 0; while (options->distro_tags && options->distro_tags[x]) { if (!strchr(options->distro_tags[x], ',')) { options->distro_cpeids = g_slist_append(options->distro_cpeids, NULL); options->distro_values = g_slist_append(options->distro_values, g_strdup(options->distro_tags[x])); x++; continue; } gchar **items = g_strsplit(options->distro_tags[x++], ",", 2); if (!items) continue; if (!items[0] || !items[1] || items[1][0] == '\0') { g_strfreev(items); continue; } if (items[0][0] != '\0') options->distro_cpeids = 
g_slist_append(options->distro_cpeids, g_strdup(items[0])); else options->distro_cpeids = g_slist_append(options->distro_cpeids, NULL); options->distro_values = g_slist_append(options->distro_values, g_strdup(items[1])); g_strfreev(items); } // Check retain-old-md-by-age if (options->retain_old_md_by_age) { if (options->retain_old) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "--retain-old-md-by-age cannot be combined " "with --retain-old-md"); return FALSE; } // Parse argument if (!parse_period_of_time(options->retain_old_md_by_age, &options->md_max_age, err)) return FALSE; } // check if --revision is numeric, when --set-timestamp-to-revision is given if (options->set_timestamp_to_revision) { char *endptr; gint64 revision = strtoll(options->revision, &endptr, 0); if (endptr == options->revision || *endptr != '\0') { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "--set-timestamp-to-revision require numeric value for --revision"); return FALSE; } if ((errno == ERANGE && revision == LLONG_MAX) || revision < 0) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "--revision value out of range"); return FALSE; } } // Check oldpackagedirs x = 0; while (options->oldpackagedirs && options->oldpackagedirs[x]) { char *path = options->oldpackagedirs[x]; options->oldpackagedirs_paths = g_slist_prepend( options->oldpackagedirs_paths, (gpointer) path); x++; } // Check cut_dirs if (options->cut_dirs < 0) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "--cur-dirs value must be positive integer"); return FALSE; } // Zchunk options if (options->zck_dict_dir && !options->zck_compression) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Cannot use --zck-dict-dir without setting --zck"); return FALSE; } if (options->zck_dict_dir) options->zck_dict_dir = cr_normalize_dir_path(options->zck_dict_dir); return TRUE; } void free_options(struct CmdOptions *options) { g_free(options->basedir); g_free(options->location_base); g_free(options->outputdir); g_free(options->pkglist); g_free(options->checksum); g_free(options->compress_type); g_free(options->groupfile); g_free(options->groupfile_fullpath); g_free(options->revision); g_free(options->retain_old_md_by_age); g_free(options->cachedir); g_free(options->checksum_cachedir); g_strfreev(options->excludes); g_strfreev(options->includepkg); g_strfreev(options->distro_tags); g_strfreev(options->content_tags); g_strfreev(options->repo_tags); g_strfreev(options->oldpackagedirs); cr_slist_free_full(options->include_pkgs, g_free); cr_slist_free_full(options->exclude_masks, (GDestroyNotify) g_pattern_spec_free); cr_slist_free_full(options->l_update_md_paths, g_free); cr_slist_free_full(options->distro_cpeids, g_free); cr_slist_free_full(options->distro_values, g_free); cr_slist_free_full(options->modulemd_metadata, g_free); g_slist_free(options->oldpackagedirs_paths); } createrepo_c-0.17.0/src/cmd_parser.h000066400000000000000000000206101400672373200173140ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_CMD_PARSER_H__ #define __C_CREATEREPOLIB_CMD_PARSER_H__ #include #include "checksum.h" #include "compression_wrapper.h" #define DEFAULT_CHANGELOG_LIMIT 10 /** * Command line options */ struct CmdOptions { /* Items filled by cmd option parser */ char *basedir; /*!< basedir for path to directories */ char *location_base; /*!< base URL location */ char *outputdir; /*!< output directory */ char **excludes; /*!< list of file globs to exclude */ char *pkglist; /*!< file with files to include */ char **includepkg; /*!< list of files to include */ char *groupfile; /*!< groupfile path or URL */ gboolean quiet; /*!< quiet mode */ gboolean verbose; /*!< verbosely more than usual (debug) */ gboolean update; /*!< update repo if metadata already exists */ gboolean pretty; /*!< generate pretty xml (just for compatibility) */ char **update_md_paths; /*!< list of paths to repositories which should be used for update */ gboolean skip_stat; /*!< skip stat() call during --update */ gboolean split; /*!< generate split media */ gboolean version; /*!< print program version */ gboolean database; /*!< create sqlite database metadata */ gboolean no_database; /*!< do not create database */ char *checksum; /*!< type of checksum */ char *compress_type; /*!< which compression type to use */ char *general_compress_type;/*!< which compression type to use (even for primary, filelists and other xml) */ gboolean skip_symlinks; /*!< ignore symlinks of packages */ gint changelog_limit; /*!< number of changelog messages in other.(xml|sqlite) */ gboolean unique_md_filenames; /*!< include the file checksums in the filenames */ gboolean simple_md_filenames; /*!< simple filenames (names without checksums) */ gint retain_old; /*!< keep latest N copies of the old repodata */ char **distro_tags; /*!< distro tag and optional cpeid */ char **content_tags; /*!< tags for the content in the repository */ char **repo_tags; /*!< tags to describe the repo_tagsitory itself */ char *revision; /*!< user-specified revision */ gboolean set_timestamp_to_revision; /*!< use --revision instead of current time for timestamps */ char *read_pkgs_list; /*!< output the paths to pkgs actually read */ gint workers; /*!< number of threads to spawn */ gboolean xz_compression; /*!< use xz for repodata compression */ gboolean zck_compression; /*!< generate zchunk files */ char *zck_dict_dir; /*!< directory with zchunk dictionaries */ gboolean keep_all_metadata; /*!< keep groupfile and updateinfo from source repo during update */ gboolean ignore_lock; /*!< Ignore existing .repodata/ - remove it, create the new one (empty) to serve as a lock and use a .repodata.date.pid for data generation. */ gboolean compatibility; /*!< Enforce maximal compatibility with createrepo. I.e. mimics some dump behavior like perseve old comps file(s) during update etc.*/ char *retain_old_md_by_age; /*!< Remove all files in repodata/ older then specified period of time, during --update. Value examples: "360" (360 sec), "5d" (5 days), .. Available units: (m - minutes, h - hours, d - days) */ char *cachedir; /*!< Cache dir for checksums */ gboolean deltas; /*!< Is delta generation enabled? 
*/ char **oldpackagedirs; /*!< Paths to look for older pks to delta agains */ gint num_deltas; /*!< Number of older version to make deltas against */ gint64 max_delta_rpm_size; /*!< Max size of an rpm that to run deltarpm against */ gboolean local_sqlite; /*!< Gen sqlite locally into a directory for temporary files. For situations when sqlite has a trouble to gen DBs on NFS mounts. */ gint cut_dirs; /*!< Ignore *num* of directory components during repodata generation in location href value. */ gchar *location_prefix; /*!< Append this prefix into location_href during repodata generation. */ gchar *repomd_checksum; /*!< Checksum type for entries in repomd.xml */ gboolean error_exit_val; /*!< exit 2 on processing errors */ /* Items filled by check_arguments() */ char *groupfile_fullpath; /*!< full path to groupfile */ GSList *exclude_masks; /*!< list of exclude masks (list of GPatternSpec pointers) */ GSList *include_pkgs; /*!< list of packages to include (build from includepkg options and pkglist file) */ GSList *l_update_md_paths; /*!< list of repo from update_md_paths (remote repo are downloaded) */ GSList *distro_cpeids; /*!< CPEIDs from --distro params */ GSList *distro_values; /*!< values from --distro params */ cr_ChecksumType checksum_type; /*!< checksum type */ cr_ChecksumType repomd_checksum_type; /*!< checksum type */ cr_CompressionType compression_type; /*!< compression type */ cr_CompressionType general_compression_type; /*!< compression type */ gint64 md_max_age; /*!< Max age of files in repodata/. Older files will be removed during --update. Filled if --retain-old-md-by-age is used */ char *checksum_cachedir; /*!< Path to cachedir */ GSList *oldpackagedirs_paths; /*!< paths to look for older pkgs to delta against */ GSList *modulemd_metadata; /*!< paths to all modulemd metadata */ gboolean recycle_pkglist; }; /** * Parses commandline arguments. * @param argc pointer to argc * @param argv pointer to argv * @return CmdOptions filled by command line arguments */ struct CmdOptions * parse_arguments(int *argc, char ***argv, GError **err); /** * Performs some checks of arguments and fill some other items. * in the CmdOptions structure. */ gboolean check_arguments(struct CmdOptions *options, const char *inputdir, GError **err); /** * Frees CmdOptions. * @param options pointer to struct with command line options */ void free_options(struct CmdOptions *options); #endif /* __C_CREATEREPOLIB_CMD_PARSER_H__ */ createrepo_c-0.17.0/src/compression_wrapper.c000066400000000000000000001532021400672373200212750ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef WITH_ZCHUNK #include #endif // WITH_ZCHUNK #include "error.h" #include "compression_wrapper.h" #define ERR_DOMAIN CREATEREPO_C_ERROR /* #define Z_CR_CW_NO_COMPRESSION 0 #define Z_BEST_SPEED 1 #define Z_BEST_COMPRESSION 9 #define Z_DEFAULT_COMPRESSION (-1) */ #define CR_CW_GZ_COMPRESSION_LEVEL Z_DEFAULT_COMPRESSION /* #define Z_FILTERED 1 #define Z_HUFFMAN_ONLY 2 #define Z_RLE 3 #define Z_FIXED 4 #define Z_DEFAULT_STRATEGY 0 */ #define GZ_STRATEGY Z_DEFAULT_STRATEGY #define GZ_BUFFER_SIZE (1024*128) #define BZ2_VERBOSITY 0 #define BZ2_BLOCKSIZE100K 5 // Higher gives better compression but takes // more memory #define BZ2_WORK_FACTOR 0 // 0 == default == 30 (available 0-250) #define BZ2_USE_LESS_MEMORY 0 #define BZ2_SKIP_FFLUSH 0 /* number 0..9 or LZMA_PRESET_DEFAULT default preset LZMA_PRESET_EXTREME significantly slower, improving the compression ratio marginally */ #define CR_CW_XZ_COMPRESSION_LEVEL 5 /* LZMA_CHECK_NONE LZMA_CHECK_CRC32 LZMA_CHECK_CRC64 LZMA_CHECK_SHA256 */ #define XZ_CHECK LZMA_CHECK_CRC32 /* UINT64_MAX effectively disable the limiter */ #define XZ_MEMORY_USAGE_LIMIT UINT64_MAX #define XZ_DECODER_FLAGS 0 #define XZ_BUFFER_SIZE (1024*32) #if ZLIB_VERNUM < 0x1240 // XXX: Zlib has gzbuffer since 1.2.4 #define gzbuffer(a,b) 0 #endif cr_ContentStat * cr_contentstat_new(cr_ChecksumType type, GError **err) { cr_ContentStat *cstat; assert(!err || *err == NULL); cstat = g_malloc0(sizeof(cr_ContentStat)); cstat->checksum_type = type; return cstat; } void cr_contentstat_free(cr_ContentStat *cstat, GError **err) { assert(!err || *err == NULL); if (!cstat) return; g_free(cstat->hdr_checksum); g_free(cstat->checksum); g_free(cstat); } typedef struct { lzma_stream stream; FILE *file; unsigned char buffer[XZ_BUFFER_SIZE]; } XzFile; cr_CompressionType cr_detect_compression(const char *filename, GError **err) { cr_CompressionType type = CR_CW_UNKNOWN_COMPRESSION; assert(filename); assert(!err || *err == NULL); if (!g_file_test(filename, G_FILE_TEST_IS_REGULAR)) { g_debug("%s: File %s doesn't exists or not a regular file", __func__, filename); g_set_error(err, ERR_DOMAIN, CRE_NOFILE, "File %s doesn't exists or not a regular file", filename); return CR_CW_UNKNOWN_COMPRESSION; } // Try determine compression type via filename suffix if (g_str_has_suffix(filename, ".gz") || g_str_has_suffix(filename, ".gzip") || g_str_has_suffix(filename, ".gunzip")) { return CR_CW_GZ_COMPRESSION; } else if (g_str_has_suffix(filename, ".bz2") || g_str_has_suffix(filename, ".bzip2")) { return CR_CW_BZ2_COMPRESSION; } else if (g_str_has_suffix(filename, ".xz")) { return CR_CW_XZ_COMPRESSION; } else if (g_str_has_suffix(filename, ".zck")) { return CR_CW_ZCK_COMPRESSION; } else if (g_str_has_suffix(filename, ".xml") || g_str_has_suffix(filename, ".tar") || g_str_has_suffix(filename, ".yaml") || g_str_has_suffix(filename, ".sqlite")) { return CR_CW_NO_COMPRESSION; } // No success? Let's get hardcore... 
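// Suffix gave no answer - fall back to libmagic below and classify the file by its MIME type instead.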
(Use magic bytes) magic_t myt = magic_open(MAGIC_MIME | MAGIC_SYMLINK); if (myt == NULL) { g_set_error(err, ERR_DOMAIN, CRE_MAGIC, "magic_open() failed: Cannot allocate the magic cookie"); return CR_CW_UNKNOWN_COMPRESSION; } if (magic_load(myt, NULL) == -1) { g_set_error(err, ERR_DOMAIN, CRE_MAGIC, "magic_load() failed: %s", magic_error(myt)); return CR_CW_UNKNOWN_COMPRESSION; } const char *mime_type = magic_file(myt, filename); if (mime_type) { g_debug("%s: Detected mime type: %s (%s)", __func__, mime_type, filename); if (g_str_has_prefix(mime_type, "application/x-gzip") || g_str_has_prefix(mime_type, "application/gzip") || g_str_has_prefix(mime_type, "application/gzip-compressed") || g_str_has_prefix(mime_type, "application/gzipped") || g_str_has_prefix(mime_type, "application/x-gzip-compressed") || g_str_has_prefix(mime_type, "application/x-compress") || g_str_has_prefix(mime_type, "application/x-gunzip") || g_str_has_prefix(mime_type, "multipart/x-gzip")) { type = CR_CW_GZ_COMPRESSION; } else if (g_str_has_prefix(mime_type, "application/x-bzip2") || g_str_has_prefix(mime_type, "application/x-bz2") || g_str_has_prefix(mime_type, "application/bzip2") || g_str_has_prefix(mime_type, "application/bz2")) { type = CR_CW_BZ2_COMPRESSION; } else if (g_str_has_prefix(mime_type, "application/x-xz")) { type = CR_CW_XZ_COMPRESSION; } else if (g_str_has_prefix(mime_type, "text/plain") || g_str_has_prefix(mime_type, "text/xml") || g_str_has_prefix(mime_type, "application/xml") || g_str_has_prefix(mime_type, "application/x-xml") || g_str_has_prefix(mime_type, "application/x-empty") || g_str_has_prefix(mime_type, "application/x-tar") || g_str_has_prefix(mime_type, "inode/x-empty")) { type = CR_CW_NO_COMPRESSION; } } else { g_debug("%s: Mime type not detected! (%s): %s", __func__, filename, magic_error(myt)); g_set_error(err, ERR_DOMAIN, CRE_MAGIC, "mime_type() detection failed: %s", magic_error(myt)); magic_close(myt); return CR_CW_UNKNOWN_COMPRESSION; } // Xml detection if (type == CR_CW_UNKNOWN_COMPRESSION && g_str_has_suffix(filename, ".xml")) type = CR_CW_NO_COMPRESSION; magic_close(myt); return type; } cr_CompressionType cr_compression_type(const char *name) { if (!name) return CR_CW_UNKNOWN_COMPRESSION; int type = CR_CW_UNKNOWN_COMPRESSION; gchar *name_lower = g_strdup(name); for (gchar *c = name_lower; *c; c++) *c = tolower(*c); if (!g_strcmp0(name_lower, "gz") || !g_strcmp0(name_lower, "gzip")) type = CR_CW_GZ_COMPRESSION; if (!g_strcmp0(name_lower, "bz2") || !g_strcmp0(name_lower, "bzip2")) type = CR_CW_BZ2_COMPRESSION; if (!g_strcmp0(name_lower, "xz")) type = CR_CW_XZ_COMPRESSION; if (!g_strcmp0(name_lower, "zck")) type = CR_CW_ZCK_COMPRESSION; g_free(name_lower); return type; } const char * cr_compression_suffix(cr_CompressionType comtype) { switch (comtype) { case CR_CW_GZ_COMPRESSION: return ".gz"; case CR_CW_BZ2_COMPRESSION: return ".bz2"; case CR_CW_XZ_COMPRESSION: return ".xz"; case CR_CW_ZCK_COMPRESSION: return ".zck"; default: return NULL; } } static const char * cr_gz_strerror(gzFile f) { int errnum; const char *msg = gzerror(f, &errnum); if (errnum == Z_ERRNO) msg = g_strerror(errno); return msg; } #ifdef WITH_ZCHUNK cr_ChecksumType cr_cktype_from_zck(zckCtx *zck, GError **err) { int cktype = zck_get_full_hash_type(zck); if (cktype < 0) { g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Unable to read hash from zchunk file"); return CR_CHECKSUM_UNKNOWN; } if (cktype == ZCK_HASH_SHA1) return CR_CHECKSUM_SHA1; else if (cktype == ZCK_HASH_SHA256) return CR_CHECKSUM_SHA256; else { const char 
*ckname = zck_hash_name_from_type(cktype); if (ckname == NULL) ckname = "Unknown"; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Unknown zchunk checksum type: %s", ckname); return CR_CHECKSUM_UNKNOWN; } } #endif // WITH_ZCHUNK CR_FILE * cr_sopen(const char *filename, cr_OpenMode mode, cr_CompressionType comtype, cr_ContentStat *stat, GError **err) { CR_FILE *file = NULL; cr_CompressionType type = comtype; GError *tmp_err = NULL; assert(filename); assert(mode == CR_CW_MODE_READ || mode == CR_CW_MODE_WRITE); assert(mode < CR_CW_MODE_SENTINEL); assert(comtype < CR_CW_COMPRESSION_SENTINEL); assert(!err || *err == NULL); if (mode == CR_CW_MODE_WRITE) { if (comtype == CR_CW_AUTO_DETECT_COMPRESSION) { g_debug("%s: CR_CW_AUTO_DETECT_COMPRESSION cannot be used if " "mode is CR_CW_MODE_WRITE", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "CR_CW_AUTO_DETECT_COMPRESSION cannot be used if " "mode is CR_CW_MODE_WRITE"); return NULL; } if (comtype == CR_CW_UNKNOWN_COMPRESSION) { g_debug("%s: CR_CW_UNKNOWN_COMPRESSION cannot be used if mode" " is CR_CW_MODE_WRITE", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "CR_CW_UNKNOWN_COMPRESSION cannot be used if mode " "is CR_CW_MODE_WRITE"); return NULL; } } if (comtype == CR_CW_AUTO_DETECT_COMPRESSION) { // Try to detect type of compression type = cr_detect_compression(filename, &tmp_err); if (tmp_err) { // Error while detection g_propagate_error(err, tmp_err); return NULL; } } if (type == CR_CW_UNKNOWN_COMPRESSION) { // Detection without error but compression type is unknown g_debug("%s: Cannot detect compression type", __func__); g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCOMPRESSION, "Cannot detect compression type"); return NULL; } // Open file const char *mode_str = (mode == CR_CW_MODE_WRITE) ? "wb" : "rb"; file = g_malloc0(sizeof(CR_FILE)); file->mode = mode; file->type = type; file->INNERFILE = NULL; switch (type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- mode_str = (mode == CR_CW_MODE_WRITE) ? 
"w" : "r"; file->FILE = (void *) fopen(filename, mode_str); if (!file->FILE) g_set_error(err, ERR_DOMAIN, CRE_IO, "fopen(): %s", g_strerror(errno)); break; case (CR_CW_GZ_COMPRESSION): // --------------------------------------- file->FILE = (void *) gzopen(filename, mode_str); if (!file->FILE) { g_set_error(err, ERR_DOMAIN, CRE_GZ, "gzopen(): %s", g_strerror(errno)); break; } if (mode == CR_CW_MODE_WRITE) gzsetparams((gzFile) file->FILE, CR_CW_GZ_COMPRESSION_LEVEL, GZ_STRATEGY); if (gzbuffer((gzFile) file->FILE, GZ_BUFFER_SIZE) == -1) { g_debug("%s: gzbuffer() call failed", __func__); g_set_error(err, ERR_DOMAIN, CRE_GZ, "gzbuffer() call failed"); } break; case (CR_CW_BZ2_COMPRESSION): { // ------------------------------------ FILE *f = fopen(filename, mode_str); file->INNERFILE = f; int bzerror; if (!f) { g_set_error(err, ERR_DOMAIN, CRE_IO, "fopen(): %s", g_strerror(errno)); break; } if (mode == CR_CW_MODE_WRITE) { file->FILE = (void *) BZ2_bzWriteOpen(&bzerror, f, BZ2_BLOCKSIZE100K, BZ2_VERBOSITY, BZ2_WORK_FACTOR); } else { file->FILE = (void *) BZ2_bzReadOpen(&bzerror, f, BZ2_VERBOSITY, BZ2_USE_LESS_MEMORY, NULL, 0); } if (bzerror != BZ_OK) { const char *err_msg; fclose(f); switch (bzerror) { case BZ_CONFIG_ERROR: err_msg = "library has been mis-compiled"; break; case BZ_PARAM_ERROR: err_msg = "bad function params"; break; case BZ_IO_ERROR: err_msg = "ferror(f) is nonzero"; break; case BZ_MEM_ERROR: err_msg = "insufficient memory is available"; break; default: err_msg = "other error"; } g_set_error(err, ERR_DOMAIN, CRE_BZ2, "Bz2 error: %s", err_msg); } break; } case (CR_CW_XZ_COMPRESSION): { // ------------------------------------- int ret; XzFile *xz_file = g_malloc(sizeof(XzFile)); lzma_stream *stream = &(xz_file->stream); memset(stream, 0, sizeof(lzma_stream)); /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ XXX: This part is a little tricky. Because in the default initializer LZMA_STREAM_INIT are some items NULL and (according to C standard) NULL may have different internal representation than zero. This should not be a problem nowadays. */ // Prepare coder/decoder if (mode == CR_CW_MODE_WRITE) { #ifdef ENABLE_THREADED_XZ_ENCODER // The threaded encoder takes the options as pointer to // a lzma_mt structure. lzma_mt mt = { // No flags are needed. .flags = 0, // Let liblzma determine a sane block size. .block_size = 0, // Use no timeout for lzma_code() calls by setting timeout // to zero. That is, sometimes lzma_code() might block for // a long time (from several seconds to even minutes). // If this is not OK, for example due to progress indicator // needing updates, specify a timeout in milliseconds here. // See the documentation of lzma_mt in lzma/container.h for // information how to choose a reasonable timeout. .timeout = 0, // Use the default preset (6) for LZMA2. // To use a preset, filters must be set to NULL. .preset = LZMA_PRESET_DEFAULT, .filters = NULL, // Integrity checking. .check = XZ_CHECK, }; // Detect how many threads the CPU supports. mt.threads = lzma_cputhreads(); // If the number of CPU cores/threads cannot be detected, // use one thread. if (mt.threads == 0) mt.threads = 1; // If the number of CPU cores/threads exceeds threads_max, // limit the number of threads to keep memory usage lower. 
const uint32_t threads_max = 2; if (mt.threads > threads_max) mt.threads = threads_max; if (mt.threads > 1) // Initialize the threaded encoder ret = lzma_stream_encoder_mt(stream, &mt); else #endif // Initialize the single-threaded encoder ret = lzma_easy_encoder(stream, CR_CW_XZ_COMPRESSION_LEVEL, XZ_CHECK); } else { ret = lzma_auto_decoder(stream, XZ_MEMORY_USAGE_LIMIT, XZ_DECODER_FLAGS); } if (ret != LZMA_OK) { const char *err_msg; switch (ret) { case LZMA_MEM_ERROR: err_msg = "Cannot allocate memory"; break; case LZMA_OPTIONS_ERROR: err_msg = "Unsupported flags (options)"; break; case LZMA_PROG_ERROR: err_msg = "One or more of the parameters " "have values that will never be valid. " "(Possibly a bug)"; break; case LZMA_UNSUPPORTED_CHECK: err_msg = "Specified integrity check is not supported"; break; default: err_msg = "Unknown error"; } g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ error (%d): %s", ret, err_msg); g_free((void *) xz_file); break; } // Open input/output file FILE *f = fopen(filename, mode_str); if (!f) { g_set_error(err, ERR_DOMAIN, CRE_XZ, "fopen(): %s", g_strerror(errno)); lzma_end(&(xz_file->stream)); g_free((void *) xz_file); break; } xz_file->file = f; file->FILE = (void *) xz_file; break; } case (CR_CW_ZCK_COMPRESSION): { // ------------------------------------- #ifdef WITH_ZCHUNK FILE *f = fopen(filename, mode_str); if (!f) { g_set_error(err, ERR_DOMAIN, CRE_IO, "fopen(): %s", g_strerror(errno)); break; } file->INNERFILE = f; int fd = fileno(f); file->FILE = (void *) zck_create(); zckCtx *zck = file->FILE; if (mode == CR_CW_MODE_WRITE) { if (!file->FILE || !zck_init_write(zck, fd) || !zck_set_ioption(zck, ZCK_MANUAL_CHUNK, 1)) { zck_set_log_fd(STDOUT_FILENO); g_set_error(err, ERR_DOMAIN, CRE_IO, "%s", zck_get_error(zck)); g_free(file); break; } } else { if (!file->FILE || !zck_init_read(zck, fd)) { g_set_error(err, ERR_DOMAIN, CRE_IO, "%s", zck_get_error(zck)); g_free(file); break; } } break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: // ----------------------------------------------------------- break; } if (!file->FILE) { // File is not open -> cleanup if (err && *err == NULL) g_set_error(err, ERR_DOMAIN, CRE_XZ, "Unknown error while opening: %s", filename); g_free(file); return NULL; } if (stat) { file->stat = stat; if (stat->checksum_type == CR_CHECKSUM_UNKNOWN) { file->checksum_ctx = NULL; } else { file->checksum_ctx = cr_checksum_new(stat->checksum_type, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); cr_close(file, NULL); return NULL; } } #ifdef WITH_ZCHUNK /* Fill zchunk header_stat with header information */ if (mode == CR_CW_MODE_READ && type == CR_CW_ZCK_COMPRESSION) { zckCtx *zck = (zckCtx *)file->FILE; cr_ChecksumType cktype = cr_cktype_from_zck(zck, err); if (cktype == CR_CHECKSUM_UNKNOWN) { /* Error is already set in cr_cktype_from_zck */ g_free(file); return NULL; } file->stat->hdr_checksum_type = cktype; file->stat->hdr_checksum = zck_get_header_digest(zck); file->stat->hdr_size = zck_get_header_length(zck); if (*err != NULL || file->stat->hdr_checksum == NULL || file->stat->hdr_size < 0) { g_free(file); return NULL; } } #endif // WITH_ZCHUNK } assert(!err || (!file && *err != NULL) || (file && *err == NULL)); return file; } int cr_set_dict(CR_FILE *cr_file, const void *dict, unsigned int len, GError **err) { int ret = CRE_OK; assert(!err || *err == NULL); if (len == 0) return CRE_OK; switch (cr_file->type) { case (CR_CW_ZCK_COMPRESSION): { // 
------------------------------------ #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *)cr_file->FILE; size_t wlen = (size_t)len; if (!zck_set_soption(zck, ZCK_COMP_DICT, dict, wlen)) { ret = CRE_ERROR; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Error setting dict"); break; } break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: { // --------------------------------------------------------- ret = CRE_ERROR; g_set_error(err, ERR_DOMAIN, CRE_ERROR, "Compression format doesn't support dict"); break; } } return ret; } int cr_close(CR_FILE *cr_file, GError **err) { int ret = CRE_ERROR; int rc; assert(!err || *err == NULL); if (!cr_file) return CRE_OK; switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- if (fclose((FILE *) cr_file->FILE) == 0) { ret = CRE_OK; } else { ret = CRE_IO; g_set_error(err, ERR_DOMAIN, CRE_IO, "fclose(): %s", g_strerror(errno)); } break; case (CR_CW_GZ_COMPRESSION): // --------------------------------------- rc = gzclose((gzFile) cr_file->FILE); if (rc == Z_OK) ret = CRE_OK; else { const char *err_msg; switch (rc) { case Z_STREAM_ERROR: err_msg = "file is not valid"; break; case Z_ERRNO: err_msg = "file operation error"; break; case Z_MEM_ERROR: err_msg = "if out of memory"; break; case Z_BUF_ERROR: err_msg = "last read ended in the middle of a stream"; break; default: err_msg = "error"; } ret = CRE_GZ; g_set_error(err, ERR_DOMAIN, CRE_GZ, "gzclose(): %s", err_msg); } break; case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- if (cr_file->mode == CR_CW_MODE_READ) BZ2_bzReadClose(&rc, (BZFILE *) cr_file->FILE); else BZ2_bzWriteClose(&rc, (BZFILE *) cr_file->FILE, BZ2_SKIP_FFLUSH, NULL, NULL); fclose(cr_file->INNERFILE); if (rc == BZ_OK) { ret = CRE_OK; } else { const char *err_msg; switch (rc) { case BZ_SEQUENCE_ERROR: // This really should not happen err_msg = "file was opened with BZ2_bzReadOpen"; break; case BZ_IO_ERROR: err_msg = "error writing the compressed file"; break; default: err_msg = "other error"; } ret = CRE_BZ2; g_set_error(err, ERR_DOMAIN, CRE_BZ2, "Bz2 error: %s", err_msg); } break; case (CR_CW_XZ_COMPRESSION): { // ------------------------------------- XzFile *xz_file = (XzFile *) cr_file->FILE; lzma_stream *stream = &(xz_file->stream); if (cr_file->mode == CR_CW_MODE_WRITE) { // Write out rest of buffer while (1) { stream->next_out = (uint8_t*) xz_file->buffer; stream->avail_out = XZ_BUFFER_SIZE; rc = lzma_code(stream, LZMA_FINISH); if (rc != LZMA_OK && rc != LZMA_STREAM_END) { // Error while coding const char *err_msg; switch (rc) { case LZMA_MEM_ERROR: err_msg = "Memory allocation failed"; break; case LZMA_DATA_ERROR: // This error is returned if the compressed // or uncompressed size get near 8 EiB // (2^63 bytes) because that's where the .xz // file format size limits currently are. // That is, the possibility of this error // is mostly theoretical unless you are doing // something very unusual. // // Note that strm->total_in and strm->total_out // have nothing to do with this error. Changing // those variables won't increase or decrease // the chance of getting this error. err_msg = "File size limits exceeded"; break; default: // This is most likely LZMA_PROG_ERROR. 
err_msg = "Unknown error, possibly a bug"; break; } ret = CRE_XZ; g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ: lzma_code() error (%d): %s", rc, err_msg); break; } size_t olen = XZ_BUFFER_SIZE - stream->avail_out; if (fwrite(xz_file->buffer, 1, olen, xz_file->file) != olen) { // Error while writing ret = CRE_XZ; g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ: fwrite() error: %s", g_strerror(errno)); break; } if (rc == LZMA_STREAM_END) { // Everything all right ret = CRE_OK; break; } } } else { ret = CRE_OK; } fclose(xz_file->file); lzma_end(stream); g_free(stream); break; } case (CR_CW_ZCK_COMPRESSION): { // ------------------------------------ #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *) cr_file->FILE; ret = CRE_OK; if (cr_file->mode == CR_CW_MODE_WRITE) { if (zck_end_chunk(zck) < 0) { ret = CRE_ZCK; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Unable to end final chunk: %s", zck_get_error(zck)); } } if (!zck_close(zck)) { ret = CRE_ZCK; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Unable to close zchunk file: %s", zck_get_error(zck)); } cr_ChecksumType cktype = cr_cktype_from_zck(zck, err); if (cktype == CR_CHECKSUM_UNKNOWN) { /* Error is already set in cr_cktype_from_zck */ break; } if (cr_file->stat) { cr_file->stat->hdr_checksum_type = cktype; cr_file->stat->hdr_checksum = zck_get_header_digest(zck); cr_file->stat->hdr_size = zck_get_header_length(zck); if ((err && *err) || cr_file->stat->hdr_checksum == NULL || cr_file->stat->hdr_size < 0) { ret = CRE_ZCK; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Unable to get zchunk header information: %s", zck_get_error(zck)); break; } } zck_free(&zck); fclose(cr_file->INNERFILE); break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: // ----------------------------------------------------------- ret = CRE_BADARG; g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); break; } if (cr_file->stat) { g_free(cr_file->stat->checksum); if (cr_file->checksum_ctx) cr_file->stat->checksum = cr_checksum_final(cr_file->checksum_ctx, NULL); else cr_file->stat->checksum = NULL; } g_free(cr_file); assert(!err || (ret != CRE_OK && *err != NULL) || (ret == CRE_OK && *err == NULL)); return ret; } int cr_read(CR_FILE *cr_file, void *buffer, unsigned int len, GError **err) { int bzerror; int ret = CR_CW_ERR; assert(cr_file); assert(buffer); assert(!err || *err == NULL); if (cr_file->mode != CR_CW_MODE_READ) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in read mode"); return CR_CW_ERR; } switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- ret = fread(buffer, 1, len, (FILE *) cr_file->FILE); if ((ret != (int) len) && !feof((FILE *) cr_file->FILE)) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_IO, "fread(): %s", g_strerror(errno)); } break; case (CR_CW_GZ_COMPRESSION): // --------------------------------------- ret = gzread((gzFile) cr_file->FILE, buffer, len); if (ret == -1) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_GZ, "fread(): %s", cr_gz_strerror((gzFile) cr_file->FILE)); } break; case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- ret = BZ2_bzRead(&bzerror, (BZFILE *) cr_file->FILE, buffer, len); if (!ret && bzerror == BZ_SEQUENCE_ERROR) // Next read after BZ_STREAM_END (EOF) return 0; if (bzerror != BZ_OK && bzerror != BZ_STREAM_END) { const char *err_msg; ret = CR_CW_ERR; switch (bzerror) { case BZ_PARAM_ERROR: // This should not happend err_msg = "bad function params!"; break; case 
BZ_SEQUENCE_ERROR: // This should not happend err_msg = "file was opened with BZ2_bzWriteOpen"; break; case BZ_IO_ERROR: err_msg = "error while reading from the compressed file"; break; case BZ_UNEXPECTED_EOF: err_msg = "the compressed file ended before " "the logical end-of-stream was detected"; break; case BZ_DATA_ERROR: err_msg = "data integrity error was detected in " "the compressed stream"; break; case BZ_DATA_ERROR_MAGIC: err_msg = "the stream does not begin with " "the requisite header bytes (ie, is not " "a bzip2 data file)."; break; case BZ_MEM_ERROR: err_msg = "insufficient memory was available"; break; default: err_msg = "other error"; } g_set_error(err, ERR_DOMAIN, CRE_BZ2, "Bz2 error: %s", err_msg); } break; case (CR_CW_XZ_COMPRESSION): { // ------------------------------------- XzFile *xz_file = (XzFile *) cr_file->FILE; lzma_stream *stream = &(xz_file->stream); stream->next_out = buffer; stream->avail_out = len; while (stream->avail_out) { int lret; // Fill input buffer if (stream->avail_in == 0) { if ((lret = fread(xz_file->buffer, 1, XZ_BUFFER_SIZE, xz_file->file)) < 0) { g_debug("%s: XZ: Error while fread", __func__); g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ: fread(): %s", g_strerror(errno)); return CR_CW_ERR; // Error while reading input file } else if (lret == 0) { g_debug("%s: EOF", __func__); break; // EOF } stream->next_in = xz_file->buffer; stream->avail_in = lret; } // Decode lret = lzma_code(stream, LZMA_RUN); if (lret != LZMA_OK && lret != LZMA_STREAM_END) { const char *err_msg; switch (lret) { case LZMA_MEM_ERROR: err_msg = "Memory allocation failed"; break; case LZMA_FORMAT_ERROR: // .xz magic bytes weren't found. err_msg = "The input is not in the .xz format"; break; case LZMA_OPTIONS_ERROR: // For example, the headers specify a filter // that isn't supported by this liblzma // version (or it hasn't been enabled when // building liblzma, but no-one sane does // that unless building liblzma for an // embedded system). Upgrading to a newer // liblzma might help. // // Note that it is unlikely that the file has // accidentally became corrupt if you get this // error. The integrity of the .xz headers is // always verified with a CRC32, so // unintentionally corrupt files can be // distinguished from unsupported files. err_msg = "Unsupported compression options"; break; case LZMA_DATA_ERROR: err_msg = "Compressed file is corrupt"; break; case LZMA_BUF_ERROR: // Typically this error means that a valid // file has got truncated, but it might also // be a damaged part in the file that makes // the decoder think the file is truncated. // If you prefer, you can use the same error // message for this as for LZMA_DATA_ERROR. err_msg = "Compressed file is truncated or " "otherwise corrupt"; break; default: // This is most likely LZMA_PROG_ERROR. 
err_msg = "Unknown error, possibly a bug"; break; } g_debug("%s: XZ: Error while decoding (%d): %s", __func__, lret, err_msg); g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ: Error while decoding (%d): %s", lret, err_msg); return CR_CW_ERR; // Error while decoding } if (lret == LZMA_STREAM_END) break; } ret = len - stream->avail_out; break; } case (CR_CW_ZCK_COMPRESSION): { // ------------------------------------ #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *) cr_file->FILE; ssize_t rb = zck_read(zck, buffer, len); if (rb < 0) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "ZCK: Unable to read: %s", zck_get_error(zck)); break; } ret = rb; break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: // ----------------------------------------------------------- ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); break; } assert(!err || (ret == CR_CW_ERR && *err != NULL) || (ret != CR_CW_ERR && *err == NULL)); if (cr_file->stat && ret != CR_CW_ERR) { cr_file->stat->size += ret; if (cr_file->checksum_ctx) { GError *tmp_err = NULL; cr_checksum_update(cr_file->checksum_ctx, buffer, ret, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return CR_CW_ERR; } } } return ret; } int cr_write(CR_FILE *cr_file, const void *buffer, unsigned int len, GError **err) { int bzerror; int ret = CR_CW_ERR; assert(cr_file); assert(buffer); assert(!err || *err == NULL); if (cr_file->mode != CR_CW_MODE_WRITE) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in read mode"); return ret; } if (cr_file->stat) { cr_file->stat->size += len; if (cr_file->checksum_ctx) { GError *tmp_err = NULL; cr_checksum_update(cr_file->checksum_ctx, buffer, len, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return CR_CW_ERR; } } } switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- if ((ret = (int) fwrite(buffer, 1, len, (FILE *) cr_file->FILE)) != (int) len) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_IO, "fwrite(): %s", g_strerror(errno)); } break; case (CR_CW_GZ_COMPRESSION): // --------------------------------------- if (len == 0) { ret = 0; break; } if ((ret = gzwrite((gzFile) cr_file->FILE, buffer, len)) == 0) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_GZ, "gzwrite(): %s", cr_gz_strerror((gzFile) cr_file->FILE)); } break; case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- BZ2_bzWrite(&bzerror, (BZFILE *) cr_file->FILE, (void *) buffer, len); if (bzerror == BZ_OK) { ret = len; } else { const char *err_msg; ret = CR_CW_ERR; switch (bzerror) { case BZ_PARAM_ERROR: // This should not happend err_msg = "bad function params!"; break; case BZ_SEQUENCE_ERROR: // This should not happend err_msg = "file was opened with BZ2_bzReadOpen"; break; case BZ_IO_ERROR: err_msg = "error while reading from the compressed file"; break; default: err_msg = "other error"; } g_set_error(err, ERR_DOMAIN, CRE_BZ2, "Bz2 error: %s", err_msg); } break; case (CR_CW_XZ_COMPRESSION): { // ------------------------------------- XzFile *xz_file = (XzFile *) cr_file->FILE; lzma_stream *stream = &(xz_file->stream); ret = len; stream->next_in = buffer; stream->avail_in = len; while (stream->avail_in) { int lret; stream->next_out = xz_file->buffer; stream->avail_out = XZ_BUFFER_SIZE; lret = lzma_code(stream, LZMA_RUN); if (lret != LZMA_OK) { const char *err_msg; ret = CR_CW_ERR; switch (lret) { case LZMA_MEM_ERROR: err_msg = "Memory 
allocation failed"; break; case LZMA_DATA_ERROR: // This error is returned if the compressed // or uncompressed size get near 8 EiB // (2^63 bytes) because that's where the .xz // file format size limits currently are. // That is, the possibility of this error // is mostly theoretical unless you are doing // something very unusual. // // Note that strm->total_in and strm->total_out // have nothing to do with this error. Changing // those variables won't increase or decrease // the chance of getting this error. err_msg = "File size limits exceeded"; break; default: // This is most likely LZMA_PROG_ERROR. err_msg = "Unknown error, possibly a bug"; break; } g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ: lzma_code() error (%d): %s", lret, err_msg); break; // Error while coding } size_t out_len = XZ_BUFFER_SIZE - stream->avail_out; if ((fwrite(xz_file->buffer, 1, out_len, xz_file->file)) != out_len) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_XZ, "XZ: fwrite(): %s", g_strerror(errno)); break; // Error while writing } } break; } case (CR_CW_ZCK_COMPRESSION): { // ------------------------------------ #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *) cr_file->FILE; ssize_t wb = zck_write(zck, buffer, len); if (wb < 0) { ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_ZCK, "ZCK: Unable to write: %s", zck_get_error(zck)); break; } ret = wb; break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: // ----------------------------------------------------------- g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); break; } assert(!err || (ret == CR_CW_ERR && *err != NULL) || (ret != CR_CW_ERR && *err == NULL)); return ret; } int cr_puts(CR_FILE *cr_file, const char *str, GError **err) { size_t len; int ret = CR_CW_ERR; assert(cr_file); assert(!err || *err == NULL); if (!str) return 0; if (cr_file->mode != CR_CW_MODE_WRITE) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in write mode"); return CR_CW_ERR; } switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- case (CR_CW_GZ_COMPRESSION): // --------------------------------------- case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- case (CR_CW_XZ_COMPRESSION): // --------------------------------------- case (CR_CW_ZCK_COMPRESSION): // -------------------------------------- len = strlen(str); ret = cr_write(cr_file, str, len, err); if (ret != (int) len) ret = CR_CW_ERR; break; default: // ----------------------------------------------------------- g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); break; } assert(!err || (ret == CR_CW_ERR && *err != NULL) || (ret != CR_CW_ERR && *err == NULL)); return ret; } int cr_end_chunk(CR_FILE *cr_file, GError **err) { int ret = CRE_OK; assert(cr_file); assert(!err || *err == NULL); if (cr_file->mode != CR_CW_MODE_WRITE) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in write mode"); return CR_CW_ERR; } switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- case (CR_CW_GZ_COMPRESSION): // --------------------------------------- case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- case (CR_CW_XZ_COMPRESSION): // --------------------------------------- break; case (CR_CW_ZCK_COMPRESSION): { // ------------------------------------ #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *) cr_file->FILE; ssize_t wb = zck_end_chunk(zck); if (wb < 0) { 
g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Error ending chunk: %s", zck_get_error(zck)); return CR_CW_ERR; } ret = wb; break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: // ----------------------------------------------------------- g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); return CR_CW_ERR; break; } assert(!err || (ret == CR_CW_ERR && *err != NULL) || (ret != CR_CW_ERR && *err == NULL)); return ret; } int cr_set_autochunk(CR_FILE *cr_file, gboolean auto_chunk, GError **err) { int ret = CRE_OK; assert(cr_file); assert(!err || *err == NULL); if (cr_file->mode != CR_CW_MODE_WRITE) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in write mode"); return CR_CW_ERR; } switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- case (CR_CW_GZ_COMPRESSION): // --------------------------------------- case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- case (CR_CW_XZ_COMPRESSION): // --------------------------------------- break; case (CR_CW_ZCK_COMPRESSION): { // ------------------------------------ #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *) cr_file->FILE; if (!zck_set_ioption(zck, ZCK_MANUAL_CHUNK, !auto_chunk)) { g_set_error(err, ERR_DOMAIN, CRE_ZCK, "Error setting auto_chunk: %s", zck_get_error(zck)); return CR_CW_ERR; } break; #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); break; #endif // WITH_ZCHUNK } default: // ----------------------------------------------------------- g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); return CR_CW_ERR; break; } assert(!err || (ret == CR_CW_ERR && *err != NULL) || (ret != CR_CW_ERR && *err == NULL)); return ret; } int cr_printf(GError **err, CR_FILE *cr_file, const char *format, ...) 
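/* Formats the arguments into a temporary buffer via g_vasprintf(), writes the
 * result with cr_write(), frees the buffer, and returns the number of bytes
 * written (or CR_CW_ERR on failure). */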
{ va_list vl; int ret; gchar *buf = NULL; assert(cr_file); assert(!err || *err == NULL); if (!format) return 0; if (cr_file->mode != CR_CW_MODE_WRITE) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in write mode"); return CR_CW_ERR; } // Fill format string va_start(vl, format); ret = g_vasprintf(&buf, format, vl); va_end(vl); if (ret < 0) { g_debug("%s: vasprintf() call failed", __func__); g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "vasprintf() call failed"); return CR_CW_ERR; } assert(buf); int tmp_ret; switch (cr_file->type) { case (CR_CW_NO_COMPRESSION): // --------------------------------------- case (CR_CW_GZ_COMPRESSION): // --------------------------------------- case (CR_CW_BZ2_COMPRESSION): // -------------------------------------- case (CR_CW_XZ_COMPRESSION): // --------------------------------------- case (CR_CW_ZCK_COMPRESSION): // -------------------------------------- tmp_ret = cr_write(cr_file, buf, ret, err); if (tmp_ret != (int) ret) ret = CR_CW_ERR; break; default: // ----------------------------------------------------------- ret = CR_CW_ERR; g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); break; } g_free(buf); assert(!err || (ret == CR_CW_ERR && *err != NULL) || (ret != CR_CW_ERR && *err == NULL)); return ret; } ssize_t cr_get_zchunk_with_index(CR_FILE *cr_file, ssize_t zchunk_index, char **copy_buf, GError **err) { assert(cr_file); assert(!err || *err == NULL); if (cr_file->mode != CR_CW_MODE_READ) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "File is not opened in read mode"); return 0; } if (cr_file->type != CR_CW_ZCK_COMPRESSION){ g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad compressed file type"); return 0; } #ifdef WITH_ZCHUNK zckCtx *zck = (zckCtx *) cr_file->FILE; zckChunk *idx = zck_get_chunk(zck, zchunk_index); ssize_t chunk_size = zck_get_chunk_size(idx); if (chunk_size <= 0) return 0; *copy_buf = g_malloc(chunk_size); return zck_get_chunk_data(idx, *copy_buf, chunk_size); #else g_set_error(err, ERR_DOMAIN, CRE_IO, "createrepo_c wasn't compiled " "with zchunk support"); return 0; #endif // WITH_ZCHUNK } createrepo_c-0.17.0/src/compression_wrapper.h000066400000000000000000000200221400672373200212730ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_COMPRESSION_WRAPPER_H__ #define __C_CREATEREPOLIB_COMPRESSION_WRAPPER_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" /** \defgroup compression_wrapper Wrapper for compressed file. * \addtogroup compression_wrapper * @{ */ /** Compression type. 
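 * (For instance, CR_CW_GZ_COMPRESSION corresponds to ".gz" files and
 * CR_CW_XZ_COMPRESSION to ".xz"; CR_CW_AUTO_DETECT_COMPRESSION asks
 * cr_sopen()/cr_detect_compression() to work the type out from the file itself.)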
*/ typedef enum { CR_CW_AUTO_DETECT_COMPRESSION, /*!< Autodetection */ CR_CW_UNKNOWN_COMPRESSION, /*!< Unknown compression */ CR_CW_NO_COMPRESSION, /*!< No compression */ CR_CW_GZ_COMPRESSION, /*!< Gzip compression */ CR_CW_BZ2_COMPRESSION, /*!< BZip2 compression */ CR_CW_XZ_COMPRESSION, /*!< XZ compression */ CR_CW_ZCK_COMPRESSION, /*!< ZCK compression */ CR_CW_COMPRESSION_SENTINEL, /*!< Sentinel of the list */ } cr_CompressionType; /** Open modes. */ typedef enum { CR_CW_MODE_READ, /*!< Read mode */ CR_CW_MODE_WRITE, /*!< Write mode */ CR_CW_MODE_SENTINEL, /*!< Sentinel of the list */ } cr_OpenMode; /** Stat build about open content during compression (writting). */ typedef struct { gint64 size; /*!< Size of content */ cr_ChecksumType checksum_type; /*!< Checksum type */ char *checksum; /*!< Checksum */ gint64 hdr_size; /*!< Size of content */ cr_ChecksumType hdr_checksum_type; /*!< Checksum type */ char *hdr_checksum; /*!< Checksum */ } cr_ContentStat; /** Creates new cr_ContentStat object * @param type Type of checksum. (if CR_CHECKSUM_UNKNOWN is used, * no checksum calculation will be done) * @param err GError ** * @return cr_ContentStat object */ cr_ContentStat *cr_contentstat_new(cr_ChecksumType type, GError **err); /** Frees cr_ContentStat object. * @param cstat cr_ContentStat object * @param err GError ** */ void cr_contentstat_free(cr_ContentStat *cstat, GError **err); /** Structure represents a compressed file. */ typedef struct { cr_CompressionType type; /*!< Type of compression */ void *FILE; /*!< Pointer to gzFile, BZFILE, ... */ void *INNERFILE; /*!< Pointer to underlying FILE */ cr_OpenMode mode; /*!< Mode */ cr_ContentStat *stat; /*!< Content stats */ cr_ChecksumCtx *checksum_ctx; /*!< Checksum contenxt */ } CR_FILE; #define CR_CW_ERR -1 /*!< Return value - Error */ /** Returns a common suffix for the specified cr_CompressionType. * @param comtype compression type * @return common file suffix */ const char *cr_compression_suffix(cr_CompressionType comtype); /** Detect a compression type of the specified file. * @param filename filename * @param err GError ** * @return detected type of compression */ cr_CompressionType cr_detect_compression(const char* filename, GError **err); /** Return compression type. * @param name compression name * @return compression type */ cr_CompressionType cr_compression_type(const char *name); /** Open/Create the specified file. * @param FILENAME filename * @param MODE open mode * @param COMTYPE type of compression * @param ERR GError ** * @return pointer to a CR_FILE or NULL */ #define cr_open(FILENAME, MODE, COMTYPE, ERR) \ cr_sopen(FILENAME, MODE, COMTYPE, NULL, ERR) /** Open/Create the specified file. If opened for writting, you can pass * a cr_ContentStat object and after cr_close() get stats of * an open content (stats of uncompressed content). * @param filename filename * @param mode open mode * @param comtype type of compression * @param stat pointer to cr_ContentStat or NULL * @param err GError ** * @return pointer to a CR_FILE or NULL */ CR_FILE *cr_sopen(const char *filename, cr_OpenMode mode, cr_CompressionType comtype, cr_ContentStat *stat, GError **err); /** Sets the compression dictionary for a file * @param cr_file CR_FILE pointer * @param dict dictionary * @param len length of dictionary * @param err GError ** * @return CRE_OK or CR_CW_ERR (-1) */ int cr_set_dict(CR_FILE *cr_file, const void *dict, unsigned int len, GError **err); /** Reads an array of len bytes from the CR_FILE. 
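 *
 * A minimal illustrative read loop (not part of the original sources; error
 * handling is shortened, and "primary.xml.gz" and my_process() are only
 * hypothetical placeholders):
 *
 *   GError *err = NULL;
 *   CR_FILE *f = cr_open("primary.xml.gz", CR_CW_MODE_READ,
 *                        CR_CW_AUTO_DETECT_COMPRESSION, &err);
 *   if (f) {
 *       char buf[4096];
 *       int n;
 *       while ((n = cr_read(f, buf, sizeof(buf), &err)) > 0)
 *           my_process(buf, n);   // consume n uncompressed bytes
 *       cr_close(f, &err);
 *   }
 *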
* @param cr_file CR_FILE pointer * @param buffer target buffer * @param len number of bytes to read * @param err GError ** * @return number of readed bytes or CR_CW_ERR (-1) */ int cr_read(CR_FILE *cr_file, void *buffer, unsigned int len, GError **err); /** Writes the array of len bytes from buffer to the cr_file. * @param cr_file CR_FILE pointer * @param buffer source buffer * @param len number of bytes to read * @param err GError ** * @return number of uncompressed bytes readed (0 = EOF) * or CR_CW_ERR (-1) */ int cr_write(CR_FILE *cr_file, const void *buffer, unsigned int len, GError **err); /** Writes the string pointed by str into the cr_file. * @param cr_file CR_FILE pointer * @param str null terminated ('\0') string * @param err GError ** * @return number of uncompressed bytes writed or CR_CW_ERR */ int cr_puts(CR_FILE *cr_file, const char *str, GError **err); /** If compression format allows ending of chunks, tell it to end chunk * @param cr_file CR_FILE pointer * @param err GError ** * @return CRE_OK or CR_CW_ERR */ int cr_end_chunk(CR_FILE *cr_file, GError **err); /** Set zchunk auto-chunk algorithm. Must be done before first byte is written * @param cr_file CR_FILE pointer * @param auto_chunk Whether auto-chunking should be enabled * @param err GError ** * @return CRE_OK or CR_CW_ERR */ int cr_set_autochunk(CR_FILE *cr_file, gboolean auto_chunk, GError **err); /** Get specific zchunks data indentified by index * @param cr_file CR_FILE pointer * @param zchunk_index Index of wanted zchunk * @param copy_buf Output pointer, upon return contains wanted zchunk data * @param err GError ** * @return Size of data from zchunk indexed by zchunk_index */ ssize_t cr_get_zchunk_with_index(CR_FILE *f, ssize_t zchunk_index, char **copy_buf, GError **err); /** Writes a formatted string into the cr_file. * @param err GError ** * @param cr_file CR_FILE pointer * @param format format string * @param ... list of additional arguments as specified in format * @return Number of bytes written or CR_CW_ERR (-1) */ int cr_printf(GError **err, CR_FILE *cr_file, const char *format, ...); /** Closes the CR_FILE. * @param cr_file CR_FILE pointer * @param err GError ** * @return cr_Error code */ int cr_close(CR_FILE *cr_file, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_COMPRESSION_WRAPPER_H__ */ createrepo_c-0.17.0/src/constants.h000066400000000000000000000021621400672373200172130ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_CONSTANTS_H__ #define __C_CREATEREPOLIB_CONSTANTS_H__ #ifdef __cplusplus extern "C" { #endif /** \defgroup contants Global constants and enums. 
* \addtogroup constants * @{ */ /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_CONSTANTS_H__ */ createrepo_c-0.17.0/src/createrepo_c.c000066400000000000000000002531001400672373200176250ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cmd_parser.h" #include "compression_wrapper.h" #include "createrepo_shared.h" #include "deltarpms.h" #include "dumper_thread.h" #include "checksum.h" #include "cleanup.h" #include "error.h" #include "helpers.h" #include "load_metadata.h" #include "metadata_internal.h" #include "locate_metadata.h" #include "misc.h" #include "parsepkg.h" #include "repomd.h" #include "sqlite.h" #include "threads.h" #include "version.h" #include "xml_dump.h" #include "xml_file.h" #ifdef WITH_LIBMODULEMD #include #endif /* WITH_LIBMODULEMD */ #define OUTDELTADIR "drpms/" // TODO: Pass only exlude_masks list here /** Check if the filename is excluded by any exlude mask. * @param filename Filename (basename). * @param options Command line options. * @return TRUE if file should be included, FALSE otherwise */ static gboolean allowed_file(const gchar *filename, GSList *exclude_masks) { // Check file against exclude glob masks if (exclude_masks) { int str_len = strlen(filename); gchar *reversed_filename = g_utf8_strreverse(filename, str_len); GSList *element = exclude_masks; for (; element; element=g_slist_next(element)) { if (g_pattern_match((GPatternSpec *) element->data, str_len, filename, reversed_filename)) { g_free(reversed_filename); g_debug("Exclude masks hit - skipping: %s", filename); return FALSE; } } g_free(reversed_filename); } return TRUE; } static gboolean allowed_modulemd_module_metadata_file(const gchar *filename) { if (g_str_has_suffix (filename, ".modulemd.yaml") || g_str_has_suffix (filename, ".modulemd-defaults.yaml") || g_str_has_suffix (filename, "modules.yaml")) { return TRUE; } return FALSE; } /** Function used to sort pool tasks. * This function is responsible for order of packages in metadata. * * @param a_p Pointer to first struct PoolTask * @param b_p Pointer to second struct PoolTask * @param user_data Unused (user data) */ static int task_cmp(gconstpointer a_p, gconstpointer b_p, G_GNUC_UNUSED gpointer user_data) { int ret; const struct PoolTask *a = a_p; const struct PoolTask *b = b_p; ret = g_strcmp0(a->filename, b->filename); if (ret) return ret; return g_strcmp0(a->path, b->path); } /** Recursively walkt throught the input directory and add push the found * rpms to the thread pool (create a PoolTask and push it to the pool). 
* If the filelists is supplied then no recursive walk is done and only * files from filelists are pushed into the pool. * This function also filters out files that shouldn't be processed * (e.g. directories with .rpm suffix, files that match one of * the exclude masks, etc.). * * @param pool GThreadPool pool * @param in_dir Directory to scan * @param cmd_options Options specified on command line * @param current_pkglist Pointer to a list where basenames of files that * will be processed will be appended to. * @return Number of packages that are going to be processed */ static long fill_pool(GThreadPool *pool, gchar *in_dir, struct CmdOptions *cmd_options, GSList **current_pkglist, long *task_count, int media_id) { GQueue queue = G_QUEUE_INIT; struct PoolTask *task; if ( ! cmd_options->split ) { media_id = 0; } if ((cmd_options->pkglist || cmd_options->recycle_pkglist) && !cmd_options->include_pkgs) { g_warning("Used pkglist doesn't contain any useful items"); } else if (!(cmd_options->include_pkgs)) { // --pkglist (or --includepkg, or --recycle-pkglist) is not supplied // --> do dir walk g_message("Directory walk started"); size_t in_dir_len = strlen(in_dir); GStringChunk *sub_dirs_chunk = g_string_chunk_new(1024); GQueue *sub_dirs = g_queue_new(); gchar *input_dir_stripped; input_dir_stripped = g_string_chunk_insert_len(sub_dirs_chunk, in_dir, in_dir_len-1); g_queue_push_head(sub_dirs, input_dir_stripped); char *dirname; while ((dirname = g_queue_pop_head(sub_dirs))) { // Open dir GDir *dirp; dirp = g_dir_open (dirname, 0, NULL); if (!dirp) { g_warning("Cannot open directory: %s", dirname); continue; } const gchar *filename; while ((filename = g_dir_read_name(dirp))) { if (!allowed_file(filename, cmd_options->exclude_masks)) { continue; } gchar *full_path = g_strconcat(dirname, "/", filename, NULL); if (!g_file_test(full_path, G_FILE_TEST_IS_REGULAR)) { if (g_file_test(full_path, G_FILE_TEST_IS_DIR)) { // Directory gchar *sub_dir_in_chunk; sub_dir_in_chunk = g_string_chunk_insert(sub_dirs_chunk, full_path); g_queue_push_head(sub_dirs, sub_dir_in_chunk); g_debug("Dir to scan: %s", sub_dir_in_chunk); } g_free(full_path); continue; } // Skip symbolic links if --skip-symlinks arg is used if (cmd_options->skip_symlinks && g_file_test(full_path, G_FILE_TEST_IS_SYMLINK)) { g_debug("Skipped symlink: %s", full_path); g_free(full_path); continue; } if (allowed_modulemd_module_metadata_file(full_path)) { #ifdef WITH_LIBMODULEMD cmd_options->modulemd_metadata = g_slist_prepend( cmd_options->modulemd_metadata, (gpointer) full_path); #else g_warning("createrepo_c not compiled with libmodulemd support, " "ignoring found module metadata: %s", full_path); g_free(full_path); #endif /* WITH_LIBMODULEMD */ continue; } // Non .rpm files are ignored if (!g_str_has_suffix (filename, ".rpm")) { g_free(full_path); continue; } // Check filename against exclude glob masks const gchar *repo_relative_path = filename; if (in_dir_len < strlen(full_path)) // This probably should be always true repo_relative_path = full_path + in_dir_len; if (allowed_file(repo_relative_path, cmd_options->exclude_masks)) { // FINALLY! Add file into pool g_debug("Adding pkg: %s", full_path); task = g_malloc(sizeof(struct PoolTask)); task->full_path = full_path; task->filename = g_strdup(filename); task->path = g_strdup(dirname); *current_pkglist = g_slist_prepend(*current_pkglist, task->filename); // TODO: One common path for all tasks with the same path? 
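// Keep the queue ordered by filename (then path, see task_cmp()) - this determines the order of packages in the generated metadata.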
g_queue_insert_sorted(&queue, task, task_cmp, NULL); } else { g_free(full_path); } } // Cleanup g_dir_close (dirp); } g_string_chunk_free (sub_dirs_chunk); g_queue_free(sub_dirs); } else { // pkglist is supplied - use only files in pkglist g_debug("Skipping dir walk - using pkglist"); GSList *element = cmd_options->include_pkgs; for (; element; element=g_slist_next(element)) { gchar *relative_path = (gchar *) element->data; // ^^^ path from pkglist e.g. packages/i386/foobar.rpm if (allowed_modulemd_module_metadata_file(relative_path)) { #ifdef WITH_LIBMODULEMD cmd_options->modulemd_metadata = g_slist_prepend( cmd_options->modulemd_metadata, (gpointer) g_strdup(relative_path)); #else g_warning("createrepo_c not compiled with libmodulemd support, " "ignoring found module metadata: %s", relative_path); #endif /* WITH_LIBMODULEMD */ continue; } gchar *filename; // foobar.rpm // Get index of last '/' int x = strlen(relative_path); for (; x > 0 && relative_path[x] != '/'; x--) ; if (!x) // There was no '/' in path filename = relative_path; else // Use only a last part of the path filename = relative_path + x + 1; if (allowed_file(relative_path, cmd_options->exclude_masks)) { // Check filename against exclude glob masks gchar *full_path = g_strconcat(in_dir, relative_path, NULL); // ^^^ /path/to/in_repo/packages/i386/foobar.rpm g_debug("Adding pkg: %s", full_path); task = g_malloc(sizeof(struct PoolTask)); task->full_path = full_path; task->filename = g_strdup(filename); // foobar.rpm task->path = strndup(relative_path, x); // packages/i386/ *current_pkglist = g_slist_prepend(*current_pkglist, task->filename); g_queue_insert_sorted(&queue, task, task_cmp, NULL); } } } // Push sorted tasks into the thread pool while ((task = g_queue_pop_head(&queue)) != NULL) { task->id = *task_count; task->media_id = media_id; g_thread_pool_push(pool, task, NULL); ++*task_count; } return *task_count; } /** Prepare cache dir for checksums. * Called only if --cachedir options is used. * It tries to create cache directory if it doesn't exist yet. * It also fill checksum_cachedir option in cmd_options structure. * * @param cmd_options Commandline options * @param out_dir Repo output directory * @param err GError ** * @return FALSE if err is set, TRUE otherwise */ static gboolean prepare_cache_dir(struct CmdOptions *cmd_options, const gchar *out_dir, GError **err) { if (!cmd_options->cachedir) return TRUE; if (g_str_has_prefix(cmd_options->cachedir, "/")) { // Absolute local path cmd_options->checksum_cachedir = cr_normalize_dir_path( cmd_options->cachedir); } else { // Relative path (from intput_dir) gchar *tmp = g_strconcat(out_dir, cmd_options->cachedir, NULL); cmd_options->checksum_cachedir = cr_normalize_dir_path(tmp); g_free(tmp); } // Create the cache directory if (g_mkdir(cmd_options->checksum_cachedir, S_IRWXU|S_IRWXG|S_IROTH|S_IXOTH)) { if (errno == EEXIST) { if (!g_file_test(cmd_options->checksum_cachedir, G_FILE_TEST_IS_DIR)) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "The %s already exists and it is not a directory!", cmd_options->checksum_cachedir); return FALSE; } } else { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "cannot use cachedir %s: %s", cmd_options->checksum_cachedir, g_strerror(errno)); return FALSE; } } g_debug("Cachedir for checksums is %s", cmd_options->checksum_cachedir); return TRUE; } /** Adds groupfile cr_RepomdRecords to additional_metadata_rec list. 
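 * (With gz compression, for example, a "group" record would typically be
 * paired with a "group_gz" record holding the compressed copy - the leading
 * '.' of the compression suffix is replaced by '_'.)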
* Groupfile is a special case, because it's the only metadatum * that can be inputed to createrepo_c via command line option. * * @param group_metadatum Cr_Metadatum for used groupfile * @param additional_metadata_rec GSList of cr_RepomdRecords * @param comp_type Groupfile compression type * @param repomd_checksum_type * * @return GSList with added cr_RepomdRecords for * groupfile */ GSList* cr_create_repomd_records_for_groupfile_metadata(const cr_Metadatum *group_metadatum, GSList *additional_metadata_rec, cr_CompressionType comp_type, cr_ChecksumType repomd_checksum_type) { GError *tmp_err = NULL; char *compression_suffix = g_strdup(cr_compression_suffix(comp_type)); compression_suffix[0] = '_'; //replace '.' additional_metadata_rec = g_slist_prepend(additional_metadata_rec, cr_repomd_record_new( group_metadatum->type, group_metadatum->name )); gchar *compressed_record_type = g_strconcat(group_metadatum->type, compression_suffix, NULL); additional_metadata_rec = g_slist_prepend(additional_metadata_rec, cr_repomd_record_new( compressed_record_type, NULL )); cr_repomd_record_compress_and_fill(additional_metadata_rec->next->data, additional_metadata_rec->data, repomd_checksum_type, comp_type, NULL, &tmp_err); if (tmp_err) { g_critical("Cannot process %s %s: %s", group_metadatum->type, group_metadatum->name, tmp_err->message); g_free(compression_suffix); g_free(compressed_record_type); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(compressed_record_type); g_free(compression_suffix); return additional_metadata_rec; } /** Creates list of cr_RepomdRecords from list * of additional metadata (cr_Metadatum) * * @param additional_metadata List of cr_Metadatum * @param repomd_checksum_type * * @return New GSList of cr_RepomdRecords */ static GSList* cr_create_repomd_records_for_additional_metadata(GSList *additional_metadata, cr_ChecksumType repomd_checksum_type) { GError *tmp_err = NULL; GSList *additional_metadata_rec = NULL; GSList *element = additional_metadata; for (; element; element=g_slist_next(element)) { additional_metadata_rec = g_slist_prepend(additional_metadata_rec, cr_repomd_record_new( ((cr_Metadatum *) element->data)->type, ((cr_Metadatum *) element->data)->name )); cr_repomd_record_fill(additional_metadata_rec->data, repomd_checksum_type, &tmp_err); if (tmp_err) { g_critical("Cannot process %s %s: %s", ((cr_Metadatum *) element->data)->type, ((cr_Metadatum *) element->data)->name, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } } return additional_metadata_rec; } /** Check if task finished without error, if yes * use content stats of the new file * * @param task Rewrite pkg count task * @param filename Name of file with wrong package count * @param exit_val If errors occured set createrepo_c exit value * @param content_stat Content stats for filename * */ static void error_check_and_set_content_stat(cr_CompressionTask *task, char *filename, int *exit_val, cr_ContentStat **content_stat){ if (task->err) { g_critical("Cannot rewrite pkg count in %s: %s", filename, task->err->message); *exit_val = 2; }else{ cr_contentstat_free(*content_stat, NULL); *content_stat = task->stat; task->stat = NULL; } } static void load_old_metadata(cr_Metadata **md, struct cr_MetadataLocation **md_location, GSList *current_pkglist, struct CmdOptions *cmd_options, gchar *dir, GThreadPool *pool, GError *tmp_err) { *md_location = cr_locate_metadata(dir, TRUE, &tmp_err); if (tmp_err) { if (tmp_err->domain == CRE_MODULEMD) { g_thread_pool_free(pool, FALSE, FALSE); 
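// A modulemd failure in the old metadata is fatal: release what was already allocated and exit with the original error code.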
g_clear_pointer(md_location, cr_metadatalocation_free); g_critical("%s\n",tmp_err->message); exit(tmp_err->code); } else { g_debug("Old metadata from default outputdir not found: %s",tmp_err->message); g_clear_error(&tmp_err); } } *md = cr_metadata_new(CR_HT_KEY_HREF, 1, current_pkglist); cr_metadata_set_dupaction(*md, CR_HT_DUPACT_REMOVEALL); int ret; if (*md_location) { ret = cr_metadata_load_xml(*md, *md_location, &tmp_err); assert(ret == CRE_OK || tmp_err); if (ret == CRE_OK) { g_debug("Old metadata from: %s - loaded", (*md_location)->original_url); } else { g_debug("Old metadata from %s - loading failed: %s", (*md_location)->original_url, tmp_err->message); g_clear_error(&tmp_err); } } // Load repodata from --update-md-path GSList *element = cmd_options->l_update_md_paths; for (; element; element = g_slist_next(element)) { char *path = (char *) element->data; g_message("Loading metadata from md-path: %s", path); ret = cr_metadata_locate_and_load_xml(*md, path, &tmp_err); assert(ret == CRE_OK || tmp_err); if (ret == CRE_OK) { g_debug("Metadata from md-path %s - loaded", path); } else { g_warning("Metadata from md-path %s - loading failed: %s", path, tmp_err->message); g_clear_error(&tmp_err); } } g_message("Loaded information about %d packages", g_hash_table_size(cr_metadata_hashtable(*md))); } int main(int argc, char **argv) { struct CmdOptions *cmd_options; gboolean ret; GError *tmp_err = NULL; int exit_val = EXIT_SUCCESS; // Arguments parsing cmd_options = parse_arguments(&argc, &argv, &tmp_err); if (!cmd_options) { g_printerr("Argument parsing failed: %s\n", tmp_err->message); g_error_free(tmp_err); exit(EXIT_FAILURE); } // Arguments pre-check if (cmd_options->version) { // Just print version printf("Version: %s\n", cr_version_string_with_features()); free_options(cmd_options); exit(EXIT_SUCCESS); } if ( cmd_options->split ) { if (argc < 2) { g_printerr("Must specify at least one directory to index.\n"); g_printerr("Usage: %s [options] [directory_to_index] ...\n\n", cr_get_filename(argv[0])); free_options(cmd_options); exit(EXIT_FAILURE); } } else { if (argc != 2) { // No mandatory arguments g_printerr("Must specify exactly one directory to index.\n"); g_printerr("Usage: %s [options] \n\n", cr_get_filename(argv[0])); free_options(cmd_options); exit(EXIT_FAILURE); } } // Dirs gchar *in_dir = NULL; // path/to/repo/ gchar *in_repo = NULL; // path/to/repo/repodata/ gchar *out_dir = NULL; // path/to/out_repo/ gchar *out_repo = NULL; // path/to/out_repo/repodata/ gchar *tmp_out_repo = NULL; // usually path/to/out_repo/.repodata/ gchar *lock_dir = NULL; // path/to/out_repo/.repodata/ if (cmd_options->basedir && !g_str_has_prefix(argv[1], "/")) { gchar *tmp = cr_normalize_dir_path(argv[1]); in_dir = g_build_filename(cmd_options->basedir, tmp, NULL); g_free(tmp); } else { in_dir = cr_normalize_dir_path(argv[1]); } // Check if inputdir exists if (!g_file_test(in_dir, G_FILE_TEST_IS_DIR)) { g_printerr("Directory %s must exist\n", in_dir); g_free(in_dir); free_options(cmd_options); exit(EXIT_FAILURE); } // Check parsed arguments if (!check_arguments(cmd_options, in_dir, &tmp_err)) { g_printerr("%s\n", tmp_err->message); g_error_free(tmp_err); g_free(in_dir); free_options(cmd_options); exit(EXIT_FAILURE); } // Set logging stuff cr_setup_logging(cmd_options->quiet, cmd_options->verbose); // Emit debug message with version g_debug("Version: %s", cr_version_string_with_features()); // Set paths of input and output repos in_repo = g_strconcat(in_dir, "repodata/", NULL); if (cmd_options->outputdir) 
{ out_dir = cr_normalize_dir_path(cmd_options->outputdir); out_repo = g_strconcat(out_dir, "repodata/", NULL); } else { out_dir = g_strdup(in_dir); out_repo = g_strdup(in_repo); } // Prepare cachedir for checksum if --cachedir is used if (!prepare_cache_dir(cmd_options, out_dir, &tmp_err)) { g_printerr("%s\n", tmp_err->message); g_error_free(tmp_err); g_free(in_dir); g_free(in_repo); g_free(out_dir); g_free(out_repo); free_options(cmd_options); exit(EXIT_FAILURE); } // Block signals that terminates the process if (!cr_block_terminating_signals(&tmp_err)) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } // Check if lock exists & Create lock dir if (!cr_lock_repo(out_dir, cmd_options->ignore_lock, &lock_dir, &tmp_out_repo, &tmp_err)) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } // Setup cleanup handlers if (!cr_set_cleanup_handler(lock_dir, tmp_out_repo, &tmp_err)) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } // Unblock the blocked signals if (!cr_unblock_terminating_signals(&tmp_err)) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } // Open package list FILE *output_pkg_list = NULL; if (cmd_options->read_pkgs_list) { output_pkg_list = fopen(cmd_options->read_pkgs_list, "w"); if (!output_pkg_list) { g_critical("Cannot open \"%s\" for writing: %s", cmd_options->read_pkgs_list, g_strerror(errno)); exit(EXIT_FAILURE); } } // Init package parser cr_package_parser_init(); cr_xml_dump_init(); // Thread pool - Creation struct UserData user_data = {0}; GThreadPool *pool = g_thread_pool_new(cr_dumper_thread, &user_data, 0, TRUE, NULL); g_debug("Thread pool ready"); long task_count = 0; GSList *current_pkglist = NULL; /* ^^^ List with basenames of files which will be processed */ // Load old metadata if --update struct cr_MetadataLocation *old_metadata_location = NULL; cr_Metadata *old_metadata = NULL; gchar *old_metadata_dir = cmd_options->outputdir ? 
out_dir : in_dir; if (cmd_options->recycle_pkglist) { // load the old metadata early, so we can read the list of RPMs load_old_metadata(&old_metadata, &old_metadata_location, NULL /* no filter wanted in this case */, cmd_options, old_metadata_dir, pool, tmp_err); GHashTableIter iter; g_hash_table_iter_init(&iter, cr_metadata_hashtable(old_metadata)); gpointer pkg_pointer; while (g_hash_table_iter_next(&iter, NULL, &pkg_pointer)) { cr_Package *pkg = (cr_Package *)pkg_pointer; cmd_options->include_pkgs = g_slist_prepend( cmd_options->include_pkgs, (gpointer) g_strdup(pkg->location_href)); } } for (int media_id = 1; media_id < argc; media_id++ ) { gchar *tmp_in_dir = cr_normalize_dir_path(argv[media_id]); // Thread pool - Fill with tasks fill_pool(pool, tmp_in_dir, cmd_options, ¤t_pkglist, &task_count, media_id); g_free(tmp_in_dir); } g_debug("Package count: %ld", task_count); g_message("Directory walk done - %ld packages", task_count); if (cmd_options->update) { if (old_metadata) g_debug("Old metadata already loaded."); else if (!task_count) g_debug("No packages found - skipping metadata loading"); else load_old_metadata(&old_metadata, &old_metadata_location, current_pkglist, cmd_options, old_metadata_dir, pool, tmp_err); } g_slist_free(current_pkglist); current_pkglist = NULL; GSList *additional_metadata = NULL; // Setup compression types const char *xml_compression_suffix = NULL; const char *sqlite_compression_suffix = NULL; const char *compression_suffix = NULL; cr_CompressionType xml_compression = CR_CW_GZ_COMPRESSION; cr_CompressionType sqlite_compression = CR_CW_BZ2_COMPRESSION; cr_CompressionType compression = CR_CW_GZ_COMPRESSION; if (cmd_options->compression_type != CR_CW_UNKNOWN_COMPRESSION) { sqlite_compression = cmd_options->compression_type; compression = cmd_options->compression_type; } if (cmd_options->general_compression_type != CR_CW_UNKNOWN_COMPRESSION) { xml_compression = cmd_options->general_compression_type; sqlite_compression = cmd_options->general_compression_type; compression = cmd_options->general_compression_type; } xml_compression_suffix = cr_compression_suffix(xml_compression); sqlite_compression_suffix = cr_compression_suffix(sqlite_compression); compression_suffix = cr_compression_suffix(compression); cr_Metadatum *new_groupfile_metadatum = NULL; // Groupfile specified as argument if (cmd_options->groupfile_fullpath) { new_groupfile_metadatum = g_malloc0(sizeof(cr_Metadatum)); new_groupfile_metadatum->name = cr_copy_metadatum(cmd_options->groupfile_fullpath, tmp_out_repo, &tmp_err); new_groupfile_metadatum->type = g_strdup("group"); //remove old groupfile(s) (every [compressed] variant) if (old_metadata_location){ GSList *node_iter = old_metadata_location->additional_metadata; while (node_iter != NULL){ cr_Metadatum *m = node_iter->data; GSList *next = g_slist_next(node_iter); if(g_str_has_prefix(m->type, "group")){ old_metadata_location->additional_metadata = g_slist_delete_link(old_metadata_location->additional_metadata, node_iter); cr_metadatum_free(m); } node_iter = next; } } } #ifdef WITH_LIBMODULEMD // module metadata found in repo if (cmd_options->modulemd_metadata) { ModulemdModuleIndexMerger *merger = modulemd_module_index_merger_new(); if (!merger) { g_critical("Could not allocate module merger"); exit(EXIT_FAILURE); } ModulemdModuleIndex *moduleindex; //load all found module metatada and associate it with merger GSList *element = cmd_options->modulemd_metadata; for (; element; element=g_slist_next(element)) { moduleindex = modulemd_module_index_new(); 
if (!moduleindex) { g_critical("Could not allocate new module index"); g_clear_pointer(&merger, g_object_unref); exit(EXIT_FAILURE); } g_autoptr (GPtrArray) failures = NULL; gboolean result = modulemd_module_index_update_from_file(moduleindex, ((char *) element->data), TRUE, &failures, &tmp_err); if (!result) { g_critical("Could not update module index from file %s: %s", (char *) element->data, (tmp_err ? tmp_err->message : "Unknown error")); g_clear_error(&tmp_err); g_clear_pointer(&moduleindex, g_object_unref); g_clear_pointer(&merger, g_object_unref); exit(EXIT_FAILURE); } modulemd_module_index_merger_associate_index(merger, moduleindex, 0); g_clear_pointer(&moduleindex, g_object_unref); } if (cmd_options->update && cmd_options->keep_all_metadata && old_metadata_location && old_metadata_location->additional_metadata){ //associate old metadata into the merger if (cr_metadata_modulemd(old_metadata)){ modulemd_module_index_merger_associate_index(merger, cr_metadata_modulemd(old_metadata), 0); if (tmp_err) { g_critical("%s: Cannot merge old module index with new: %s", __func__, tmp_err->message); g_clear_error(&tmp_err); g_clear_pointer(&merger, g_object_unref); exit(EXIT_FAILURE); } } //remove old modules (every [compressed] variant) GSList *node_iter = old_metadata_location->additional_metadata; while (node_iter != NULL){ GSList *next = g_slist_next(node_iter); cr_Metadatum *m = node_iter->data; if(g_str_has_prefix(m->type, "modules")){ old_metadata_location->additional_metadata = g_slist_delete_link( old_metadata_location->additional_metadata, node_iter); cr_metadatum_free(m); } node_iter = next; } } //merge module metadata and dump it to string moduleindex = modulemd_module_index_merger_resolve (merger, &tmp_err); g_clear_pointer(&merger, g_object_unref); char *moduleindex_str = modulemd_module_index_dump_to_string (moduleindex, &tmp_err); g_clear_pointer(&moduleindex, g_object_unref); if (tmp_err) { g_critical("%s: Cannot dump module index: %s", __func__, tmp_err->message); free(moduleindex_str); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } //compress new module metadata string to a file in temporary .repodata gchar *modules_metadata_path = g_strconcat(tmp_out_repo, "modules.yaml", compression_suffix, NULL); CR_FILE *modules_file = NULL; modules_file = cr_open(modules_metadata_path, CR_CW_MODE_WRITE, compression, &tmp_err); if (modules_file == NULL) { g_critical("%s: Cannot open source file %s: %s", __func__, modules_metadata_path, (tmp_err ? 
tmp_err->message : "Unknown error")); g_clear_error(&tmp_err); free(moduleindex_str); free(modules_metadata_path); exit(EXIT_FAILURE); } cr_puts(modules_file, moduleindex_str, &tmp_err); free(moduleindex_str); cr_close(modules_file, &tmp_err); if (tmp_err) { g_critical("%s: Error while closing: : %s", __func__, tmp_err->message); g_clear_error(&tmp_err); free(modules_metadata_path); exit(EXIT_FAILURE); } //create additional metadatum for new module metadata file cr_Metadatum *new_modules_metadatum = g_malloc0(sizeof(cr_Metadatum)); new_modules_metadatum->name = modules_metadata_path; new_modules_metadatum->type = g_strdup("modules"); additional_metadata = g_slist_prepend(additional_metadata, new_modules_metadatum); } #endif /* WITH_LIBMODULEMD */ if (cmd_options->update && cmd_options->keep_all_metadata && old_metadata_location && old_metadata_location->additional_metadata) { GSList *element = old_metadata_location->additional_metadata; cr_Metadatum *m; for (; element; element=g_slist_next(element)) { m = g_malloc0(sizeof(cr_Metadatum)); m->name = cr_copy_metadatum(((cr_Metadatum *) element->data)->name, tmp_out_repo, &tmp_err); m->type = g_strdup(((cr_Metadatum *) element->data)->type); additional_metadata = g_slist_prepend(additional_metadata, m); } } cr_metadatalocation_free(old_metadata_location); old_metadata_location = NULL; // Create and open new compressed files cr_XmlFile *pri_cr_file; cr_XmlFile *fil_cr_file; cr_XmlFile *oth_cr_file; cr_ContentStat *pri_stat; cr_ContentStat *fil_stat; cr_ContentStat *oth_stat; gchar *pri_xml_filename; gchar *fil_xml_filename; gchar *oth_xml_filename; g_message("Temporary output repo path: %s", tmp_out_repo); g_debug("Creating .xml.gz files"); pri_xml_filename = g_strconcat(tmp_out_repo, "/primary.xml", xml_compression_suffix, NULL); fil_xml_filename = g_strconcat(tmp_out_repo, "/filelists.xml", xml_compression_suffix, NULL); oth_xml_filename = g_strconcat(tmp_out_repo, "/other.xml", xml_compression_suffix, NULL); pri_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); pri_cr_file = cr_xmlfile_sopen_primary(pri_xml_filename, xml_compression, pri_stat, &tmp_err); assert(pri_cr_file || tmp_err); if (!pri_cr_file) { g_critical("Cannot open file %s: %s", pri_xml_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_stat, NULL); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); exit(EXIT_FAILURE); } fil_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); fil_cr_file = cr_xmlfile_sopen_filelists(fil_xml_filename, xml_compression, fil_stat, &tmp_err); assert(fil_cr_file || tmp_err); if (!fil_cr_file) { g_critical("Cannot open file %s: %s", fil_xml_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); cr_xmlfile_close(pri_cr_file, NULL); exit(EXIT_FAILURE); } oth_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); oth_cr_file = cr_xmlfile_sopen_other(oth_xml_filename, xml_compression, oth_stat, &tmp_err); assert(oth_cr_file || tmp_err); if (!oth_cr_file) { g_critical("Cannot open file %s: %s", oth_xml_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); cr_contentstat_free(oth_stat, NULL); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); cr_xmlfile_close(fil_cr_file, NULL); cr_xmlfile_close(pri_cr_file, 
NULL); exit(EXIT_FAILURE); } // Set number of packages g_debug("Setting number of packages"); cr_xmlfile_set_num_of_pkgs(pri_cr_file, task_count, NULL); cr_xmlfile_set_num_of_pkgs(fil_cr_file, task_count, NULL); cr_xmlfile_set_num_of_pkgs(oth_cr_file, task_count, NULL); // Open sqlite databases gchar *pri_db_filename = NULL; gchar *fil_db_filename = NULL; gchar *oth_db_filename = NULL; cr_SqliteDb *pri_db = NULL; cr_SqliteDb *fil_db = NULL; cr_SqliteDb *oth_db = NULL; if (!cmd_options->no_database) { _cleanup_file_close_ int pri_db_fd = -1; _cleanup_file_close_ int fil_db_fd = -1; _cleanup_file_close_ int oth_db_fd = -1; g_message("Preparing sqlite DBs"); if (!cmd_options->local_sqlite) { g_debug("Creating databases"); pri_db_filename = g_strconcat(tmp_out_repo, "/primary.sqlite", NULL); fil_db_filename = g_strconcat(tmp_out_repo, "/filelists.sqlite", NULL); oth_db_filename = g_strconcat(tmp_out_repo, "/other.sqlite", NULL); } else { g_debug("Creating databases localy"); const gchar *tmpdir = g_get_tmp_dir(); pri_db_filename = g_build_filename(tmpdir, "primary.XXXXXX.sqlite", NULL); fil_db_filename = g_build_filename(tmpdir, "filelists.XXXXXX.sqlite", NULL); oth_db_filename = g_build_filename(tmpdir, "other.XXXXXXX.sqlite", NULL); pri_db_fd = g_mkstemp(pri_db_filename); g_debug("%s", pri_db_filename); if (pri_db_fd == -1) { g_critical("Cannot open %s: %s", pri_db_filename, g_strerror(errno)); exit(EXIT_FAILURE); } fil_db_fd = g_mkstemp(fil_db_filename); g_debug("%s", fil_db_filename); if (fil_db_fd == -1) { g_critical("Cannot open %s: %s", fil_db_filename, g_strerror(errno)); exit(EXIT_FAILURE); } oth_db_fd = g_mkstemp(oth_db_filename); g_debug("%s", oth_db_filename); if (oth_db_fd == -1) { g_critical("Cannot open %s: %s", oth_db_filename, g_strerror(errno)); exit(EXIT_FAILURE); } } pri_db = cr_db_open_primary(pri_db_filename, &tmp_err); assert(pri_db || tmp_err); if (!pri_db) { g_critical("Cannot open %s: %s", pri_db_filename, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } fil_db = cr_db_open_filelists(fil_db_filename, &tmp_err); assert(fil_db || tmp_err); if (!fil_db) { g_critical("Cannot open %s: %s", fil_db_filename, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } oth_db = cr_db_open_other(oth_db_filename, &tmp_err); assert(oth_db || tmp_err); if (!oth_db) { g_critical("Cannot open %s: %s", oth_db_filename, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } } gchar *pri_zck_filename = NULL; gchar *fil_zck_filename = NULL; gchar *oth_zck_filename = NULL; cr_XmlFile *pri_cr_zck = NULL; cr_XmlFile *fil_cr_zck = NULL; cr_XmlFile *oth_cr_zck = NULL; cr_ContentStat *pri_zck_stat = NULL; cr_ContentStat *fil_zck_stat = NULL; cr_ContentStat *oth_zck_stat = NULL; gchar *pri_dict = NULL; gchar *fil_dict = NULL; gchar *oth_dict = NULL; size_t pri_dict_size = 0; size_t fil_dict_size = 0; size_t oth_dict_size = 0; gchar *pri_dict_file = NULL; gchar *fil_dict_file = NULL; gchar *oth_dict_file = NULL; if (cmd_options->zck_dict_dir) { pri_dict_file = cr_get_dict_file(cmd_options->zck_dict_dir, "primary.xml"); fil_dict_file = cr_get_dict_file(cmd_options->zck_dict_dir, "filelists.xml"); oth_dict_file = cr_get_dict_file(cmd_options->zck_dict_dir, "other.xml"); if (pri_dict_file && !g_file_get_contents(pri_dict_file, &pri_dict, &pri_dict_size, &tmp_err)) { g_critical("Error reading zchunk primary dict %s: %s", pri_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } if (fil_dict_file && !g_file_get_contents(fil_dict_file, 
&fil_dict, &fil_dict_size, &tmp_err)) { g_critical("Error reading zchunk filelists dict %s: %s", fil_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } if (oth_dict_file && !g_file_get_contents(oth_dict_file, &oth_dict, &oth_dict_size, &tmp_err)) { g_critical("Error reading zchunk other dict %s: %s", oth_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } } if (cmd_options->zck_compression) { g_debug("Creating .xml.zck files"); pri_zck_filename = g_strconcat(tmp_out_repo, "/primary.xml.zck", NULL); fil_zck_filename = g_strconcat(tmp_out_repo, "/filelists.xml.zck", NULL); oth_zck_filename = g_strconcat(tmp_out_repo, "/other.xml.zck", NULL); pri_zck_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); pri_cr_zck = cr_xmlfile_sopen_primary(pri_zck_filename, CR_CW_ZCK_COMPRESSION, pri_zck_stat, &tmp_err); assert(pri_cr_zck || tmp_err); if (!pri_cr_zck) { g_critical("Cannot open file %s: %s", pri_zck_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_zck_stat, NULL); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); exit(EXIT_FAILURE); } cr_set_dict(pri_cr_zck->f, pri_dict, pri_dict_size, &tmp_err); if (tmp_err) { g_critical("Error reading setting primary dict %s: %s", pri_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(pri_dict); fil_zck_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); fil_cr_zck = cr_xmlfile_sopen_filelists(fil_zck_filename, CR_CW_ZCK_COMPRESSION, fil_zck_stat, &tmp_err); assert(fil_cr_zck || tmp_err); if (!fil_cr_zck) { g_critical("Cannot open file %s: %s", fil_zck_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); cr_xmlfile_close(pri_cr_zck, NULL); exit(EXIT_FAILURE); } cr_set_dict(fil_cr_zck->f, fil_dict, fil_dict_size, &tmp_err); if (tmp_err) { g_critical("Error reading setting filelists dict %s: %s", fil_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(fil_dict); oth_zck_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); oth_cr_zck = cr_xmlfile_sopen_other(oth_zck_filename, CR_CW_ZCK_COMPRESSION, oth_zck_stat, &tmp_err); assert(oth_cr_zck || tmp_err); if (!oth_cr_zck) { g_critical("Cannot open file %s: %s", oth_zck_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); cr_xmlfile_close(fil_cr_zck, NULL); cr_xmlfile_close(pri_cr_zck, NULL); exit(EXIT_FAILURE); } cr_set_dict(oth_cr_zck->f, oth_dict, oth_dict_size, &tmp_err); if (tmp_err) { g_critical("Error reading setting other dict %s: %s", oth_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(oth_dict); // Set number of packages g_debug("Setting number of packages"); cr_xmlfile_set_num_of_pkgs(pri_cr_zck, task_count, NULL); cr_xmlfile_set_num_of_pkgs(fil_cr_zck, task_count, NULL); cr_xmlfile_set_num_of_pkgs(oth_cr_zck, task_count, NULL); } // Thread pool - User data initialization user_data.pri_f = pri_cr_file; user_data.fil_f = fil_cr_file; user_data.oth_f = oth_cr_file; user_data.pri_db = pri_db; user_data.fil_db = fil_db; user_data.oth_db = oth_db; user_data.pri_zck = pri_cr_zck; user_data.fil_zck = 
fil_cr_zck; user_data.oth_zck = oth_cr_zck; if (cmd_options->compatibility && cmd_options->changelog_limit == DEFAULT_CHANGELOG_LIMIT ) { user_data.changelog_limit = -1; } else { user_data.changelog_limit = cmd_options->changelog_limit; } user_data.location_base = cmd_options->location_base; user_data.checksum_type_str = cr_checksum_name_str(cmd_options->checksum_type); user_data.checksum_type = cmd_options->checksum_type; user_data.checksum_cachedir = cmd_options->checksum_cachedir; user_data.skip_symlinks = cmd_options->skip_symlinks; user_data.repodir_name_len = strlen(in_dir); user_data.task_count = task_count; user_data.package_count = 0; user_data.skip_stat = cmd_options->skip_stat; user_data.old_metadata = old_metadata; user_data.id_pri = 0; user_data.id_fil = 0; user_data.id_oth = 0; user_data.buffer = g_queue_new(); user_data.deltas = cmd_options->deltas; user_data.max_delta_rpm_size= cmd_options->max_delta_rpm_size; user_data.deltatargetpackages = NULL; user_data.cut_dirs = cmd_options->cut_dirs; user_data.location_prefix = cmd_options->location_prefix; user_data.had_errors = 0; user_data.output_pkg_list = output_pkg_list; g_mutex_init(&(user_data.mutex_output_pkg_list)); g_mutex_init(&(user_data.mutex_pri)); g_mutex_init(&(user_data.mutex_fil)); g_mutex_init(&(user_data.mutex_oth)); g_cond_init(&(user_data.cond_pri)); g_cond_init(&(user_data.cond_fil)); g_cond_init(&(user_data.cond_oth)); g_mutex_init(&(user_data.mutex_buffer)); g_mutex_init(&(user_data.mutex_old_md)); g_mutex_init(&(user_data.mutex_deltatargetpackages)); g_debug("Thread pool user data ready"); // Start pool g_thread_pool_set_max_threads(pool, cmd_options->workers, NULL); g_message("Pool started (with %d workers)", cmd_options->workers); // Wait until pool is finished g_thread_pool_free(pool, FALSE, TRUE); // if there were any errors, exit nonzero if ( cmd_options->error_exit_val && user_data.had_errors ) { exit_val = 2; } g_message("Pool finished%s", (user_data.had_errors ? " with errors" : "")); cr_xml_dump_cleanup(); if (output_pkg_list) fclose(output_pkg_list); cr_xmlfile_close(pri_cr_file, &tmp_err); if (!tmp_err) cr_xmlfile_close(fil_cr_file, &tmp_err); if (!tmp_err) cr_xmlfile_close(oth_cr_file, &tmp_err); if (tmp_err) { g_critical("Error while closing xml files: %s", tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } cr_xmlfile_close(pri_cr_zck, &tmp_err); if (tmp_err) { g_critical("%s: %s", pri_zck_filename, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } cr_xmlfile_close(fil_cr_zck, &tmp_err); if (tmp_err) { g_critical("%s: %s", fil_zck_filename, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } cr_xmlfile_close(oth_cr_zck, &tmp_err); if (tmp_err) { g_critical("%s: %s", oth_zck_filename, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } /* At the time of writing xml metadata headers we haven't yet parsed all * the packages and we don't know whether there were some invalid ones, * therefore we write the task count into the headers instead of the actual package count. * If there actually were some invalid packages we have to correct this value * that unfortunately means we have to decompress metadata files change package * count value and compress them again. 
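 * (The recompression below is done by a small rewrite_pkg_count_pool of
 * cr_rewrite_pkg_count_thread workers, one cr_compressiontask_new() task each
 * for primary, filelists and other - and for their .zck variants when zchunk
 * compression is enabled.)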
*/ if (user_data.package_count != user_data.task_count){ g_message("Warning: There were some invalid packages: we have to recompress other, filelists and primary xml metadata files in order to have correct package counts"); GThreadPool *rewrite_pkg_count_pool = g_thread_pool_new(cr_rewrite_pkg_count_thread, &user_data, 3, FALSE, NULL); cr_CompressionTask *pri_rewrite_pkg_count_task = NULL; cr_CompressionTask *fil_rewrite_pkg_count_task = NULL; cr_CompressionTask *oth_rewrite_pkg_count_task = NULL; cr_CompressionTask *pri_zck_rewrite_pkg_count_task = NULL; cr_CompressionTask *fil_zck_rewrite_pkg_count_task = NULL; cr_CompressionTask *oth_zck_rewrite_pkg_count_task = NULL; pri_rewrite_pkg_count_task = cr_compressiontask_new(pri_xml_filename, NULL, xml_compression, cmd_options->repomd_checksum_type, NULL, FALSE, 1, &tmp_err); g_thread_pool_push(rewrite_pkg_count_pool, pri_rewrite_pkg_count_task, NULL); fil_rewrite_pkg_count_task = cr_compressiontask_new(fil_xml_filename, NULL, xml_compression, cmd_options->repomd_checksum_type, NULL, FALSE, 1, &tmp_err); g_thread_pool_push(rewrite_pkg_count_pool, fil_rewrite_pkg_count_task, NULL); oth_rewrite_pkg_count_task = cr_compressiontask_new(oth_xml_filename, NULL, xml_compression, cmd_options->repomd_checksum_type, NULL, FALSE, 1, &tmp_err); g_thread_pool_push(rewrite_pkg_count_pool, oth_rewrite_pkg_count_task, NULL); if (cmd_options->zck_compression) { pri_zck_rewrite_pkg_count_task = cr_compressiontask_new(pri_zck_filename, NULL, CR_CW_ZCK_COMPRESSION, cmd_options->repomd_checksum_type, pri_dict_file, FALSE, 1, &tmp_err); g_thread_pool_push(rewrite_pkg_count_pool, pri_zck_rewrite_pkg_count_task, NULL); fil_zck_rewrite_pkg_count_task = cr_compressiontask_new(fil_zck_filename, NULL, CR_CW_ZCK_COMPRESSION, cmd_options->repomd_checksum_type, fil_dict_file, FALSE, 1, &tmp_err); g_thread_pool_push(rewrite_pkg_count_pool, fil_zck_rewrite_pkg_count_task, NULL); oth_zck_rewrite_pkg_count_task = cr_compressiontask_new(oth_zck_filename, NULL, CR_CW_ZCK_COMPRESSION, cmd_options->repomd_checksum_type, oth_dict_file, FALSE, 1, &tmp_err); g_thread_pool_push(rewrite_pkg_count_pool, oth_zck_rewrite_pkg_count_task, NULL); } g_thread_pool_free(rewrite_pkg_count_pool, FALSE, TRUE); error_check_and_set_content_stat(pri_rewrite_pkg_count_task, pri_xml_filename, &exit_val, &pri_stat); error_check_and_set_content_stat(fil_rewrite_pkg_count_task, fil_xml_filename, &exit_val, &fil_stat); error_check_and_set_content_stat(oth_rewrite_pkg_count_task, oth_xml_filename, &exit_val, &oth_stat); cr_compressiontask_free(pri_rewrite_pkg_count_task, NULL); cr_compressiontask_free(fil_rewrite_pkg_count_task, NULL); cr_compressiontask_free(oth_rewrite_pkg_count_task, NULL); if (cmd_options->zck_compression){ error_check_and_set_content_stat(pri_zck_rewrite_pkg_count_task, pri_zck_filename, &exit_val, &pri_zck_stat); error_check_and_set_content_stat(fil_zck_rewrite_pkg_count_task, fil_zck_filename, &exit_val, &fil_zck_stat); error_check_and_set_content_stat(oth_zck_rewrite_pkg_count_task, oth_zck_filename, &exit_val, &oth_zck_stat); cr_compressiontask_free(pri_zck_rewrite_pkg_count_task, NULL); cr_compressiontask_free(fil_zck_rewrite_pkg_count_task, NULL); cr_compressiontask_free(oth_zck_rewrite_pkg_count_task, NULL); } } if (cmd_options->zck_compression){ g_free(pri_dict_file); g_free(fil_dict_file); g_free(oth_dict_file); } g_queue_free(user_data.buffer); g_mutex_clear(&(user_data.mutex_output_pkg_list)); g_mutex_clear(&(user_data.mutex_pri)); g_mutex_clear(&(user_data.mutex_fil)); 
g_mutex_clear(&(user_data.mutex_oth)); g_cond_clear(&(user_data.cond_pri)); g_cond_clear(&(user_data.cond_fil)); g_cond_clear(&(user_data.cond_oth)); g_mutex_clear(&(user_data.mutex_buffer)); g_mutex_clear(&(user_data.mutex_old_md)); g_mutex_clear(&(user_data.mutex_deltatargetpackages)); // Create repomd records for each file g_debug("Generating repomd.xml"); cr_Repomd *repomd_obj = cr_repomd_new(); cr_RepomdRecord *pri_xml_rec = cr_repomd_record_new("primary", pri_xml_filename); cr_RepomdRecord *fil_xml_rec = cr_repomd_record_new("filelists", fil_xml_filename); cr_RepomdRecord *oth_xml_rec = cr_repomd_record_new("other", oth_xml_filename); cr_RepomdRecord *pri_db_rec = NULL; cr_RepomdRecord *fil_db_rec = NULL; cr_RepomdRecord *oth_db_rec = NULL; cr_RepomdRecord *pri_zck_rec = NULL; cr_RepomdRecord *fil_zck_rec = NULL; cr_RepomdRecord *oth_zck_rec = NULL; cr_RepomdRecord *prestodelta_rec = NULL; cr_RepomdRecord *prestodelta_zck_rec = NULL; // List of cr_RepomdRecords GSList *additional_metadata_rec = NULL; // XML cr_repomd_record_load_contentstat(pri_xml_rec, pri_stat); cr_repomd_record_load_contentstat(fil_xml_rec, fil_stat); cr_repomd_record_load_contentstat(oth_xml_rec, oth_stat); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); cr_contentstat_free(oth_stat, NULL); GThreadPool *fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, FALSE, NULL); cr_RepomdRecordFillTask *pri_fill_task; cr_RepomdRecordFillTask *fil_fill_task; cr_RepomdRecordFillTask *oth_fill_task; pri_fill_task = cr_repomdrecordfilltask_new(pri_xml_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, pri_fill_task, NULL); fil_fill_task = cr_repomdrecordfilltask_new(fil_xml_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, fil_fill_task, NULL); oth_fill_task = cr_repomdrecordfilltask_new(oth_xml_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, oth_fill_task, NULL); additional_metadata_rec = cr_create_repomd_records_for_additional_metadata(additional_metadata, cmd_options->repomd_checksum_type); if (new_groupfile_metadatum) { additional_metadata_rec = cr_create_repomd_records_for_groupfile_metadata(new_groupfile_metadatum, additional_metadata_rec, compression, cmd_options->repomd_checksum_type); //NOTE(amatej): Now we can add groupfile metadata to the additional_metadata list, for unified handlig while zck compressing additional_metadata = g_slist_prepend(additional_metadata, new_groupfile_metadatum); cr_Metadatum *compressed_new_groupfile_metadatum = g_malloc0(sizeof(cr_Metadatum)); compressed_new_groupfile_metadatum->name = g_strdup(((cr_RepomdRecord *) additional_metadata_rec->data)->location_real); compressed_new_groupfile_metadatum->type = g_strdup(((cr_RepomdRecord *) additional_metadata_rec->data)->type); additional_metadata = g_slist_prepend(additional_metadata, compressed_new_groupfile_metadatum); } // Wait till repomd record fill task of xml files ends. 
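    // (g_thread_pool_free() with immediate=FALSE and wait_=TRUE lets the already
    // queued fill tasks run to completion and blocks until they have finished.)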
g_thread_pool_free(fill_pool, FALSE, TRUE); cr_repomdrecordfilltask_free(pri_fill_task, NULL); cr_repomdrecordfilltask_free(fil_fill_task, NULL); cr_repomdrecordfilltask_free(oth_fill_task, NULL); // Sqlite db if (!cmd_options->no_database) { gchar *pri_db_name = g_strconcat(tmp_out_repo, "/primary.sqlite", sqlite_compression_suffix, NULL); gchar *fil_db_name = g_strconcat(tmp_out_repo, "/filelists.sqlite", sqlite_compression_suffix, NULL); gchar *oth_db_name = g_strconcat(tmp_out_repo, "/other.sqlite", sqlite_compression_suffix, NULL); cr_db_dbinfo_update(pri_db, pri_xml_rec->checksum, &tmp_err); if (!tmp_err) cr_db_dbinfo_update(fil_db, fil_xml_rec->checksum, &tmp_err); if (!tmp_err) cr_db_dbinfo_update(oth_db, oth_xml_rec->checksum, &tmp_err); if (tmp_err) { g_critical("Error updating dbinfo: %s", tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } cr_db_close(pri_db, &tmp_err); if (!tmp_err) cr_db_close(fil_db, &tmp_err); if (!tmp_err) cr_db_close(oth_db, &tmp_err); if (tmp_err) { g_critical("Error while closing db: %s", tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } // Compress dbs GThreadPool *compress_pool = g_thread_pool_new(cr_compressing_thread, NULL, 3, FALSE, NULL); cr_CompressionTask *pri_db_task; cr_CompressionTask *fil_db_task; cr_CompressionTask *oth_db_task; pri_db_task = cr_compressiontask_new(pri_db_filename, pri_db_name, sqlite_compression, cmd_options->repomd_checksum_type, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, pri_db_task, NULL); fil_db_task = cr_compressiontask_new(fil_db_filename, fil_db_name, sqlite_compression, cmd_options->repomd_checksum_type, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, fil_db_task, NULL); oth_db_task = cr_compressiontask_new(oth_db_filename, oth_db_name, sqlite_compression, cmd_options->repomd_checksum_type, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, oth_db_task, NULL); g_thread_pool_free(compress_pool, FALSE, TRUE); if (!cmd_options->local_sqlite) { cr_rm(pri_db_filename, CR_RM_FORCE, NULL, NULL); cr_rm(fil_db_filename, CR_RM_FORCE, NULL, NULL); cr_rm(oth_db_filename, CR_RM_FORCE, NULL, NULL); } // Prepare repomd records pri_db_rec = cr_repomd_record_new("primary_db", pri_db_name); fil_db_rec = cr_repomd_record_new("filelists_db", fil_db_name); oth_db_rec = cr_repomd_record_new("other_db", oth_db_name); g_free(pri_db_name); g_free(fil_db_name); g_free(oth_db_name); cr_repomd_record_load_contentstat(pri_db_rec, pri_db_task->stat); cr_repomd_record_load_contentstat(fil_db_rec, fil_db_task->stat); cr_repomd_record_load_contentstat(oth_db_rec, oth_db_task->stat); cr_compressiontask_free(pri_db_task, NULL); cr_compressiontask_free(fil_db_task, NULL); cr_compressiontask_free(oth_db_task, NULL); fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, FALSE, NULL); cr_RepomdRecordFillTask *pri_db_fill_task; cr_RepomdRecordFillTask *fil_db_fill_task; cr_RepomdRecordFillTask *oth_db_fill_task; pri_db_fill_task = cr_repomdrecordfilltask_new(pri_db_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, pri_db_fill_task, NULL); fil_db_fill_task = cr_repomdrecordfilltask_new(fil_db_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, fil_db_fill_task, NULL); oth_db_fill_task = cr_repomdrecordfilltask_new(oth_db_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, oth_db_fill_task, NULL); g_thread_pool_free(fill_pool, FALSE, TRUE); cr_repomdrecordfilltask_free(pri_db_fill_task, NULL); 
cr_repomdrecordfilltask_free(fil_db_fill_task, NULL); cr_repomdrecordfilltask_free(oth_db_fill_task, NULL); } // Zchunk if (cmd_options->zck_compression) { // Prepare repomd records pri_zck_rec = cr_repomd_record_new("primary_zck", pri_zck_filename); fil_zck_rec = cr_repomd_record_new("filelists_zck", fil_zck_filename); oth_zck_rec = cr_repomd_record_new("other_zck", oth_zck_filename); cr_repomd_record_load_zck_contentstat(pri_zck_rec, pri_zck_stat); cr_repomd_record_load_zck_contentstat(fil_zck_rec, fil_zck_stat); cr_repomd_record_load_zck_contentstat(oth_zck_rec, oth_zck_stat); fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, FALSE, NULL); cr_RepomdRecordFillTask *pri_zck_fill_task; cr_RepomdRecordFillTask *fil_zck_fill_task; cr_RepomdRecordFillTask *oth_zck_fill_task; pri_zck_fill_task = cr_repomdrecordfilltask_new(pri_zck_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, pri_zck_fill_task, NULL); fil_zck_fill_task = cr_repomdrecordfilltask_new(fil_zck_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, fil_zck_fill_task, NULL); oth_zck_fill_task = cr_repomdrecordfilltask_new(oth_zck_rec, cmd_options->repomd_checksum_type, NULL); g_thread_pool_push(fill_pool, oth_zck_fill_task, NULL); g_thread_pool_free(fill_pool, FALSE, TRUE); cr_repomdrecordfilltask_free(pri_zck_fill_task, NULL); cr_repomdrecordfilltask_free(fil_zck_fill_task, NULL); cr_repomdrecordfilltask_free(oth_zck_fill_task, NULL); //ZCK for additional metadata GSList *element = additional_metadata; for (; element; element=g_slist_next(element)) { cr_CompressionType com_type = cr_detect_compression(((cr_Metadatum *) element->data)->name, &tmp_err); gchar *elem_type = g_strdup(((cr_Metadatum *) element->data)->type); gchar *elem_name = g_strdup(((cr_Metadatum *) element->data)->name); if (com_type != CR_CW_NO_COMPRESSION){ const gchar *compression_suffix = cr_compression_suffix(com_type); //remove suffixes if present if (g_str_has_suffix(elem_name, compression_suffix)){ gchar *tmp = elem_name; elem_name = g_strndup(elem_name, (strlen(elem_name) - strlen(compression_suffix))); g_free(tmp); } gchar *type_compression_suffix = g_strdup(compression_suffix); type_compression_suffix[0] = '_'; //replace '.' 
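                // (e.g. a ".gz" file suffix becomes a "_gz" type suffix, so a type
                // such as "group_gz" is reduced back to "group" before its "_zck"
                // variant is derived below)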
if (g_str_has_suffix(elem_type, type_compression_suffix)){ gchar *tmp = elem_type; elem_type = g_strndup(elem_type, (strlen(elem_type) - strlen(type_compression_suffix))); g_free(tmp); } g_free(type_compression_suffix); } gchar *additional_metadatum_rec_zck_type = g_strconcat(elem_type, "_zck", NULL); gchar *additional_metadatum_rec_zck_name = g_strconcat(elem_name, ".zck", NULL); g_free(elem_name); g_free(elem_type); if (tmp_err) { g_critical("Cannot detect compression type of %s: %s", ((cr_Metadatum *) element->data)->name, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } /* Only create additional_metadata_zck if additional_metadata isn't already zchunk * and its zck version doesn't yet exists */ if (com_type != CR_CW_ZCK_COMPRESSION && !g_slist_find_custom(additional_metadata_rec, additional_metadatum_rec_zck_type, cr_cmp_repomd_record_type)) { GSList *additional_metadatum_rec_elem = g_slist_find_custom(additional_metadata_rec, ((cr_Metadatum *) element->data)->type, cr_cmp_repomd_record_type); additional_metadata_rec = g_slist_prepend(additional_metadata_rec, cr_repomd_record_new( additional_metadatum_rec_zck_type, additional_metadatum_rec_zck_name )); cr_repomd_record_compress_and_fill(additional_metadatum_rec_elem->data, additional_metadata_rec->data, cmd_options->repomd_checksum_type, CR_CW_ZCK_COMPRESSION, cmd_options->zck_dict_dir, &tmp_err); if (tmp_err) { g_critical("Cannot process %s %s: %s", ((cr_Metadatum *) element->data)->type, ((cr_Metadatum *) element->data)->name, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } } g_free(additional_metadatum_rec_zck_type); g_free(additional_metadatum_rec_zck_name); } } cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); #ifdef CR_DELTA_RPM_SUPPORT // Delta generation if (cmd_options->deltas) { gchar *filename, *outdeltadir = NULL; gchar *prestodelta_xml_filename = NULL; gchar *prestodelta_zck_filename = NULL; GHashTable *ht_oldpackagedirs = NULL; cr_XmlFile *prestodelta_cr_file = NULL; cr_XmlFile *prestodelta_cr_zck_file = NULL; cr_ContentStat *prestodelta_stat = NULL; cr_ContentStat *prestodelta_zck_stat = NULL; filename = g_strconcat("prestodelta.xml", compression_suffix, NULL); outdeltadir = g_build_filename(out_dir, OUTDELTADIR, NULL); prestodelta_xml_filename = g_build_filename(tmp_out_repo, filename, NULL); g_free(filename); // 0) Prepare outdeltadir if (g_file_test(outdeltadir, G_FILE_TEST_EXISTS)) { if (!g_file_test(outdeltadir, G_FILE_TEST_IS_DIR)) { g_critical("The file %s already exists and it is not a directory", outdeltadir); goto deltaerror; } } else if (g_mkdir(outdeltadir, S_IRWXU|S_IRWXG|S_IROTH|S_IXOTH)) { g_critical("Cannot create %s: %s", outdeltadir, g_strerror(errno)); goto deltaerror; } // 1) Scan old package directories ht_oldpackagedirs = cr_deltarpms_scan_oldpackagedirs(cmd_options->oldpackagedirs_paths, cmd_options->max_delta_rpm_size, &tmp_err); if (!ht_oldpackagedirs) { g_critical("cr_deltarpms_scan_oldpackagedirs failed: %s\n", tmp_err->message); g_clear_error(&tmp_err); goto deltaerror; } // 2) Generate drpms in parallel ret = cr_deltarpms_parallel_deltas(user_data.deltatargetpackages, ht_oldpackagedirs, outdeltadir, cmd_options->num_deltas, cmd_options->workers, cmd_options->max_delta_rpm_size, cmd_options->max_delta_rpm_size, &tmp_err); if (!ret) { g_critical("Parallel generation of drpms failed: %s", tmp_err->message); g_clear_error(&tmp_err); goto deltaerror; } // 3) Generate prestodelta.xml file 
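        // (prestodelta.xml indexes the drpm files generated in step 2 under
        // outdeltadir, so that clients can pick an applicable delta.)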
prestodelta_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); prestodelta_cr_file = cr_xmlfile_sopen_prestodelta(prestodelta_xml_filename, compression, prestodelta_stat, &tmp_err); if (!prestodelta_cr_file) { g_critical("Cannot open %s: %s", prestodelta_xml_filename, tmp_err->message); g_clear_error(&tmp_err); goto deltaerror; } if (cmd_options->zck_compression && compression != CR_CW_ZCK_COMPRESSION) { filename = g_strconcat("prestodelta.xml", cr_compression_suffix(CR_CW_ZCK_COMPRESSION), NULL); prestodelta_zck_filename = g_build_filename(tmp_out_repo, filename, NULL); g_free(filename); prestodelta_zck_stat = cr_contentstat_new(cmd_options->repomd_checksum_type, NULL); prestodelta_cr_zck_file = cr_xmlfile_sopen_prestodelta(prestodelta_zck_filename, CR_CW_ZCK_COMPRESSION, prestodelta_zck_stat, &tmp_err); if (!prestodelta_cr_zck_file) { g_critical("Cannot open %s: %s", prestodelta_zck_filename, tmp_err->message); g_clear_error(&tmp_err); goto deltaerror; } } ret = cr_deltarpms_generate_prestodelta_file( outdeltadir, prestodelta_cr_file, prestodelta_cr_zck_file, //cmd_options->checksum_type, CR_CHECKSUM_SHA256, // Createrepo always uses SHA256 cmd_options->workers, out_dir, &tmp_err); if (!ret) { g_critical("Cannot generate %s: %s", prestodelta_xml_filename, tmp_err->message); g_clear_error(&tmp_err); goto deltaerror; } cr_xmlfile_close(prestodelta_cr_file, NULL); prestodelta_cr_file = NULL; cr_xmlfile_close(prestodelta_cr_zck_file, NULL); prestodelta_cr_zck_file = NULL; // 4) Prepare repomd record prestodelta_rec = cr_repomd_record_new("prestodelta", prestodelta_xml_filename); cr_repomd_record_load_contentstat(prestodelta_rec, prestodelta_stat); cr_repomd_record_fill(prestodelta_rec, cmd_options->repomd_checksum_type, NULL); if (prestodelta_zck_stat) { prestodelta_zck_rec = cr_repomd_record_new("prestodelta_zck", prestodelta_zck_filename); cr_repomd_record_load_contentstat(prestodelta_zck_rec, prestodelta_zck_stat); cr_repomd_record_fill(prestodelta_zck_rec, cmd_options->repomd_checksum_type, NULL); } deltaerror: // 5) Cleanup g_hash_table_destroy(ht_oldpackagedirs); g_free(outdeltadir); g_free(prestodelta_xml_filename); g_free(prestodelta_zck_filename); cr_xmlfile_close(prestodelta_cr_file, NULL); cr_xmlfile_close(prestodelta_cr_zck_file, NULL); cr_contentstat_free(prestodelta_stat, NULL); cr_contentstat_free(prestodelta_zck_stat, NULL); cr_slist_free_full(user_data.deltatargetpackages, (GDestroyNotify) cr_deltatargetpackage_free); } #endif // Add checksums into files names if (cmd_options->unique_md_filenames) { cr_repomd_record_rename_file(pri_xml_rec, NULL); cr_repomd_record_rename_file(fil_xml_rec, NULL); cr_repomd_record_rename_file(oth_xml_rec, NULL); cr_repomd_record_rename_file(pri_db_rec, NULL); cr_repomd_record_rename_file(fil_db_rec, NULL); cr_repomd_record_rename_file(oth_db_rec, NULL); cr_repomd_record_rename_file(pri_zck_rec, NULL); cr_repomd_record_rename_file(fil_zck_rec, NULL); cr_repomd_record_rename_file(oth_zck_rec, NULL); cr_repomd_record_rename_file(prestodelta_rec, NULL); cr_repomd_record_rename_file(prestodelta_zck_rec, NULL); GSList *element = additional_metadata_rec; for (; element; element=g_slist_next(element)) { cr_repomd_record_rename_file(element->data, NULL); } } if (cmd_options->set_timestamp_to_revision) { // validated already in cmd_parser.c:check_arguments gint64 revision = strtoll(cmd_options->revision, NULL, 0); cr_repomd_record_set_timestamp(pri_xml_rec, revision); cr_repomd_record_set_timestamp(fil_xml_rec, revision); 
cr_repomd_record_set_timestamp(oth_xml_rec, revision); cr_repomd_record_set_timestamp(pri_db_rec, revision); cr_repomd_record_set_timestamp(fil_db_rec, revision); cr_repomd_record_set_timestamp(oth_db_rec, revision); cr_repomd_record_set_timestamp(prestodelta_rec, revision); GSList *element = additional_metadata_rec; for (; element; element=g_slist_next(element)) { cr_repomd_record_set_timestamp(element->data, revision); } } // Gen xml cr_repomd_set_record(repomd_obj, pri_xml_rec); cr_repomd_set_record(repomd_obj, fil_xml_rec); cr_repomd_set_record(repomd_obj, oth_xml_rec); cr_repomd_set_record(repomd_obj, pri_db_rec); cr_repomd_set_record(repomd_obj, fil_db_rec); cr_repomd_set_record(repomd_obj, oth_db_rec); cr_repomd_set_record(repomd_obj, pri_zck_rec); cr_repomd_set_record(repomd_obj, fil_zck_rec); cr_repomd_set_record(repomd_obj, oth_zck_rec); cr_repomd_set_record(repomd_obj, prestodelta_rec); cr_repomd_set_record(repomd_obj, prestodelta_zck_rec); GSList *elem = additional_metadata_rec; for (; elem; elem=g_slist_next(elem)) { cr_repomd_set_record(repomd_obj, elem->data); } int i = 0; while (cmd_options->repo_tags && cmd_options->repo_tags[i]) cr_repomd_add_repo_tag(repomd_obj, cmd_options->repo_tags[i++]); i = 0; while (cmd_options->content_tags && cmd_options->content_tags[i]) cr_repomd_add_content_tag(repomd_obj, cmd_options->content_tags[i++]); if (cmd_options->distro_cpeids && cmd_options->distro_values) { GSList *cpeid = cmd_options->distro_cpeids; GSList *val = cmd_options->distro_values; while (cpeid && val) { cr_repomd_add_distro_tag(repomd_obj, cpeid->data, val->data); cpeid = g_slist_next(cpeid); val = g_slist_next(val); } } if (cmd_options->revision) cr_repomd_set_revision(repomd_obj, cmd_options->revision); cr_repomd_sort_records(repomd_obj); char *repomd_xml = cr_xml_dump_repomd(repomd_obj, &tmp_err); assert(repomd_xml || tmp_err); cr_repomd_free(repomd_obj); if (!repomd_xml) { g_critical("Cannot generate repomd.xml: %s", tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } // Write repomd.xml gchar *repomd_path = g_strconcat(tmp_out_repo, "repomd.xml", NULL); FILE *frepomd = fopen(repomd_path, "w"); if (!frepomd) { g_critical("Cannot open %s: %s", repomd_path, g_strerror(errno)); exit(EXIT_FAILURE); } fputs(repomd_xml, frepomd); fclose(frepomd); g_free(repomd_xml); g_free(repomd_path); // Final move // Copy selected metadata from the old repository cr_RetentionType retentiontype = CR_RETENTION_DEFAULT; gint64 retentionval = (gint64) cmd_options->retain_old; if (cmd_options->retain_old_md_by_age) { retentiontype = CR_RETENTION_BYAGE; retentionval = cmd_options->md_max_age; } else if (cmd_options->compatibility) { retentiontype = CR_RETENTION_COMPATIBILITY; } ret = cr_old_metadata_retention(out_repo, tmp_out_repo, retentiontype, retentionval, &tmp_err); if (!ret) { g_critical("%s", tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } gboolean old_repodata_renamed = FALSE; // === This section should be maximally atomic === sigset_t new_mask, old_mask; sigemptyset(&old_mask); sigfillset(&new_mask); sigdelset(&new_mask, SIGKILL); // These two signals cannot be sigdelset(&new_mask, SIGSTOP); // blocked sigprocmask(SIG_BLOCK, &new_mask, &old_mask); // Rename out_repo to "repodata.old.pid.date.microsecs" gchar *tmp_dirname = cr_append_pid_and_datetime("repodata.old.", NULL); gchar *old_repodata_path = g_build_filename(out_dir, tmp_dirname, NULL); g_free(tmp_dirname); if (g_rename(out_repo, old_repodata_path) == -1) { g_debug("Old repodata doesn't exists: 
Cannot rename %s -> %s: %s", out_repo, old_repodata_path, g_strerror(errno)); } else { g_debug("Renamed %s -> %s", out_repo, old_repodata_path); old_repodata_renamed = TRUE; } // Rename tmp_out_repo to out_repo if (g_rename(tmp_out_repo, out_repo) == -1) { g_critical("Cannot rename %s -> %s: %s", tmp_out_repo, out_repo, g_strerror(errno)); exit(EXIT_FAILURE); } else { g_debug("Renamed %s -> %s", tmp_out_repo, out_repo); } // Remove lock if (g_strcmp0(lock_dir, tmp_out_repo)) // If lock_dir is not same as temporary repo dir then remove it cr_remove_dir(lock_dir, NULL); // Disable path stored for exit handler cr_unset_cleanup_handler(NULL); sigprocmask(SIG_SETMASK, &old_mask, NULL); // === End of section that has to be maximally atomic === if (old_repodata_renamed) { // Remove "metadata.old" dir if (cr_rm(old_repodata_path, CR_RM_RECURSIVE, NULL, &tmp_err)) { g_debug("Old repo %s removed", old_repodata_path); } else { g_warning("Cannot remove %s: %s", old_repodata_path, tmp_err->message); g_clear_error(&tmp_err); } } // Clean up g_debug("Memory cleanup"); if (old_metadata) cr_metadata_free(old_metadata); g_free(user_data.prev_srpm); g_free(user_data.cur_srpm); g_free(old_repodata_path); g_free(in_repo); g_free(out_repo); g_free(tmp_out_repo); g_free(in_dir); g_free(out_dir); g_free(lock_dir); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); g_free(pri_db_filename); g_free(fil_db_filename); g_free(oth_db_filename); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); g_slist_free_full(additional_metadata, (GDestroyNotify) cr_metadatum_free); g_slist_free(additional_metadata_rec); free_options(cmd_options); cr_package_parser_cleanup(); g_debug("All done"); exit(exit_val); } createrepo_c-0.17.0/src/createrepo_c.h000066400000000000000000000031661400672373200176370ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_H__ #define __C_CREATEREPOLIB_H__ #ifdef __cplusplus extern "C" { #endif /*! 
\mainpage createrepo_c library * * \section intro_sec Introduction * * Usage: * \code * #include * \endcode * */ /** \defgroup main Complete API of createrepo_c library */ #include #include "checksum.h" #include "compression_wrapper.h" #include "deltarpms.h" #include "error.h" #include "load_metadata.h" #include "locate_metadata.h" #include "misc.h" #include "package.h" #include "parsehdr.h" #include "parsepkg.h" #include "repomd.h" #include "sqlite.h" #include "threads.h" #include "updateinfo.h" #include "version.h" #include "xml_dump.h" #include "xml_file.h" #include "xml_parser.h" #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_H__ */ createrepo_c-0.17.0/src/createrepo_c.pc.cmake000066400000000000000000000005411400672373200210630ustar00rootroot00000000000000prefix=@CMAKE_INSTALL_PREFIX@ libdir=@LIB_INSTALL_DIR@ includedir=@CMAKE_INSTALL_PREFIX@/include Name: createrepo_c Description: Library for manipulation with repodata. Version: @VERSION@ Requires: glib-2.0 rpm libcurl sqlite3 Requires.private: zlib libxml-2.0 Libs: -L${libdir} -lcreaterepo_c Libs.private: -lmagic -lbz2 -lzma Cflags: -I${includedir} createrepo_c-0.17.0/src/createrepo_shared.c000066400000000000000000000217751400672373200206640ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2015 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include "error.h" #include "misc.h" #include "cleanup.h" char *global_lock_dir = NULL; // Path to .repodata/ dir that is used as a lock char *global_tmp_out_repo = NULL; // Path to temporary repodata directory, // if NULL that it's same as // the global_lock_dir /** * Clean up function called on normal program termination. * It removes temporary .repodata/ directory that servers as a lock * for other createrepo[_c] processes. * This functions acts only if exit status != EXIST_SUCCESS. 
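 * (The stored paths are cleared via cr_unset_cleanup_handler() once the new
 * repodata is successfully in place, so on a normal successful run there is
 * nothing left for this hook to remove.)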
* */ static void exit_cleanup() { if (global_lock_dir) { g_debug("Removing %s", global_lock_dir); cr_remove_dir(global_lock_dir, NULL); } if (global_tmp_out_repo) { g_debug("Removing %s", global_tmp_out_repo); cr_remove_dir(global_tmp_out_repo, NULL); } } /** Signal handler * @param sig Signal number */ static void sigint_catcher(int sig) { g_message("%s caught: Terminating...", strsignal(sig)); exit(1); } gboolean cr_set_cleanup_handler(const char *lock_dir, const char *tmp_out_repo, G_GNUC_UNUSED GError **err) { assert(!err || *err == NULL); // Set global variables global_lock_dir = g_strdup(lock_dir); if (g_strcmp0(lock_dir, tmp_out_repo)) global_tmp_out_repo = g_strdup(tmp_out_repo); else global_tmp_out_repo = NULL; // Register on exit cleanup function if (atexit(exit_cleanup)) g_warning("Cannot set exit cleanup function by atexit()"); // Prepare signal handler configuration g_debug("Signal handler setup"); struct sigaction sigact; sigact.sa_handler = sigint_catcher; sigemptyset(&sigact.sa_mask); sigact.sa_flags = 0; // Handle signals that terminate (from the POSIX.1-1990) sigaction(SIGHUP, &sigact, NULL); sigaction(SIGINT, &sigact, NULL); sigaction(SIGPIPE, &sigact, NULL); sigaction(SIGALRM, &sigact, NULL); sigaction(SIGTERM, &sigact, NULL); sigaction(SIGUSR1, &sigact, NULL); sigaction(SIGUSR2, &sigact, NULL); // Handle signals that terminate (from the POSIX.1-2001) #ifdef SIGPOLL sigaction(SIGPOLL, &sigact, NULL); #endif sigaction(SIGPROF, &sigact, NULL); sigaction(SIGVTALRM, &sigact, NULL); return TRUE; } gboolean cr_unset_cleanup_handler(G_GNUC_UNUSED GError **err) { g_free(global_lock_dir); global_lock_dir = NULL; g_free(global_tmp_out_repo); global_tmp_out_repo = NULL; return TRUE; } gboolean cr_block_terminating_signals(GError **err) { assert(!err || *err == NULL); sigset_t intmask; sigemptyset(&intmask); sigaddset(&intmask, SIGHUP); sigaddset(&intmask, SIGINT); sigaddset(&intmask, SIGPIPE); sigaddset(&intmask, SIGALRM); sigaddset(&intmask, SIGTERM); sigaddset(&intmask, SIGUSR1); sigaddset(&intmask, SIGUSR2); #ifdef SIGPOLL sigaddset(&intmask, SIGPOLL); #endif sigaddset(&intmask, SIGPROF); sigaddset(&intmask, SIGVTALRM); if (sigprocmask(SIG_BLOCK, &intmask, NULL)) { g_set_error(err, CREATEREPO_C_ERROR, CRE_SIGPROCMASK, "Cannot block terminating signals: %s", g_strerror(errno)); return FALSE; } return TRUE; } gboolean cr_unblock_terminating_signals(GError **err) { assert(!err || *err == NULL); sigset_t intmask; sigemptyset(&intmask); sigaddset(&intmask, SIGHUP); sigaddset(&intmask, SIGINT); sigaddset(&intmask, SIGPIPE); sigaddset(&intmask, SIGALRM); sigaddset(&intmask, SIGTERM); sigaddset(&intmask, SIGUSR1); sigaddset(&intmask, SIGUSR2); #ifdef SIGPOLL sigaddset(&intmask, SIGPOLL); #endif sigaddset(&intmask, SIGPROF); sigaddset(&intmask, SIGVTALRM); if (sigprocmask(SIG_UNBLOCK, &intmask, NULL)) { g_set_error(err, CREATEREPO_C_ERROR, CRE_SIGPROCMASK, "Cannot unblock terminating signals: %s", g_strerror(errno)); return FALSE; } return TRUE; } gboolean cr_lock_repo(const gchar *repo_dir, gboolean ignore_lock, gchar **lock_dir_p, gchar **tmp_repodata_dir_p, GError **err) { assert(!err || *err == NULL); _cleanup_free_ gchar *lock_dir = NULL; _cleanup_error_free_ GError *tmp_err = NULL; lock_dir = g_build_filename(repo_dir, ".repodata/", NULL); *lock_dir_p = g_strdup(lock_dir); if (g_mkdir(lock_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { if (errno != EEXIST) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Error while creating temporary repodata " "directory: %s: %s", lock_dir, 
g_strerror(errno)); return FALSE; } g_debug("Temporary repodata directory: %s already exists! " "(Another createrepo process is running?)", lock_dir); if (ignore_lock == FALSE) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Temporary repodata directory %s already exists! " "(Another createrepo process is running?)", lock_dir); return FALSE; } // The next section takes place only if the --ignore-lock is used // Ugly, but user wants it -> it's his fault if something gets broken // Remove existing .repodata/ g_debug("(--ignore-lock enabled) Let's remove the old .repodata/"); if (cr_rm(lock_dir, CR_RM_RECURSIVE, NULL, &tmp_err)) { g_debug("(--ignore-lock enabled) Removed: %s", lock_dir); } else { g_critical("(--ignore-lock enabled) Cannot remove %s: %s", lock_dir, tmp_err->message); g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot remove %s (--ignore-lock enabled) :%s", lock_dir, tmp_err->message); return FALSE; } // Try to create own - just as a lock if (g_mkdir(lock_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { g_critical("(--ignore-lock enabled) Cannot create %s: %s", lock_dir, g_strerror(errno)); g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot create: %s (--ignore-lock enabled): %s", lock_dir, g_strerror(errno)); return FALSE; } else { g_debug("(--ignore-lock enabled) Own and empty %s created " "(serves as a lock)", lock_dir); } // To data generation use a different one _cleanup_free_ gchar *tmp_repodata_dir = NULL; _cleanup_free_ gchar *tmp = NULL; tmp = g_build_filename(repo_dir, ".repodata.", NULL); tmp_repodata_dir = cr_append_pid_and_datetime(tmp, "/"); if (g_mkdir(tmp_repodata_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { g_critical("(--ignore-lock enabled) Cannot create %s: %s", tmp_repodata_dir, g_strerror(errno)); g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot create: %s (--ignore-lock enabled): %s", tmp_repodata_dir, g_strerror(errno)); return FALSE; } else { g_debug("(--ignore-lock enabled) For data generation is used: %s", tmp_repodata_dir); } *tmp_repodata_dir_p = g_strdup(tmp_repodata_dir); } else { *tmp_repodata_dir_p = g_strdup(lock_dir); } return TRUE; } void cr_setup_logging(gboolean quiet, gboolean verbose) { if (quiet) { // Quiet mode GLogLevelFlags hidden_levels = G_LOG_LEVEL_MESSAGE | G_LOG_LEVEL_INFO | G_LOG_LEVEL_DEBUG | G_LOG_LEVEL_WARNING; g_log_set_default_handler (cr_log_fn, GINT_TO_POINTER(hidden_levels)); } else if (verbose) { // Verbose mode g_log_set_default_handler (cr_log_fn, GINT_TO_POINTER(0)); } else { // Standard mode GLogLevelFlags hidden_levels = G_LOG_LEVEL_DEBUG; g_log_set_default_handler (cr_log_fn, GINT_TO_POINTER(hidden_levels)); } } createrepo_c-0.17.0/src/createrepo_shared.h000066400000000000000000000106441400672373200206620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_CREATEREPO_SHARED_H__ #define __C_CREATEREPOLIB_CREATEREPO_SHARED_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" #include "compression_wrapper.h" #include "package.h" /** \defgroup createrepo_shared Createrepo API. * * Module with createrepo API * * \addtogroup createrepo_shared * @{ */ /** * This function does: * Sets a signal handler for signals that lead to process temination. * (List obtained from the "man 7 signal") * Signals that are ignored (SIGCHILD) or lead just to stop (SIGSTOP, ...) * don't get this handler - these signals do not terminate the process! * This handler assures that the cleanup function that is hooked on exit * gets called. * * @param lock_dir Dir that serves as lock (".repodata/") * @param tmp_out_repo Dir that is really used for repodata generation * (usually exactly the same as lock dir if not * --ignore-lock is specified). Could be NULL. * @return TRUE on success, FALSE if err is set. */ gboolean cr_set_cleanup_handler(const char *lock_dir, const char *tmp_out_repo, GError **err); /** * Block process terminating signals. * (Useful for creating pseudo-atomic sections in code) */ gboolean cr_block_terminating_signals(GError **err); /** * Unblock process terminating signals. */ gboolean cr_unblock_terminating_signals(GError **err); /** * This function does: * - Tries to create repo/.repodata/ dir. * - If it doesn't exists, it's created and function returns TRUE. * - If it exists and ignore_lock is FALSE, returns FALSE and err is set. * - If it exists and ignore_lock is TRUE it: * - Removes the existing .repodata/ dir and all its content * - Creates (empty) new one (just as a lock dir - place holder) * - Creates .repodata.pid.datetime.usec/ that should be used for * repodata generation * * @param repo_dir Path to repo (a dir that contains repodata/ subdir) * @param ignore_lock Ignore existing .repodata/ dir - remove it and * create a new one. * @param lock_dir Location to store path to a directory used as * a lock. Always repodir+"/.repodata/". * Even if FALSE is returned, the content of this * variable IS DEFINED. * @param tmp_repodata_dir Location to store a path to a directory used as * a temporary directory for repodata generation. * If ignore_lock is FALSE than * lock_dir is same as tmp_repodata_dir. * If FALSE is returned, the content of this variable * is undefined. * @param err GError ** * @return TRUE on success, FALSE if err is set. */ gboolean cr_lock_repo(const gchar *repo_dir, gboolean ignore_lock, gchar **lock_dir, gchar **tmp_repodata_dir, GError **err); /** * Unset cleanup handler. * @param err GError ** * @return TRUE on success, FALSE if err is set. */ gboolean cr_unset_cleanup_handler(GError **err); /** * Setup logging for the application. 
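 * A minimal call sketch (illustrative only, not taken from the original docs):
 * \code
 * cr_setup_logging(FALSE, TRUE);   // enable verbose (debug) logging
 * \endcode
 * @param quiet    Hide MESSAGE, INFO, DEBUG and WARNING log levels.
 * @param verbose  Do not hide any log level (debug output included).
 *                 Ignored when quiet is set.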
*/ void cr_setup_logging(gboolean quiet, gboolean verbose); /** * Set global pointer to exit value that is used in function set by atexit * @param exit_val Pointer to exit_value int */ void cr_set_global_exit_value(int *exit_val); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_CREATEREPO_SHARED__ */ createrepo_c-0.17.0/src/deltarpms.c000066400000000000000000000657411400672373200172010ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include #include #include "deltarpms.h" #ifdef CR_DELTA_RPM_SUPPORT #include #endif #include "package.h" #include "parsepkg.h" #include "misc.h" #include "error.h" #define ERR_DOMAIN CREATEREPO_C_ERROR gboolean cr_drpm_support(void) { #ifdef CR_DELTA_RPM_SUPPORT return TRUE; #endif return FALSE; } #ifdef CR_DELTA_RPM_SUPPORT char * cr_drpm_create(cr_DeltaTargetPackage *old, cr_DeltaTargetPackage *new, const char *destdir, GError **err) { gchar *drpmfn, *drpmpath; drpmfn = g_strdup_printf("%s-%s-%s_%s-%s.%s.drpm", old->name, old->version, old->release, new->version, new->release, old->arch); drpmpath = g_build_filename(destdir, drpmfn, NULL); g_free(drpmfn); drpm_make_options *opts; drpm_make_options_init(&opts); drpm_make_options_defaults(opts); int ret = drpm_make(old->path, new->path, drpmpath, opts); if (ret != DRPM_ERR_OK) { g_set_error(err, ERR_DOMAIN, CRE_DELTARPM, "Deltarpm cannot make %s (%d) from old: %s and new: %s", drpmpath, ret, old->path, new->path); free(drpmpath); drpm_make_options_destroy(&opts); return NULL; } drpm_make_options_destroy(&opts); return drpmpath; } void cr_deltapackage_free(cr_DeltaPackage *deltapackage) { if (!deltapackage) return; cr_package_free(deltapackage->package); g_string_chunk_free(deltapackage->chunk); g_free(deltapackage); } cr_DeltaPackage * cr_deltapackage_from_drpm_base(const char *filename, int changelog_limit, cr_HeaderReadingFlags flags, GError **err) { struct drpm *delta = NULL; cr_DeltaPackage *deltapackage = NULL; char *str; int ret; assert(!err || *err == NULL); deltapackage = g_new0(cr_DeltaPackage, 1); deltapackage->chunk = g_string_chunk_new(0); deltapackage->package = cr_package_from_rpm_base(filename, changelog_limit, flags, err); if (!deltapackage->package) goto errexit; ret = drpm_read(&delta, filename); if (ret != DRPM_ERR_OK) { g_set_error(err, ERR_DOMAIN, CRE_DELTARPM, "Deltarpm cannot read %s (%d)", filename, ret); goto errexit; } ret = drpm_get_string(delta, DRPM_TAG_SRCNEVR, &str); if (ret != DRPM_ERR_OK) { g_set_error(err, ERR_DOMAIN, CRE_DELTARPM, "Deltarpm cannot read source NEVR from %s (%d)", filename, ret); goto errexit; } deltapackage->nevr = cr_safe_string_chunk_insert_null( deltapackage->chunk, str); ret = 
drpm_get_string(delta, DRPM_TAG_SEQUENCE, &str); if (ret != DRPM_ERR_OK) { g_set_error(err, ERR_DOMAIN, CRE_DELTARPM, "Deltarpm cannot read delta sequence from %s (%d)", filename, ret); goto errexit; } deltapackage->sequence = cr_safe_string_chunk_insert_null( deltapackage->chunk, str); drpm_destroy(&delta); return deltapackage; errexit: if (delta) drpm_destroy(&delta); cr_deltapackage_free(deltapackage); return NULL; } static void cr_free_gslist_of_strings(gpointer list) { if (!list) return; cr_slist_free_full((GSList *) list, (GDestroyNotify) g_free); } /* * 1) Scanning for old candidate rpms */ GHashTable * cr_deltarpms_scan_oldpackagedirs(GSList *oldpackagedirs, gint64 max_delta_rpm_size, GError **err) { GHashTable *ht = NULL; assert(!err || *err == NULL); ht = g_hash_table_new_full(g_str_hash, g_str_equal, (GDestroyNotify) g_free, (GDestroyNotify) cr_free_gslist_of_strings); for (GSList *elem = oldpackagedirs; elem; elem = g_slist_next(elem)) { gchar *dirname = elem->data; const gchar *filename; GDir *dirp; GSList *filenames = NULL; dirp = g_dir_open(dirname, 0, NULL); if (!dirp) { g_warning("Cannot open directory %s", dirname); continue; } while ((filename = g_dir_read_name(dirp))) { gchar *full_path; struct stat st; if (!g_str_has_suffix(filename, ".rpm")) continue; // Skip non rpm files full_path = g_build_filename(dirname, filename, NULL); if (stat(full_path, &st) == -1) { g_warning("Cannot stat %s: %s", full_path, g_strerror(errno)); g_free(full_path); continue; } if (st.st_size > max_delta_rpm_size) { g_debug("%s: Skipping %s that is > max_delta_rpm_size", __func__, full_path); g_free(full_path); continue; } g_free(full_path); filenames = g_slist_prepend(filenames, g_strdup(filename)); } if (filenames) { g_hash_table_replace(ht, (gpointer) g_strdup(dirname), (gpointer) filenames); } g_dir_close(dirp); } return ht; } /* * 2) Parallel delta generation */ typedef struct { cr_DeltaTargetPackage *tpkg; } cr_DeltaTask; typedef struct { const char *outdeltadir; gint num_deltas; GHashTable *oldpackages; GMutex mutex; gint64 active_work_size; gint active_tasks; GCond cond_task_finished; } cr_DeltaThreadUserData; static gint cmp_deltatargetpackage_evr(gconstpointer aa, gconstpointer bb) { const cr_DeltaTargetPackage *a = aa; const cr_DeltaTargetPackage *b = bb; return cr_cmp_evr(a->epoch, a->version, a->release, b->epoch, b->version, b->release); } static void cr_delta_thread(gpointer data, gpointer udata) { cr_DeltaTask *task = data; cr_DeltaThreadUserData *user_data = udata; cr_DeltaTargetPackage *tpkg = task->tpkg; // Shortcut GHashTableIter iter; gpointer key, value; // Iterate through specified oldpackage directories g_hash_table_iter_init(&iter, user_data->oldpackages); while (g_hash_table_iter_next(&iter, &key, &value)) { gchar *dirname = key; GSList *local_candidates = NULL; // Select appropriate candidates from the directory for (GSList *elem = value; elem; elem = g_slist_next(elem)) { gchar *filename = elem->data; if (g_str_has_prefix(filename, tpkg->name)) { cr_DeltaTargetPackage *l_tpkg; gchar *path = g_build_filename(dirname, filename, NULL); l_tpkg = cr_deltatargetpackage_from_rpm(path, NULL); g_free(path); if (!l_tpkg) continue; // Check the candidate more carefully if (g_strcmp0(tpkg->name, l_tpkg->name)) { cr_deltatargetpackage_free(l_tpkg); continue; } if (g_strcmp0(tpkg->arch, l_tpkg->arch)) { cr_deltatargetpackage_free(l_tpkg); continue; } if (cr_cmp_evr(tpkg->epoch, tpkg->version, tpkg->release, l_tpkg->epoch, l_tpkg->version, l_tpkg->release) <= 0) { 
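                // The candidate is the same version or newer than the target
                // package, so it cannot serve as the old (delta base) package
                // -> skip it.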
cr_deltatargetpackage_free(l_tpkg); continue; } // This candidate looks good local_candidates = g_slist_prepend(local_candidates, l_tpkg); } } // Sort the candidates local_candidates = g_slist_sort(local_candidates, cmp_deltatargetpackage_evr); local_candidates = g_slist_reverse(local_candidates); // Generate deltas int x = 0; for (GSList *lelem = local_candidates; lelem; lelem = g_slist_next(lelem)){ GError *tmp_err = NULL; cr_DeltaTargetPackage *old = lelem->data; g_debug("Generating delta %s -> %s", old->path, tpkg->path); cr_drpm_create(old, tpkg, user_data->outdeltadir, &tmp_err); if (tmp_err) { g_warning("Cannot generate delta %s -> %s : %s", old->path, tpkg->path, tmp_err->message); g_error_free(tmp_err); continue; } if (++x == user_data->num_deltas) break; } } g_debug("Deltas for \"%s\" (%"G_GINT64_FORMAT") generated", tpkg->name, tpkg->size_installed); g_mutex_lock(&(user_data->mutex)); user_data->active_work_size -= tpkg->size_installed; user_data->active_tasks--; g_cond_signal(&(user_data->cond_task_finished)); g_mutex_unlock(&(user_data->mutex)); g_free(task); } static gint cmp_deltatargetpackage_sizes(gconstpointer a, gconstpointer b) { const cr_DeltaTargetPackage *dtpk_a = a; const cr_DeltaTargetPackage *dtpk_b = b; if (dtpk_a->size_installed < dtpk_b->size_installed) return -1; else if (dtpk_a->size_installed == dtpk_b->size_installed) return 0; else return 1; } gboolean cr_deltarpms_parallel_deltas(GSList *targetpackages, GHashTable *oldpackages, const char *outdeltadir, gint num_deltas, gint workers, gint64 max_delta_rpm_size, gint64 max_work_size, GError **err) { GThreadPool *pool; cr_DeltaThreadUserData user_data; GList *targets = NULL; GError *tmp_err = NULL; assert(!err || *err == NULL); if (num_deltas < 1) return TRUE; if (workers < 1) { g_set_error(err, ERR_DOMAIN, CRE_DELTARPM, "Number of delta workers must be a positive integer number"); return FALSE; } // Init user_data user_data.outdeltadir = outdeltadir; user_data.num_deltas = num_deltas; user_data.oldpackages = oldpackages; user_data.active_work_size = G_GINT64_CONSTANT(0); user_data.active_tasks = 0; g_mutex_init(&(user_data.mutex)); g_cond_init(&(user_data.cond_task_finished)); // Make sorted list of targets without packages // that are bigger then max_delta_rpm_size for (GSList *elem = targetpackages; elem; elem = g_slist_next(elem)) { cr_DeltaTargetPackage *tpkg = elem->data; if (tpkg->size_installed < max_delta_rpm_size) targets = g_list_insert_sorted(targets, tpkg, cmp_deltatargetpackage_sizes); } targets = g_list_reverse(targets); // Setup the pool of workers pool = g_thread_pool_new(cr_delta_thread, &user_data, workers, TRUE, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Cannot create delta pool: "); return FALSE; } // Push tasks into the pool while (targets) { gboolean inserted = FALSE; gint64 active_work_size; gint64 active_tasks; g_mutex_lock(&(user_data.mutex)); while (user_data.active_tasks == workers) // Wait if all available threads are busy g_cond_wait(&(user_data.cond_task_finished), &(user_data.mutex)); active_work_size = user_data.active_work_size; active_tasks = user_data.active_tasks; g_mutex_unlock(&(user_data.mutex)); for (GList *elem = targets; elem; elem = g_list_next(elem)) { cr_DeltaTargetPackage *tpkg = elem->data; if ((active_work_size + tpkg->size_installed) <= max_work_size) { cr_DeltaTask *task = g_new0(cr_DeltaTask, 1); task->tpkg = tpkg; g_mutex_lock(&(user_data.mutex)); user_data.active_work_size += tpkg->size_installed; user_data.active_tasks++; 
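                // Work-size accounting was updated under the lock; the task
                // itself is pushed to the worker pool right after the unlock below.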
g_mutex_unlock(&(user_data.mutex)); g_thread_pool_push(pool, task, NULL); targets = g_list_delete_link(targets, elem); inserted = TRUE; break; } } if (!inserted) { // In this iteration, no task was pushed to the pool g_mutex_lock(&(user_data.mutex)); while (user_data.active_tasks == active_tasks) // Wait until any of running tasks finishes g_cond_wait(&(user_data.cond_task_finished), &(user_data.mutex)); g_mutex_unlock(&(user_data.mutex)); } } g_thread_pool_free(pool, FALSE, TRUE); g_list_free(targets); g_mutex_clear(&(user_data.mutex)); g_cond_clear(&(user_data.cond_task_finished)); return TRUE; } cr_DeltaTargetPackage * cr_deltatargetpackage_from_package(cr_Package *pkg, const char *path, GError **err) { cr_DeltaTargetPackage *tpkg; assert(pkg); assert(!err || *err == NULL); tpkg = g_new0(cr_DeltaTargetPackage, 1); tpkg->chunk = g_string_chunk_new(0); tpkg->name = cr_safe_string_chunk_insert(tpkg->chunk, pkg->name); tpkg->arch = cr_safe_string_chunk_insert(tpkg->chunk, pkg->arch); tpkg->epoch = cr_safe_string_chunk_insert(tpkg->chunk, pkg->epoch); tpkg->version = cr_safe_string_chunk_insert(tpkg->chunk, pkg->version); tpkg->release = cr_safe_string_chunk_insert(tpkg->chunk, pkg->release); tpkg->location_href = cr_safe_string_chunk_insert(tpkg->chunk, pkg->location_href); tpkg->size_installed = pkg->size_installed; tpkg->path = cr_safe_string_chunk_insert(tpkg->chunk, path); return tpkg; } cr_DeltaTargetPackage * cr_deltatargetpackage_from_rpm(const char *path, GError **err) { cr_Package *pkg; cr_DeltaTargetPackage *tpkg; assert(!err || *err == NULL); pkg = cr_package_from_rpm_base(path, 0, 0, err); if (!pkg) return NULL; tpkg = cr_deltatargetpackage_from_package(pkg, path, err); cr_package_free(pkg); return tpkg; } void cr_deltatargetpackage_free(cr_DeltaTargetPackage *tpkg) { if (!tpkg) return; g_string_chunk_free(tpkg->chunk); g_free(tpkg); } GSList * cr_deltarpms_scan_targetdir(const char *path, gint64 max_delta_rpm_size, GError **err) { GSList *targets = NULL; GQueue *sub_dirs = g_queue_new(); GStringChunk *sub_dirs_chunk = g_string_chunk_new(1024); assert(!err || *err == NULL); g_queue_push_head(sub_dirs, g_strdup(path)); // Recursively walk the dir gchar *dirname; while ((dirname = g_queue_pop_head(sub_dirs))) { // Open the directory GDir *dirp = g_dir_open(dirname, 0, NULL); if (!dirp) { g_warning("Cannot open directory %s", dirname); g_string_chunk_free(sub_dirs_chunk); return NULL; } // Iterate over files in directory const gchar *filename; while ((filename = g_dir_read_name(dirp))) { gchar *full_path; struct stat st; cr_DeltaTargetPackage *tpkg; full_path = g_build_filename(dirname, filename, NULL); if (!g_str_has_suffix(filename, ".rpm")) { if (g_file_test(full_path, G_FILE_TEST_IS_DIR)) { // Directory gchar *sub_dir_in_chunk; sub_dir_in_chunk = g_string_chunk_insert(sub_dirs_chunk, full_path); g_queue_push_head(sub_dirs, sub_dir_in_chunk); g_debug("Dir to scan: %s", sub_dir_in_chunk); } g_free(full_path); continue; } if (stat(full_path, &st) == -1) { g_warning("Cannot stat %s: %s", full_path, g_strerror(errno)); g_free(full_path); continue; } if (st.st_size > max_delta_rpm_size) { g_debug("%s: Skipping %s that is > max_delta_rpm_size", __func__, full_path); g_free(full_path); continue; } tpkg = cr_deltatargetpackage_from_rpm(full_path, NULL); if (tpkg) targets = g_slist_prepend(targets, tpkg); g_free(full_path); } g_dir_close(dirp); } cr_queue_free_full(sub_dirs, g_free); g_string_chunk_free(sub_dirs_chunk); return targets; } /* * 3) Parallel xml chunk generation */ typedef 
struct { gchar *full_path; } cr_PrestoDeltaTask; typedef struct { GMutex mutex; GHashTable *ht; cr_ChecksumType checksum_type; const gchar *prefix_to_strip; size_t prefix_len; } cr_PrestoDeltaUserData; void cr_prestodeltatask_free(cr_PrestoDeltaTask *task) { if (!task) return; g_free(task->full_path); g_free(task); } static gboolean walk_drpmsdir(const gchar *drpmsdir, GSList **inlist, GError **err) { gboolean ret = TRUE; GSList *candidates = NULL; GQueue *sub_dirs = g_queue_new(); GStringChunk *sub_dirs_chunk = g_string_chunk_new(1024); assert(drpmsdir); assert(inlist); assert(!err || *err == NULL); g_queue_push_head(sub_dirs, g_strdup(drpmsdir)); // Recursively walk the drpmsdir gchar *dirname; while ((dirname = g_queue_pop_head(sub_dirs))) { // Open the directory GDir *dirp = g_dir_open(drpmsdir, 0, NULL); if (!dirp) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open directory %s", drpmsdir); goto exit; } // Iterate over files in directory const gchar *filename; while ((filename = g_dir_read_name(dirp))) { gchar *full_path = g_build_filename(dirname, filename, NULL); // Non .rpm files if (!g_str_has_suffix (filename, ".drpm")) { if (g_file_test(full_path, G_FILE_TEST_IS_DIR)) { // Directory gchar *sub_dir_in_chunk; sub_dir_in_chunk = g_string_chunk_insert(sub_dirs_chunk, full_path); g_queue_push_head(sub_dirs, sub_dir_in_chunk); g_debug("Dir to scan: %s", sub_dir_in_chunk); } g_free(full_path); continue; } // Take the file cr_PrestoDeltaTask *task = g_new0(cr_PrestoDeltaTask, 1); task->full_path = full_path; candidates = g_slist_prepend(candidates, task); } g_free(dirname); g_dir_close(dirp); } *inlist = candidates; candidates = NULL; exit: g_slist_free_full(candidates, (GDestroyNotify) cr_prestodeltatask_free); cr_queue_free_full(sub_dirs, g_free); g_string_chunk_free(sub_dirs_chunk); return ret; } static void cr_prestodelta_thread(gpointer data, gpointer udata) { cr_PrestoDeltaTask *task = data; cr_PrestoDeltaUserData *user_data = udata; cr_DeltaPackage *dpkg = NULL; struct stat st; gchar *xml_chunk = NULL, *key = NULL, *checksum = NULL; GError *tmp_err = NULL; printf("%s\n", task->full_path); // Load delta package dpkg = cr_deltapackage_from_drpm_base(task->full_path, 0, 0, &tmp_err); if (!dpkg) { g_warning("Cannot read drpm %s: %s", task->full_path, tmp_err->message); g_error_free(tmp_err); goto exit; } // Set the filename dpkg->package->location_href = cr_safe_string_chunk_insert( dpkg->package->chunk, task->full_path + user_data->prefix_len); // Stat the package (to get the size) if (stat(task->full_path, &st) == -1) { g_warning("%s: stat(%s) error (%s)", __func__, task->full_path, g_strerror(errno)); goto exit; } else { dpkg->package->size_package = st.st_size; } // Calculate the checksum checksum = cr_checksum_file(task->full_path, user_data->checksum_type, &tmp_err); if (!checksum) { g_warning("Cannot calculate checksum for %s: %s", task->full_path, tmp_err->message); g_error_free(tmp_err); goto exit; } dpkg->package->checksum_type = cr_safe_string_chunk_insert( dpkg->package->chunk, cr_checksum_name_str( user_data->checksum_type)); dpkg->package->pkgId = cr_safe_string_chunk_insert(dpkg->package->chunk, checksum); // Generate XML xml_chunk = cr_xml_dump_deltapackage(dpkg, &tmp_err); if (tmp_err) { g_warning("Cannot generate xml for drpm %s: %s", task->full_path, tmp_err->message); g_error_free(tmp_err); goto exit; } // Put the XML into the shared hash table gpointer pkey = NULL; gpointer pval = NULL; key = cr_package_nevra(dpkg->package); g_mutex_lock(&(user_data->mutex)); if 
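    // Under the lock: either append this chunk to the list already stored for
    // the package's NEVRA, or insert a brand new list for it.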
(g_hash_table_lookup_extended(user_data->ht, key, &pkey, &pval)) { // Key exists in the table // 1. Remove the key and value from the table without freeing them g_hash_table_steal(user_data->ht, key); // 2. Append to the list (the value from the hash table) GSList *list = (GSList *) pval; list = g_slist_append(pval, xml_chunk); // 3. Insert the modified list again g_hash_table_insert(user_data->ht, pkey, list); } else { // Key doesn't exist yet GSList *list = g_slist_prepend(NULL, xml_chunk); g_hash_table_insert(user_data->ht, g_strdup(key), list); } g_mutex_unlock(&(user_data->mutex)); exit: g_free(checksum); g_free(key); cr_deltapackage_free(dpkg); } static gchar * gen_newpackage_xml_chunk(const char *strnevra, GSList *delta_chunks) { cr_NEVRA *nevra; GString *chunk; if (!delta_chunks) return NULL; nevra = cr_str_to_nevra(strnevra); chunk = g_string_new(NULL); g_string_printf(chunk, " \n", nevra->name, nevra->epoch ? nevra->epoch : "0", nevra->version, nevra->release, nevra->arch); cr_nevra_free(nevra); for (GSList *elem = delta_chunks; elem; elem = g_slist_next(elem)) { gchar *delta_chunk = elem->data; g_string_append(chunk, delta_chunk); } g_string_append(chunk, " \n"); return g_string_free(chunk, FALSE); } gboolean cr_deltarpms_generate_prestodelta_file(const gchar *drpmsdir, cr_XmlFile *f, cr_XmlFile *zck_f, cr_ChecksumType checksum_type, gint workers, const gchar *prefix_to_strip, GError **err) { gboolean ret = TRUE; GSList *candidates = NULL; GThreadPool *pool; cr_PrestoDeltaUserData user_data; GHashTable *ht = NULL; GHashTableIter iter; gpointer key, value; GError *tmp_err = NULL; assert(drpmsdir); assert(f); assert(!err || *err == NULL); // Walk the drpms directory if (!walk_drpmsdir(drpmsdir, &candidates, &tmp_err)) { g_propagate_prefixed_error(err, tmp_err, "%s: ", __func__); ret = FALSE; goto exit; } // Setup pool of workers ht = g_hash_table_new_full(g_str_hash, g_str_equal, (GDestroyNotify) g_free, (GDestroyNotify) cr_free_gslist_of_strings); user_data.ht = ht; user_data.checksum_type = checksum_type; user_data.prefix_to_strip = prefix_to_strip, user_data.prefix_len = prefix_to_strip ? 
strlen(prefix_to_strip) : 0; g_mutex_init(&(user_data.mutex)); pool = g_thread_pool_new(cr_prestodelta_thread, &user_data, workers, TRUE, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Cannot create pool for prestodelta file generation: "); ret = FALSE; goto exit; } // Push tasks to the pool for (GSList *elem = candidates; elem; elem = g_slist_next(elem)) { g_thread_pool_push(pool, elem->data, NULL); } // Wait until the pool finishes g_thread_pool_free(pool, FALSE, TRUE); // Write out the results g_hash_table_iter_init(&iter, user_data.ht); while (g_hash_table_iter_next(&iter, &key, &value)) { gchar *chunk = NULL; gchar *nevra = key; chunk = gen_newpackage_xml_chunk(nevra, (GSList *) value); cr_xmlfile_add_chunk(f, chunk, NULL); /* Write out zchunk file */ if (zck_f) { cr_xmlfile_add_chunk(zck_f, chunk, NULL); cr_end_chunk(zck_f->f, &tmp_err); if (tmp_err) { g_free(chunk); g_propagate_prefixed_error(err, tmp_err, "Cannot create pool for prestodelta file generation: "); ret = FALSE; goto exit; } } g_free(chunk); } exit: g_slist_free_full(candidates, (GDestroyNotify) cr_prestodeltatask_free); g_mutex_clear(&(user_data.mutex)); g_hash_table_destroy(ht); return ret; } #endif createrepo_c-0.17.0/src/deltarpms.h.in000066400000000000000000000072211400672373200176000ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef __C_CREATEREPOLIB_DELTARPMS_H__ #define __C_CREATEREPOLIB_DELTARPMS_H__ #ifdef __cplusplus extern "C" { #endif #include #include #include "package.h" #include "parsehdr.h" #include "xml_file.h" /** \defgroup deltarpms Support for deltarpms * \addtogroup deltarpms * @{ */ #ifndef CR_DELTA_RPM_SUPPORT #cmakedefine CR_DELTA_RPM_SUPPORT #endif #define CR_DEFAULT_MAX_DELTA_RPM_SIZE 100000000 typedef struct { cr_Package *package; char *nevr; char *sequence; GStringChunk *chunk; } cr_DeltaPackage; typedef struct { char *name; char *arch; char *epoch; char *version; char *release; char *location_href; gint64 size_installed; char *path; GStringChunk *chunk; } cr_DeltaTargetPackage; gboolean cr_drpm_support(void); #ifdef CR_DELTA_RPM_SUPPORT char * cr_drpm_create(cr_DeltaTargetPackage *old, cr_DeltaTargetPackage *new, const char *destdir, GError **err); cr_DeltaPackage * cr_deltapackage_from_drpm_base(const char *filename, int changelog_limit, cr_HeaderReadingFlags flags, GError **err); void cr_deltapackage_free(cr_DeltaPackage *deltapackage); GHashTable * cr_deltarpms_scan_oldpackagedirs(GSList *oldpackagedirs, gint64 max_delta_rpm_size, GError **err); cr_DeltaTargetPackage * cr_deltatargetpackage_from_package(cr_Package *pkg, const char *path, GError **err); cr_DeltaTargetPackage * cr_deltatargetpackage_from_rpm(const char *path, GError **err); void cr_deltatargetpackage_free(cr_DeltaTargetPackage *tpkg); gboolean cr_deltarpms_parallel_deltas(GSList *targetpackages, GHashTable *oldpackages, const char *outdeltadir, gint num_deltas, gint workers, gint64 max_delta_rpm_size, gint64 max_work_size, GError **err); GSList * cr_deltarpms_scan_targetdir(const char *path, gint64 max_delta_rpm_size, GError **err); gboolean cr_deltarpms_generate_prestodelta_file(const gchar *drpmdir, cr_XmlFile *f, cr_XmlFile *zck_f, cr_ChecksumType checksum_type, gint workers, const gchar *prefix_to_strip, GError **err); #endif /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_DELTARPMS_H__ */ createrepo_c-0.17.0/src/dumper_thread.c000066400000000000000000000541021400672373200200160ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include #include #include #include "checksum.h" #include "cleanup.h" #include "deltarpms.h" #include "dumper_thread.h" #include "error.h" #include "misc.h" #include "parsepkg.h" #include "xml_dump.h" #include #define MAX_TASK_BUFFER_LEN 20 #define CACHEDCHKSUM_BUFFER_LEN 2048 struct BufferedTask { long id; // ID of the task struct cr_XmlStruct res; // XML for primary, filelists and other cr_Package *pkg; // Package structure char *location_href; // location_href path char *location_base; // location_base path int pkg_from_md; // If true - package structure if from // old metadata and must not be freed! // If false - package is from file and // it must be freed! }; static gint buf_task_sort_func(gconstpointer a, gconstpointer b, G_GNUC_UNUSED gpointer data) { const struct BufferedTask *task_a = a; const struct BufferedTask *task_b = b; if (task_a->id < task_b->id) return -1; if (task_a->id == task_b->id) return 0; return 1; } static void write_pkg(long id, struct cr_XmlStruct res, cr_Package *pkg, struct UserData *udata) { GError *tmp_err = NULL; // Write primary data g_mutex_lock(&(udata->mutex_pri)); while (udata->id_pri != id) g_cond_wait (&(udata->cond_pri), &(udata->mutex_pri)); udata->package_count++; g_free(udata->prev_srpm); udata->prev_srpm = udata->cur_srpm; udata->cur_srpm = g_strdup(pkg->rpm_sourcerpm); gboolean new_pkg = FALSE; if (g_strcmp0(udata->prev_srpm, udata->cur_srpm) != 0) new_pkg = TRUE; ++udata->id_pri; cr_xmlfile_add_chunk(udata->pri_f, (const char *) res.primary, &tmp_err); if (tmp_err) { g_critical("Cannot add primary chunk:\n%s\nError: %s", res.primary, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } if (udata->pri_db) { cr_db_add_pkg(udata->pri_db, pkg, &tmp_err); if (tmp_err) { g_critical("Cannot add record of %s (%s) to primary db: %s", pkg->name, pkg->pkgId, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } if (udata->pri_zck) { if (new_pkg) { cr_end_chunk(udata->pri_zck->f, &tmp_err); if (tmp_err) { g_critical("Unable to end primary zchunk: %s", tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } cr_xmlfile_add_chunk(udata->pri_zck, (const char *) res.primary, &tmp_err); if (tmp_err) { g_critical("Cannot add primary zchunk:\n%s\nError: %s", res.primary, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } g_cond_broadcast(&(udata->cond_pri)); g_mutex_unlock(&(udata->mutex_pri)); // Write fielists data g_mutex_lock(&(udata->mutex_fil)); while (udata->id_fil != id) g_cond_wait (&(udata->cond_fil), &(udata->mutex_fil)); ++udata->id_fil; cr_xmlfile_add_chunk(udata->fil_f, (const char *) res.filelists, &tmp_err); if (tmp_err) { g_critical("Cannot add filelists chunk:\n%s\nError: %s", res.filelists, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } if (udata->fil_db) { cr_db_add_pkg(udata->fil_db, pkg, &tmp_err); if (tmp_err) { g_critical("Cannot add record of %s (%s) to filelists db: %s", pkg->name, pkg->pkgId, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } if (udata->fil_zck) { if (new_pkg) { cr_end_chunk(udata->fil_zck->f, &tmp_err); if (tmp_err) { g_critical("Unable to end filelists zchunk: %s", tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } cr_xmlfile_add_chunk(udata->fil_zck, (const char *) res.filelists, &tmp_err); if (tmp_err) { g_critical("Cannot add filelists zchunk:\n%s\nError: %s", res.filelists, tmp_err->message); 
udata->had_errors = TRUE; g_clear_error(&tmp_err); } } g_cond_broadcast(&(udata->cond_fil)); g_mutex_unlock(&(udata->mutex_fil)); // Write other data g_mutex_lock(&(udata->mutex_oth)); while (udata->id_oth != id) g_cond_wait (&(udata->cond_oth), &(udata->mutex_oth)); ++udata->id_oth; cr_xmlfile_add_chunk(udata->oth_f, (const char *) res.other, &tmp_err); if (tmp_err) { g_critical("Cannot add other chunk:\n%s\nError: %s", res.other, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } if (udata->oth_db) { cr_db_add_pkg(udata->oth_db, pkg, NULL); if (tmp_err) { g_critical("Cannot add record of %s (%s) to other db: %s", pkg->name, pkg->pkgId, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } if (udata->oth_zck) { if (new_pkg) { cr_end_chunk(udata->oth_zck->f, &tmp_err); if (tmp_err) { g_critical("Unable to end other zchunk: %s", tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } cr_xmlfile_add_chunk(udata->oth_zck, (const char *) res.other, &tmp_err); if (tmp_err) { g_critical("Cannot add other zchunk:\n%s\nError: %s", res.other, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); } } g_cond_broadcast(&(udata->cond_oth)); g_mutex_unlock(&(udata->mutex_oth)); } static char * get_checksum(const char *filename, cr_ChecksumType type, cr_Package *pkg, const char *cachedir, GError **err) { GError *tmp_err = NULL; char *checksum = NULL; char *cachefn = NULL; if (cachedir) { // Prepare cache fn char *key; cr_ChecksumCtx *ctx = cr_checksum_new(type, err); if (!ctx) return NULL; if (pkg->siggpg) cr_checksum_update(ctx, pkg->siggpg->data, pkg->siggpg->size, NULL); if (pkg->sigpgp) cr_checksum_update(ctx, pkg->sigpgp->data, pkg->sigpgp->size, NULL); if (pkg->hdrid) cr_checksum_update(ctx, pkg->hdrid, strlen(pkg->hdrid), NULL); key = cr_checksum_final(ctx, err); if (!key) return NULL; cachefn = g_strdup_printf("%s%s-%s-%"G_GINT64_FORMAT"-%"G_GINT64_FORMAT, cachedir, cr_get_filename(pkg->location_href), key, pkg->size_installed, pkg->time_file); free(key); // Try to load checksum FILE *f = fopen(cachefn, "r"); if (f) { char buf[CACHEDCHKSUM_BUFFER_LEN]; size_t readed = fread(buf, 1, CACHEDCHKSUM_BUFFER_LEN, f); if (!ferror(f) && readed > 0) { checksum = g_strndup(buf, readed); } fclose(f); } if (checksum) { g_debug("Cached checksum used: %s: \"%s\"", cachefn, checksum); goto exit; } } // Calculate checksum checksum = cr_checksum_file(filename, type, &tmp_err); if (!checksum) { g_propagate_prefixed_error(err, tmp_err, "Error while checksum calculation: "); goto exit; } // Cache the checksum value if (cachefn && !g_file_test(cachefn, G_FILE_TEST_EXISTS)) { gchar *template = g_strconcat(cachefn, "-XXXXXX", NULL); // Files should not be executable so use only 0666 gint fd = g_mkstemp_full(template, O_RDWR, 0666); if (fd < 0) { g_free(template); goto exit; } write(fd, checksum, strlen(checksum)); close(fd); if (g_rename(template, cachefn) == -1) g_remove(template); g_free(template); } exit: g_free(cachefn); return checksum; } gchar * prepare_split_media_baseurl(int media_id, const char *location_base) { // Default location_base "media:" in split mode if (!location_base || !*location_base) return g_strdup_printf("media:#%d", media_id); // Location doesn't end with "://" -> just append "#MEDIA_ID" if (!g_str_has_suffix(location_base, "://")) return g_strdup_printf("%s#%d", location_base, media_id); // Calculate location_base -> replace ending "//" with "#MEDIA_ID" size_t lb_length = strlen(location_base); _cleanup_free_ gchar 
*tmp_location_base = NULL; tmp_location_base = g_strndup(location_base, (lb_length-2)); return g_strdup_printf("%s#%d", tmp_location_base, media_id); } static cr_Package * load_rpm(const char *fullpath, cr_ChecksumType checksum_type, const char *checksum_cachedir, const char *location_href, const char *location_base, int changelog_limit, struct stat *stat_buf, cr_HeaderReadingFlags hdrrflags, GError **err) { cr_Package *pkg = NULL; GError *tmp_err = NULL; assert(fullpath); assert(!err || *err == NULL); // Get a package object pkg = cr_package_from_rpm_base(fullpath, changelog_limit, hdrrflags, err); if (!pkg) goto errexit; // Locations pkg->location_href = cr_safe_string_chunk_insert(pkg->chunk, location_href); pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, location_base); // Get checksum type string pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk, cr_checksum_name_str(checksum_type)); // Get file stat if (!stat_buf) { struct stat stat_buf_own; if (stat(fullpath, &stat_buf_own) == -1) { g_warning("%s: stat(%s) error (%s)", __func__, fullpath, g_strerror(errno)); g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "stat(%s) failed: %s", fullpath, g_strerror(errno)); goto errexit; } pkg->time_file = stat_buf_own.st_mtime; pkg->size_package = stat_buf_own.st_size; } else { pkg->time_file = stat_buf->st_mtime; pkg->size_package = stat_buf->st_size; } // Compute checksum char *checksum = get_checksum(fullpath, checksum_type, pkg, checksum_cachedir, &tmp_err); if (!checksum) { g_propagate_error(err, tmp_err); goto errexit; } pkg->pkgId = cr_safe_string_chunk_insert(pkg->chunk, checksum); g_free(checksum); // Get header range struct cr_HeaderRangeStruct hdr_r = cr_get_header_byte_range(fullpath, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error while determining header range: "); goto errexit; } pkg->rpm_header_start = hdr_r.start; pkg->rpm_header_end = hdr_r.end; return pkg; errexit: cr_package_free(pkg); return NULL; } void cr_dumper_thread(gpointer data, gpointer user_data) { GError *tmp_err = NULL; gboolean old_used = FALSE; // To use old metadata? 
cr_Package *md = NULL; // Package from loaded MetaData cr_Package *pkg = NULL; // Package from file struct stat stat_buf; // Struct with info from stat() on file struct cr_XmlStruct res; // Structure for generated XML cr_HeaderReadingFlags hdrrflags = CR_HDRR_NONE; struct UserData *udata = (struct UserData *) user_data; struct PoolTask *task = (struct PoolTask *) data; // get location_href without leading part of path (path to repo) // including '/' char _cleanup_free_ gchar *location_href = NULL; location_href = g_strdup(task->full_path + udata->repodir_name_len); _cleanup_free_ gchar *location_base = NULL; location_base = g_strdup(udata->location_base); // User requested modification of the location href if (udata->cut_dirs) { gchar *tmp = location_href; location_href = g_strdup(cr_cut_dirs(location_href, udata->cut_dirs)); g_free(tmp); } if (udata->location_prefix) { gchar *tmp = location_href; location_href = g_build_filename(udata->location_prefix, tmp, NULL); g_free(tmp); } // Prepare location base (if split option is used) if (task->media_id) { gchar *new_location_base = prepare_split_media_baseurl(task->media_id, location_base); g_free(location_base); location_base = new_location_base; } // If --cachedir is used, load signatures and hdrid from packages too if (udata->checksum_cachedir) hdrrflags = CR_HDRR_LOADHDRID | CR_HDRR_LOADSIGNATURES; // Get stat info about file if (udata->old_metadata && !(udata->skip_stat)) { if (stat(task->full_path, &stat_buf) == -1) { g_critical("Stat() on %s: %s", task->full_path, g_strerror(errno)); goto task_cleanup; } } // Update stuff if (udata->old_metadata) { char *cache_key = cr_get_cleaned_href(location_href); // We have old metadata g_mutex_lock(&(udata->mutex_old_md)); md = (cr_Package *) g_hash_table_lookup( cr_metadata_hashtable(udata->old_metadata), cache_key); // Remove the pkg from the hash table of old metadata, so that no other // thread can use it as CACHE, because later we modify it destructively g_hash_table_steal(cr_metadata_hashtable(udata->old_metadata), cache_key); g_mutex_unlock(&(udata->mutex_old_md)); if (md) { g_debug("CACHE HIT %s", task->filename); if (udata->skip_stat) { old_used = TRUE; } else if (stat_buf.st_mtime == md->time_file && stat_buf.st_size == md->size_package && !strcmp(udata->checksum_type_str, md->checksum_type)) { old_used = TRUE; } else { g_debug("%s metadata are obsolete -> generating new", task->filename); } if (old_used) { // We have usable old data, but we have to set proper locations // WARNING! This two lines destructively modifies content of // packages in old metadata. md->location_href = location_href; md->location_base = location_base; // ^^^ The location_base not location_href are properly saved // into pkg chunk this is intentional as after the metadata // are written (dumped) none should use them again. 
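                // From this point on, the reused md entry is handled exactly
                // like a freshly loaded package and is dumped below.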
} } } // Load package and gen XML metadata if (!old_used) { // Load package from file pkg = load_rpm(task->full_path, udata->checksum_type, udata->checksum_cachedir, location_href, location_base, udata->changelog_limit, NULL, hdrrflags, &tmp_err); assert(pkg || tmp_err); if (!pkg) { g_warning("Cannot read package: %s: %s", task->full_path, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); goto task_cleanup; } res = cr_xml_dump(pkg, &tmp_err); if (tmp_err) { g_critical("Cannot dump XML for %s (%s): %s", pkg->name, pkg->pkgId, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); goto task_cleanup; } if (udata->output_pkg_list){ g_mutex_lock(&(udata->mutex_output_pkg_list)); fprintf(udata->output_pkg_list, "%s\n", pkg->location_href); g_mutex_unlock(&(udata->mutex_output_pkg_list)); } } else { // Just gen XML from old loaded metadata pkg = md; res = cr_xml_dump(md, &tmp_err); if (tmp_err) { g_critical("Cannot dump XML for %s (%s): %s", md->name, md->pkgId, tmp_err->message); udata->had_errors = TRUE; g_clear_error(&tmp_err); goto task_cleanup; } } #ifdef CR_DELTA_RPM_SUPPORT // Delta candidate if (udata->deltas && !old_used && pkg->size_installed < udata->max_delta_rpm_size) { cr_DeltaTargetPackage *tpkg; tpkg = cr_deltatargetpackage_from_package(pkg, task->full_path, NULL); if (tpkg) { g_mutex_lock(&(udata->mutex_deltatargetpackages)); udata->deltatargetpackages = g_slist_prepend( udata->deltatargetpackages, tpkg); g_mutex_unlock(&(udata->mutex_deltatargetpackages)); } else { g_warning("Cannot create deltatargetpackage for: %s-%s-%s", pkg->name, pkg->version, pkg->release); } } #endif // Buffering stuff g_mutex_lock(&(udata->mutex_buffer)); if (g_queue_get_length(udata->buffer) < MAX_TASK_BUFFER_LEN && udata->id_pri != task->id && udata->task_count > (task->id + 1)) { // If: // * this isn't our turn // * the buffer isn't full // * this isn't the last task // Then: save the task to the buffer struct BufferedTask *buf_task = malloc(sizeof(struct BufferedTask)); buf_task->id = task->id; buf_task->res = res; buf_task->pkg = pkg; buf_task->location_href = NULL; buf_task->location_base = NULL; buf_task->pkg_from_md = (pkg == md) ? 
1 : 0; if (pkg == md) { // We MUST store locations for reused packages who goes to the buffer buf_task->location_href = g_strdup(location_href); buf_task->pkg->location_href = buf_task->location_href; buf_task->location_base = g_strdup(location_base); buf_task->pkg->location_base = buf_task->location_base; } g_queue_insert_sorted(udata->buffer, buf_task, buf_task_sort_func, NULL); g_mutex_unlock(&(udata->mutex_buffer)); g_free(task->full_path); g_free(task->filename); g_free(task->path); g_free(task); return; } g_mutex_unlock(&(udata->mutex_buffer)); // Dump XML and SQLite write_pkg(task->id, res, pkg, udata); // Clean up cr_package_free(pkg); g_free(res.primary); g_free(res.filelists); g_free(res.other); task_cleanup: if (udata->id_pri <= task->id) { // An error was encountered and we have to wait to increment counters g_mutex_lock(&(udata->mutex_pri)); while (udata->id_pri != task->id) g_cond_wait (&(udata->cond_pri), &(udata->mutex_pri)); ++udata->id_pri; g_cond_broadcast(&(udata->cond_pri)); g_mutex_unlock(&(udata->mutex_pri)); g_mutex_lock(&(udata->mutex_fil)); while (udata->id_fil != task->id) g_cond_wait (&(udata->cond_fil), &(udata->mutex_fil)); ++udata->id_fil; g_cond_broadcast(&(udata->cond_fil)); g_mutex_unlock(&(udata->mutex_fil)); g_mutex_lock(&(udata->mutex_oth)); while (udata->id_oth != task->id) g_cond_wait (&(udata->cond_oth), &(udata->mutex_oth)); ++udata->id_oth; g_cond_broadcast(&(udata->cond_oth)); g_mutex_unlock(&(udata->mutex_oth)); } g_free(task->full_path); g_free(task->filename); g_free(task->path); g_free(task); // Try to write all results from buffer which was waiting for us while (1) { struct BufferedTask *buf_task; g_mutex_lock(&(udata->mutex_buffer)); buf_task = g_queue_peek_head(udata->buffer); if (buf_task && buf_task->id == udata->id_pri) { buf_task = g_queue_pop_head (udata->buffer); g_mutex_unlock(&(udata->mutex_buffer)); // Dump XML and SQLite write_pkg(buf_task->id, buf_task->res, buf_task->pkg, udata); // Clean up cr_package_free(buf_task->pkg); g_free(buf_task->res.primary); g_free(buf_task->res.filelists); g_free(buf_task->res.other); g_free(buf_task->location_href); g_free(buf_task->location_base); g_free(buf_task); } else { g_mutex_unlock(&(udata->mutex_buffer)); break; } } return; } createrepo_c-0.17.0/src/dumper_thread.h000066400000000000000000000117551400672373200200320ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef __C_CREATEREPOLIB_DUMPER_THREAD_H__ #define __C_CREATEREPOLIB_DUMPER_THREAD_H__ #ifdef __cplusplus extern "C" { #endif #include #include #include "load_metadata.h" #include "locate_metadata.h" #include "misc.h" #include "package.h" #include "sqlite.h" #include "xml_file.h" /** \defgroup dumperthread Implementation of concurent dumping used in createrepo_c * \addtogroup dumperthread * @{ */ struct PoolTask { long id; // ID of the task long media_id; // ID of media in split mode, 0 if not in split mode char* full_path; // Complete path - /foo/bar/packages/foo.rpm char* filename; // Just filename - foo.rpm char* path; // Just path - /foo/bar/packages }; struct UserData { cr_XmlFile *pri_f; // Opened compressed primary.xml.* cr_XmlFile *fil_f; // Opened compressed filelists.xml.* cr_XmlFile *oth_f; // Opened compressed other.xml.* cr_SqliteDb *pri_db; // Primary db cr_SqliteDb *fil_db; // Filelists db cr_SqliteDb *oth_db; // Other db cr_XmlFile *pri_zck; // Opened compressed primary.xml.zck cr_XmlFile *fil_zck; // Opened compressed filelists.xml.zck cr_XmlFile *oth_zck; // Opened compressed other.xml.zck char *prev_srpm; // Previous srpm char *cur_srpm; // Current srpm int changelog_limit; // Max number of changelogs for a package const char *location_base; // Base location url int repodir_name_len; // Len of path to repo /foo/bar/repodata // This part |<----->| const char *checksum_type_str; // Name of selected checksum cr_ChecksumType checksum_type; // Constant representing selected checksum const char *checksum_cachedir; // Dir with cached checksums gboolean skip_symlinks; // Skip symlinks long task_count; // Total number of task to process long package_count; // Total number of packages processed // Update stuff gboolean skip_stat; // Skip stat() while updating cr_Metadata *old_metadata; // Loaded metadata GMutex mutex_old_md; // Mutex for accessing old metadata // Thread serialization GMutex mutex_pri; // Mutex for primary metadata GMutex mutex_fil; // Mutex for filelists metadata GMutex mutex_oth; // Mutex for other metadata GCond cond_pri; // Condition for primary metadata GCond cond_fil; // Condition for filelists metadata GCond cond_oth; // Condition for other metadata volatile long id_pri; // ID of task on turn (write primary metadata) volatile long id_fil; // ID of task on turn (write filelists metadata) volatile long id_oth; // ID of task on turn (write other metadata) // Buffering GQueue *buffer; // Buffer for done tasks GMutex mutex_buffer; // Mutex for accessing the buffer // Delta generation gboolean deltas; // Are deltas enabled? gint64 max_delta_rpm_size; // Max size of an rpm that to run // deltarpm against GMutex mutex_deltatargetpackages; // Mutex GSList *deltatargetpackages; // List of cr_DeltaTargetPackages // Location href modifiers gint cut_dirs; // Ignore *num* of directory components // in location href path gchar *location_prefix; // Append this prefix into location_href // during repodata generation gboolean had_errors; // Any errors encountered? 
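    // Optional output list: locations of packages freshly read from disk are
    // written here (entries reused from old metadata are not recorded).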
FILE *output_pkg_list; // File where a list of read packages is written GMutex mutex_output_pkg_list; // Mutex for output_pkg_list file }; void cr_dumper_thread(gpointer data, gpointer user_data); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_DUMPER_THREAD_H__ */ createrepo_c-0.17.0/src/error.c000066400000000000000000000065611400672373200163320ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include "error.h" const char * cr_strerror(cr_Error rc) { switch (rc) { case CRE_OK: return "No error"; case CRE_ERROR: return "No specified error"; case CRE_IO: return "Input/Output error"; case CRE_MEMORY: return "Out of memory"; case CRE_STAT: return "stat() system call failed"; case CRE_DB: return "Database error"; case CRE_BADARG: return "Bad function argument(s)"; case CRE_NOFILE: return "File doesn't exist"; case CRE_NODIR: return "Directory doesn't exist"; case CRE_EXISTS: return "File/Directory already exists"; case CRE_UNKNOWNCHECKSUMTYPE: return "Unknown/Unsupported checksum type"; case CRE_UNKNOWNCOMPRESSION: return "Unknown/Usupported compression"; case CRE_XMLPARSER: return "Error while parsing XML"; case CRE_XMLDATA: return "Loaded XML data are bad"; case CRE_CBINTERRUPTED: return "Interrupted by callback"; case CRE_BADXMLPRIMARY: return "Bad primary XML"; case CRE_BADXMLFILELISTS: return "Bad filelists XML"; case CRE_BADXMLOTHER: return "Bad other XML"; case CRE_MAGIC: return "Magic Number Recognition Library (libmagic) error"; case CRE_GZ: return "Gzip library related error"; case CRE_BZ2: return "Bzip2 library related error"; case CRE_XZ: return "XZ (lzma) library related error"; case CRE_OPENSSL: return "OpenSSL library related error"; case CRE_CURL: return "Curl library related error"; case CRE_ASSERT: return "Assert error"; case CRE_BADCMDARG: return "Bad command line argument(s)"; case CRE_SPAWNERRCODE: return "Child process exited with error code != 0"; case CRE_SPAWNKILLED: return "Child process killed by signal"; case CRE_SPAWNSTOPED: return "Child process stopped by signal"; case CRE_SPAWNABNORMAL: return "Child process exited abnormally"; case CRE_DELTARPM: return "Deltarpm error"; default: return "Unknown error"; } } GQuark createrepo_c_error_quark(void) { static GQuark quark = 0; if (!quark) quark = g_quark_from_static_string ("createrepo_c_error"); return quark; } createrepo_c-0.17.0/src/error.h000066400000000000000000000076071400672373200163410ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the 
License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_ERROR_H__ #define __C_CREATEREPOLIB_ERROR_H__ #include /* Error codes */ typedef enum { CRE_OK, /*!< (0) No error */ CRE_ERROR, /*!< (1) No specified error */ CRE_IO, /*!< (2) Input/Output error (cannot open file, etc.) */ CRE_MEMORY, /*!< (3) Cannot allocate memory */ CRE_STAT, /*!< (4) Stat() call failed */ CRE_DB, /*!< (5) A database error */ CRE_BADARG, /*!< (6) At least one argument of function is bad or non complete */ CRE_NOFILE, /*!< (7) File doesn't exist */ CRE_NODIR, /*!< (8) Directory doesn't exist (not a dir or path doesn't exists) */ CRE_EXISTS, /*!< (9) File/Directory already exists */ CRE_UNKNOWNCHECKSUMTYPE, /*!< (10) Unknown/Unsupported checksum type */ CRE_UNKNOWNCOMPRESSION, /*!< (11) Unknown/Unsupported compression type */ CRE_XMLPARSER, /*!< (12) XML parser error (non valid xml, corrupted xml, ..) */ CRE_XMLDATA, /*!< (13) Loaded xml metadata are bad */ CRE_CBINTERRUPTED, /*!< (14) Interrupted by callback. */ CRE_BADXMLPRIMARY, /*!< (15) Bad filelists.xml file */ CRE_BADXMLFILELISTS, /*!< (16) Bad filelists.xml file */ CRE_BADXMLOTHER, /*!< (17) Bad filelists.xml file */ CRE_BADXMLREPOMD, /*!< (18) Bad repomd.xml file */ CRE_MAGIC, /*!< (19) Magic Number Recognition Library (libmagic) error */ CRE_GZ, /*!< (20) Gzip library related error */ CRE_BZ2, /*!< (21) Bzip2 library related error */ CRE_XZ, /*!< (22) Xz (lzma) library related error */ CRE_OPENSSL, /*!< (23) OpenSSL library related error */ CRE_CURL, /*!< (24) Curl library related error */ CRE_ASSERT, /*!< (25) Ideally this error should never happend. Nevertheless if it happend, probable reason is that some values of createrepo_c object was changed (by you - a programmer) in a bad way */ CRE_BADCMDARG, /*!< (26) Bad command line argument(s) */ CRE_SPAWNERRCODE, /*!< (27) Child process exited with error code != 0 */ CRE_SPAWNKILLED, /*!< (28) Child process killed by signal */ CRE_SPAWNSTOPED, /*!< (29) Child process stopped by signal */ CRE_SPAWNABNORMAL, /*!< (30) Child process exited abnormally */ CRE_DELTARPM, /*!< (31) Deltarpm related error */ CRE_BADXMLUPDATEINFO, /*!< (32) Bad updateinfo.xml file */ CRE_SIGPROCMASK, /*!< (33) Cannot change blocked signals */ CRE_ZCK, /*!< (34) ZCK library related error */ CRE_MODULEMD, /*!< (35) modulemd related error */ CRE_SENTINEL, /*!< (XX) Sentinel */ } cr_Error; /** Converts cr_Error return code to error string. 
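 * A minimal usage sketch (illustrative, not part of the original API docs):
 * \code
 * cr_Error rc = CRE_NOFILE;
 * fprintf(stderr, "createrepo_c error: %s\n", cr_strerror(rc));
 * \endcode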
* @param rc cr_Error return code * @return Error string */ const char *cr_strerror(cr_Error rc); /* Error domains */ #define CREATEREPO_C_ERROR createrepo_c_error_quark() GQuark createrepo_c_error_quark(void); #endif createrepo_c-0.17.0/src/helpers.c000066400000000000000000000356621400672373200166470ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "helpers.h" #include "error.h" #include "misc.h" #include "checksum.h" #include "modifyrepo_shared.h" #include "compression_wrapper.h" #include "threads.h" #include "xml_dump.h" #include "locate_metadata.h" #define ERR_DOMAIN CREATEREPO_C_ERROR typedef struct _old_file { time_t mtime; gchar *path; } OldFile; static void cr_free_old_file(gpointer data) { OldFile *old_file = (OldFile *) data; g_free(old_file->path); g_free(old_file); } static gint cr_cmp_old_repodata_files(gconstpointer a, gconstpointer b) { if (((OldFile *) a)->mtime < ((OldFile *) b)->mtime) return 1; if (((OldFile *) a)->mtime > ((OldFile *) b)->mtime) return -1; return 0; } static void cr_stat_and_insert(const gchar *dirname, const gchar *filename, GSList **list) { struct stat buf; OldFile *old_file; gchar *path = g_strconcat(dirname, filename, NULL); if (stat(path, &buf) == -1) buf.st_mtime = 1; old_file = g_malloc0(sizeof(OldFile)); old_file->mtime = buf.st_mtime; old_file->path = path; *list = g_slist_insert_sorted(*list, old_file, cr_cmp_old_repodata_files); } /* List files that should be removed from the repo or not copied * to the new repo. (except the repomd.xml) */ static gboolean cr_repodata_blacklist_classic(const char *repodata_path, int retain, GSList **blacklist, GError **err) { /* This piece of code implement the retain_old functionality in * the same way as original createrepo does. * The way is pretty stupid. Because: * - Old metadata are kept in the repodata/ but not referenced by * repomd.xml * - Thus, old repodata are searched by its filename * - It manipulate only with primary, filelists, other and * related databases. */ /* By default, createrepo_c keeps (copy from the old repo * to the new repo) all files that are in the repodata/ directory * but are not referenced by the repomd.xml. * * But this hack appends to the old_basenames list a metadata * that should be ignored (that should not be copied to the * new repository). 
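 *
 * For illustration (hypothetical file names): with retain = 1 and a
 * repodata/ directory containing
 *   aaa-primary.xml.gz   (newest mtime)
 *   bbb-primary.xml.gz
 *   ccc-primary.xml.gz
 * only aaa-primary.xml.gz is kept; bbb-primary.xml.gz and
 * ccc-primary.xml.gz end up on the blacklist. The same rule is applied
 * independently to the filelists/other XML files and to the sqlite
 * database variants.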
*/ GSList *pri_lst = NULL, *pri_db_lst = NULL; GSList *fil_lst = NULL, *fil_db_lst = NULL; GSList *oth_lst = NULL, *oth_db_lst = NULL; GSList **lists[] = { &pri_lst, &pri_db_lst, &fil_lst, &fil_db_lst, &oth_lst, &oth_db_lst }; const int num_of_lists = CR_ARRAYLEN(lists); GDir *dirp = NULL; const gchar *filename; GError *tmp_err = NULL; assert(blacklist); assert(!err || *err == NULL); *blacklist = NULL; if (retain == -1) { // -1 means retain all - nothing to be blacklisted return TRUE; } else if (retain < 0) { // other negative values are error g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Number of retained old metadatas " "must be integer number >= -1"); return FALSE; } // Open the repodata/ directory dirp = g_dir_open (repodata_path, 0, &tmp_err); if (!dirp) { g_warning("Cannot open directory: %s: %s", repodata_path, tmp_err->message); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open directory: %s: %s", repodata_path, tmp_err->message); g_error_free(tmp_err); return FALSE; } // Create sorted (by mtime) lists of old metadata files // More recent files are first while ((filename = g_dir_read_name (dirp))) { // Get filename without suffix gchar *name_without_suffix; gchar *lastdot = strrchr(filename, '.'); if (!lastdot) continue; // Filename doesn't contain '.' name_without_suffix = g_strndup(filename, (lastdot - filename)); // XXX: This detection is pretty shitty, but it mimics // behaviour of original createrepo if (g_str_has_suffix(name_without_suffix, "primary.xml")) { cr_stat_and_insert(repodata_path, filename, &pri_lst); } else if (g_str_has_suffix(name_without_suffix, "primary.sqlite")) { cr_stat_and_insert(repodata_path, filename, &pri_db_lst); } else if (g_str_has_suffix(name_without_suffix, "filelists.xml")) { cr_stat_and_insert(repodata_path, filename, &fil_lst); } else if (g_str_has_suffix(name_without_suffix, "filelists.sqlite")) { cr_stat_and_insert(repodata_path, filename, &fil_db_lst); } else if (g_str_has_suffix(name_without_suffix, "other.xml")) { cr_stat_and_insert(repodata_path, filename, &oth_lst); } else if (g_str_has_suffix(name_without_suffix, "other.sqlite")) { cr_stat_and_insert(repodata_path, filename, &oth_db_lst); } g_free(name_without_suffix); } g_dir_close(dirp); dirp = NULL; // Append files to the blacklist for (int x = 0; x < num_of_lists; x++) { for (GSList *el = g_slist_nth(*(lists[x]), retain); el; el = g_slist_next(el)) { OldFile *of = (OldFile *) el->data; *blacklist = g_slist_prepend(*blacklist, g_path_get_basename(of->path)); } // Free the list cr_slist_free_full(*(lists[x]), cr_free_old_file); } return TRUE; } /* List files that should be removed from the repo or not copied * to the new repo. 
(except the repomd.xml) * This function blacklist all metadata files listed in repomd.xml * if retain == 0, otherwise it don't blacklist any file */ static gboolean cr_repodata_blacklist(const char *repodata_path, int retain, GSList **blacklist, GError **err) { gchar *old_repomd_path = NULL; cr_Repomd *repomd = NULL; GError *tmp_err = NULL; assert(blacklist); assert(!err || *err == NULL); *blacklist = NULL; if (retain == -1 || retain > 0) { // retain all - nothing to be blacklisted return TRUE; } else if (retain < 0) { // other negative values are error g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Number of retained old metadatas " "must be integer number >= -1"); return FALSE; } // Parse old repomd.xml old_repomd_path = g_build_filename(repodata_path, "repomd.xml", NULL); repomd = cr_repomd_new(); cr_xml_parse_repomd(old_repomd_path, repomd, NULL, NULL, &tmp_err); if (tmp_err) { g_warning("Cannot parse repomd: %s", old_repomd_path); g_clear_error(&tmp_err); cr_repomd_free(repomd); repomd = cr_repomd_new(); } g_free(old_repomd_path); // Parse the old repomd.xml and append its items // to the old_basenames list for (GSList *elem = repomd->records; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *rec = elem->data; if (!rec->location_href) { // Ignore bad records (records without location_href) g_warning("Record without location href in old repo"); continue; } if (rec->location_base) { // Ignore files with base location g_debug("Old repomd record with base location is ignored: " "%s - %s", rec->location_base, rec->location_href); continue; } *blacklist = g_slist_prepend(*blacklist, g_path_get_basename(rec->location_href)); } cr_repomd_free(repomd); return TRUE; } static gboolean cr_repodata_blacklist_by_age(const char *repodata_path, gint64 md_max_age, GSList **blacklist, GError **err) { GDir *dirp = NULL; const gchar *filename; time_t current_time; GError *tmp_err = NULL; assert(blacklist); assert(!err || *err == NULL); *blacklist = NULL; if (md_max_age < 0) { // A negative value means retain all - nothing to be blacklisted return TRUE; } // Open the repodata/ directory dirp = g_dir_open (repodata_path, 0, &tmp_err); if (!dirp) { g_warning("Cannot open directory: %s: %s", repodata_path, tmp_err->message); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open directory: %s: %s", repodata_path, tmp_err->message); g_error_free(tmp_err); return FALSE; } current_time = time(NULL); // Create sorted (by mtime) lists of old metadata files // More recent files are first while ((filename = g_dir_read_name (dirp))) { struct stat buf; gchar *fullpath = g_strconcat(repodata_path, filename, NULL); if (stat(fullpath, &buf) == -1) { g_warning("Cannot stat %s", fullpath); g_free(fullpath); continue; } g_free(fullpath); // Check file age (current_time - mtime) gint64 age = difftime(current_time, buf.st_mtime); if (age <= md_max_age) continue; // The file is young // Debug g_debug("File is too old (%"G_GINT64_FORMAT" > %"G_GINT64_FORMAT") %s", age, md_max_age, filename); // Add the file to the blacklist *blacklist = g_slist_prepend(*blacklist, g_strdup(filename)); } g_dir_close(dirp); return TRUE; } int cr_remove_metadata_classic(const char *repopath, int retain, GError **err) { int rc = CRE_OK; gboolean ret = TRUE; gchar *full_repopath = NULL; GSList *blacklist = NULL; GDir *dirp = NULL; const gchar *filename; GError *tmp_err = NULL; assert(repopath); assert(!err || *err == NULL); full_repopath = g_strconcat(repopath, "/repodata/", NULL); // Get list of files that should be deleted ret = 
cr_repodata_blacklist_classic(full_repopath, retain, &blacklist, err); if (!ret) return FALSE; // Always remove repomd.xml blacklist = g_slist_prepend(blacklist, g_strdup("repomd.xml")); // Open the repodata/ directory dirp = g_dir_open(full_repopath, 0, &tmp_err); if (tmp_err) { g_debug("%s: Path %s doesn't exist", __func__, repopath); g_propagate_prefixed_error(err, tmp_err, "Cannot open a dir: "); rc = CRE_IO; goto cleanup; } // Iterate over the files in the repository and remove all files // that are listed on blacklist while ((filename = g_dir_read_name(dirp))) { gchar *full_path; if (!g_slist_find_custom(blacklist, filename, (GCompareFunc) g_strcmp0)) // The filename is not blacklisted, skip it continue; full_path = g_strconcat(full_repopath, filename, NULL); // REMOVE // TODO: Use more sophisticated function if (g_remove(full_path) != -1) g_debug("Removed %s", full_path); else g_warning("Cannot remove %s: %s", full_path, g_strerror(errno)); g_free(full_path); } cleanup: cr_slist_free_full(blacklist, g_free); g_free(full_repopath); if (dirp) g_dir_close(dirp); return rc; } gboolean cr_old_metadata_retention(const char *old_repo, const char *new_repo, cr_RetentionType type, gint64 val, GError **err) { gboolean ret = TRUE; GSList *blacklist = NULL; GDir *dirp = NULL; const gchar *filename; GError *tmp_err = NULL; assert(!err || *err == NULL); if (!g_file_test(old_repo, G_FILE_TEST_EXISTS)) return TRUE; g_debug("Copying files from old repository to the new one"); // Get list of file that should be skiped during copying g_debug("Retention type: %d (%"G_GINT64_FORMAT")", type, val); if (type == CR_RETENTION_BYAGE) ret = cr_repodata_blacklist_by_age(old_repo, val, &blacklist, err); else if (type == CR_RETENTION_COMPATIBILITY) ret = cr_repodata_blacklist_classic(old_repo, (int) val, &blacklist, err); else // CR_RETENTION_DEFAULT ret = cr_repodata_blacklist(old_repo, (int) val, &blacklist, err); if (!ret) return FALSE; // Never copy old repomd.xml to the new repository blacklist = g_slist_prepend(blacklist, g_strdup("repomd.xml")); // Open directory with old repo dirp = g_dir_open (old_repo, 0, &tmp_err); if (!dirp) { g_warning("Cannot open directory: %s: %s", old_repo, tmp_err->message); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open directory: %s: %s", old_repo, tmp_err->message); g_error_free(tmp_err); ret = FALSE; goto exit; } // Iterate over the files in the old repository and copy all // that are not listed on blacklist while ((filename = g_dir_read_name(dirp))) { if (g_slist_find_custom(blacklist, filename, (GCompareFunc) g_strcmp0)) { g_debug("Blacklisted: %s", filename); continue; } gchar *full_path = g_strconcat(old_repo, filename, NULL); gchar *new_full_path = g_strconcat(new_repo, filename, NULL); // Do not override new file with the old one if (g_file_test(new_full_path, G_FILE_TEST_EXISTS)) { g_debug("Skipped copy: %s -> %s (file already exists)", full_path, new_full_path); g_free(full_path); g_free(new_full_path); continue; } // COPY! 
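        // (A failed copy below is only logged as a warning and the loop moves
        // on to the next file, so a single broken entry does not abort the
        // whole retention pass.)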
cr_cp(full_path, new_full_path, CR_CP_RECURSIVE|CR_CP_PRESERVE_ALL, NULL, &tmp_err); if (tmp_err) { g_warning("Cannot copy %s -> %s: %s", full_path, new_full_path, tmp_err->message); g_clear_error(&tmp_err); } else { g_debug("Copied %s -> %s", full_path, new_full_path); } g_free(full_path); g_free(new_full_path); } exit: // Cleanup cr_slist_free_full(blacklist, g_free); if (dirp) g_dir_close(dirp); return ret; } createrepo_c-0.17.0/src/helpers.h000066400000000000000000000052771400672373200166530ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_HELPERS_H__ #define __C_CREATEREPOLIB_HELPERS_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" #include "compression_wrapper.h" #include "package.h" /** \defgroup helpers Helpers for createrepo_c, modifyrepo_c, mergerepo_c * * Module with helpers for createrepo_c, modifyrepo_c, mergerepo_c * * \addtogroup helpers * @{ */ typedef enum { CR_RETENTION_DEFAULT, CR_RETENTION_COMPATIBILITY, CR_RETENTION_BYAGE, } cr_RetentionType; gboolean cr_old_metadata_retention(const char *old_repo, const char *new_repo, cr_RetentionType type, gint64 val, GError **err); /** Remove files related to repodata from the specified path. * Files not listed in repomd.xml and with nonstandard names (standard names * are names with suffixes like primary.xml.*, primary.sqlite.*, other.xml.*, * etc.) are keep untouched (repodata/ subdirectory IS NOT removed!). * @param repopath path to directory with repodata/ subdirectory * @param err GError ** * @return number of removed files */ int cr_remove_metadata(const char *repopath, GError **err); /** Remove repodata in same manner as classic createrepo. * This function removes only (primary|filelists|other)[.sqlite].* files * from repodata. * @param repopath path to directory with repodata/subdirectory * @param retain keep around the latest N old, uniquely named primary, * filelists and otherdata xml and sqlite files. * If <1 no old files will be kept. * @param err GError ** * @return cr_Error code */ int cr_remove_metadata_classic(const char *repopath, int retain, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_HELPERS__ */ createrepo_c-0.17.0/src/koji.c000066400000000000000000000261311400672373200161300ustar00rootroot00000000000000/* * Copyright (C) 2018 Red Hat, Inc. * * Licensed under the GNU Lesser General Public License Version 2.1 * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "koji.h" #include "error.h" #include "load_metadata.h" #include "misc.h" void cr_srpm_val_destroy(gpointer data) { struct srpm_val *val = data; g_free(val->sourcerpm); g_free(val); } void koji_stuff_destroy(struct KojiMergedReposStuff **koji_stuff_ptr) { struct KojiMergedReposStuff *koji_stuff; if (!koji_stuff_ptr || !*koji_stuff_ptr) return; koji_stuff = *koji_stuff_ptr; g_clear_pointer (&koji_stuff->blocked_srpms, g_hash_table_destroy); g_clear_pointer (&koji_stuff->include_srpms, g_hash_table_destroy); g_clear_pointer (&koji_stuff->seen_rpms, g_hash_table_destroy); cr_close(koji_stuff->pkgorigins, NULL); g_free(koji_stuff); } static int pkgorigins_prepare_file (const gchar *tmpdir, CR_FILE **pkgorigins_file) { gchar *pkgorigins_path = NULL; GError *tmp_err = NULL; pkgorigins_path = g_strconcat(tmpdir, "pkgorigins.gz", NULL); *pkgorigins_file = cr_open(pkgorigins_path, CR_CW_MODE_WRITE, CR_CW_GZ_COMPRESSION, &tmp_err); if (tmp_err) { g_critical("Cannot open %s: %s", pkgorigins_path, tmp_err->message); g_error_free(tmp_err); g_free(pkgorigins_path); return 1; } g_free(pkgorigins_path); return 0; } /* Limited version of koji_stuff_prepare() that sets up only pkgorigins */ int pkgorigins_prepare(struct KojiMergedReposStuff **koji_stuff_ptr, const gchar *tmpdir) { int result; struct KojiMergedReposStuff *koji_stuff = g_malloc0(sizeof(struct KojiMergedReposStuff)); // Prepare pkgorigin file result = pkgorigins_prepare_file (tmpdir, &koji_stuff->pkgorigins); if (result != 0) { g_free (koji_stuff); return result; } *koji_stuff_ptr = koji_stuff; return 0; // All ok } int koji_stuff_prepare(struct KojiMergedReposStuff **koji_stuff_ptr, struct CmdOptions *cmd_options, GSList *repos) { struct KojiMergedReposStuff *koji_stuff; GSList *element; int repoid; int result; // Pointers to elements in the koji_stuff_ptr GHashTable *include_srpms = NULL; // XXX koji_stuff = g_malloc0(sizeof(struct KojiMergedReposStuff)); *koji_stuff_ptr = koji_stuff; // Prepare hashtables koji_stuff->include_srpms = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, cr_srpm_val_destroy); koji_stuff->seen_rpms = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); include_srpms = koji_stuff->include_srpms; // Load list of blocked srpm packages if (cmd_options->blocked) { int x = 0; char *content = NULL; char **names; GError *err = NULL; if (!g_file_get_contents(cmd_options->blocked, &content, NULL, &err)) { g_critical("Error while reading blocked file: %s", err->message); g_error_free(err); g_free(content); return 1; } koji_stuff->blocked_srpms = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); names = g_strsplit(content, "\n", 0); while (names && names[x] != NULL) { if (strlen(names[x])) g_hash_table_replace(koji_stuff->blocked_srpms, g_strdup(names[x]), NULL); x++; } g_strfreev(names); g_free(content); } koji_stuff->simple = cmd_options->koji_simple; // Prepare pkgorigin file result = pkgorigins_prepare_file (cmd_options->tmp_out_repo, &koji_stuff->pkgorigins); if (result != 0) { return result; } // Iterate over every repo and 
fill include_srpms hashtable g_debug("Preparing list of allowed srpm builds"); repoid = 0; for (element = repos; element; element = g_slist_next(element)) { struct cr_MetadataLocation *ml; cr_Metadata *metadata; GHashTableIter iter; gpointer key, void_pkg; ml = (struct cr_MetadataLocation *) element->data; if (!ml) { g_critical("Bad repo location"); repoid++; break; } metadata = cr_metadata_new(CR_HT_KEY_HASH, 0, NULL); g_debug("Loading srpms from: %s", ml->original_url); if (cr_metadata_load_xml(metadata, ml, NULL) != CRE_OK) { cr_metadata_free(metadata); g_critical("Cannot load repo: \"%s\"", ml->original_url); repoid++; break; } // Iterate over every package in repo and what "builds" // we're allowing into the repo g_hash_table_iter_init(&iter, cr_metadata_hashtable(metadata)); while (g_hash_table_iter_next(&iter, &key, &void_pkg)) { cr_Package *pkg = (cr_Package *) void_pkg; cr_NEVRA *nevra; gpointer data; struct srpm_val *srpm_value_new; if (!pkg->rpm_sourcerpm) { g_warning("Package '%s' from '%s' doesn't have specified source srpm", pkg->location_href, ml->original_url); continue; } nevra = cr_split_rpm_filename(pkg->rpm_sourcerpm); if (!nevra) { g_debug("Srpm name is invalid: %s", pkg->rpm_sourcerpm); continue; } data = g_hash_table_lookup(include_srpms, nevra->name); if (data) { // We have already seen build with the same name int cmp; cr_NEVRA *nevra_existing; struct srpm_val *srpm_value_existing = data; if (srpm_value_existing->repo_id != repoid) { // We found a rpm built from an srpm with the same name in // a previous repo. The previous repo takes precendence, // so ignore the srpm found here. cr_nevra_free(nevra); g_debug("Srpm already loaded from previous repo %s", pkg->rpm_sourcerpm); continue; } // We're in the same repo, so compare srpm NVRs nevra_existing = cr_split_rpm_filename(srpm_value_existing->sourcerpm); cmp = cr_cmp_nevra(nevra, nevra_existing); cr_nevra_free(nevra_existing); if (cmp < 1) { // Existing package is from the newer srpm cr_nevra_free(nevra); g_debug("Srpm already exists in newer version %s", pkg->rpm_sourcerpm); continue; } } // The current package we're processing is from a newer srpm // than the existing srpm in the dict, so update the dict // OR // We found a new build so we add it to the dict g_debug("Adding srpm: %s", pkg->rpm_sourcerpm); srpm_value_new = g_malloc0(sizeof(struct srpm_val)); srpm_value_new->repo_id = repoid; srpm_value_new->sourcerpm = g_strdup(pkg->rpm_sourcerpm); g_hash_table_replace(include_srpms, g_strdup(nevra->name), srpm_value_new); cr_nevra_free(nevra); } cr_metadata_free(metadata); repoid++; } return 0; // All ok } gboolean koji_allowed(cr_Package *pkg, struct KojiMergedReposStuff *koji_stuff) { gchar *nvra; gboolean seen; if (pkg->rpm_sourcerpm) { // Sometimes, there are metadata that don't contain sourcerpm // items for their packages. // I don't know if better is to include or exclude such packages. // Original mergerepos script doesn't expect such situation. // So for now, include them. But it can be changed anytime // in future. 
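        // Split the sourcerpm filename into NEVRA parts; the source name is
        // the key used for the blocked_srpms / include_srpms lookups below.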
cr_NEVRA *nevra = cr_split_rpm_filename(pkg->rpm_sourcerpm); if (!nevra) { g_debug("Package %s has invalid srpm %s", pkg->name, pkg->rpm_sourcerpm); return 0; } if (koji_stuff->blocked_srpms) { gboolean is_blocked; is_blocked = g_hash_table_lookup_extended(koji_stuff->blocked_srpms, nevra->name, NULL, NULL); if (is_blocked) { // Srpm of the package is not allowed g_debug("Package %s has blocked srpm %s", pkg->name, pkg->rpm_sourcerpm); cr_nevra_free(nevra); return 0; } } if (!koji_stuff->simple && koji_stuff->include_srpms) { struct srpm_val *value; value = g_hash_table_lookup(koji_stuff->include_srpms, nevra->name); if (!value || g_strcmp0(pkg->rpm_sourcerpm, value->sourcerpm)) { // Srpm of the package is not allowed g_debug("Package %s has forbidden srpm %s", pkg->name, pkg->rpm_sourcerpm); cr_nevra_free(nevra); return 0; } } cr_nevra_free(nevra); } if (!koji_stuff->simple && koji_stuff->seen_rpms) { // Check if we have already seen this package before nvra = cr_package_nvra(pkg); seen = g_hash_table_lookup_extended(koji_stuff->seen_rpms, nvra, NULL, NULL); if (seen) { // Similar package has been already added g_debug("Package with same nvra (%s) has been already added", nvra); g_free(nvra); return 0; } // Make a note that we have seen this package g_hash_table_replace(koji_stuff->seen_rpms, nvra, NULL); } return 1; } createrepo_c-0.17.0/src/koji.h000066400000000000000000000071371400672373200161420ustar00rootroot00000000000000/* * Copyright (C) 2018 Red Hat, Inc. * * Licensed under the GNU Lesser General Public License Version 2.1 * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __C_CREATEREPOLIB_KOJI_H__ #define __C_CREATEREPOLIB_KOJI_H__ #ifdef __cplusplus extern "C" { #endif #include "package.h" #include "mergerepo_c.h" // struct KojiMergedReposStuff // contains information needed to simulate sort_and_filter() method from // mergerepos script from Koji. // // sort_and_filter() method description: // ------------------------------------ // For each package object, check if the srpm name has ever been seen before. // If is has not, keep the package. If it has, check if the srpm name was first // seen in the same repo as the current package. If so, keep the package from // the srpm with the highest NVR. If not, keep the packages from the first // srpm we found, and delete packages from all other srpms. // // Packages with matching NVRs in multiple repos will be taken from the first // repo. // // If the srpm name appears in the blocked package list, any packages generated // from the srpm will be deleted from the package sack as well. // // This method will also generate a file called "pkgorigins" and add it to the // repo metadata. This is a tab-separated map of package E:N-V-R.A to repo URL // (as specified on the command-line). 
This allows a package to be tracked back // to its origin, even if the location field in the repodata does not match the // original repo location. struct srpm_val { int repo_id; // id of repository char *sourcerpm; // pkg->rpm_sourcerpm }; struct KojiMergedReposStuff { GHashTable *blocked_srpms; // blocked_srpms: // Names of sprms which will be skipped // Key: srpm name // Value: NULL (not important) GHashTable *include_srpms; // include_srpms: // Only packages from srpms included in this table will be included // in output merged metadata. // Key: srpm name // Value: struct srpm_val GHashTable *seen_rpms; // seen_rpms: // List of packages already included into the output metadata. // Purpose of this list is to avoid a duplicit packages in output. // Key: string with package n-v-r.a // Value: NULL (not important) CR_FILE *pkgorigins; // Every element has format: pkg_nvra\trepourl gboolean simple; }; /* Limited version of koji_stuff_prepare() that sets up only pkgorigins */ int pkgorigins_prepare(struct KojiMergedReposStuff **koji_stuff_ptr, const gchar *tmpdir); int koji_stuff_prepare(struct KojiMergedReposStuff **koji_stuff_ptr, struct CmdOptions *cmd_options, GSList *repos); void koji_stuff_destroy(struct KojiMergedReposStuff **koji_stuff_ptr); void cr_srpm_val_destroy(gpointer data); gboolean koji_allowed(cr_Package *pkg, struct KojiMergedReposStuff *koji_stuff); #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_KOJI_H__ */ createrepo_c-0.17.0/src/load_metadata.c000066400000000000000000000525271400672373200177630ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #ifdef WITH_LIBMODULEMD #include #endif /* WITH_LIBMODULEMD */ #include "error.h" #include "package.h" #include "misc.h" #include "load_metadata.h" #include "locate_metadata.h" #include "xml_parser.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define STRINGCHUNK_SIZE 16384 /** Structure for loaded metadata */ struct _cr_Metadata { cr_HashTableKey key; /*!< key used in hashtable */ GHashTable *ht; /*!< hashtable with packages */ GStringChunk *chunk; /*!< NULL or string chunk with strings from htn */ GHashTable *pkglist_ht; /*!< list of allowed package basenames to load */ cr_HashTableKeyDupAction dupaction; /*!< How to behave in case of duplicated items */ #ifdef WITH_LIBMODULEMD ModulemdModuleIndex *moduleindex; /*!< Module metadata */ #endif /* WITH_LIBMODULEMD */ }; cr_HashTableKey cr_metadata_key(cr_Metadata *md) { assert(md); return md->key; } GHashTable * cr_metadata_hashtable(cr_Metadata *md) { assert(md); return md->ht; } #ifdef WITH_LIBMODULEMD ModulemdModuleIndex * cr_metadata_modulemd(cr_Metadata *md) { assert(md); return md->moduleindex; } #endif /* WITH_LIBMODULEMD */ void cr_free_values(gpointer data) { cr_package_free((cr_Package *) data); } GHashTable * cr_new_metadata_hashtable() { GHashTable *hashtable = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, cr_free_values); return hashtable; } void cr_destroy_metadata_hashtable(GHashTable *hashtable) { if (hashtable) g_hash_table_destroy (hashtable); } cr_Metadata * cr_metadata_new(cr_HashTableKey key, int use_single_chunk, GSList *pkglist) { cr_Metadata *md; assert(key < CR_HT_KEY_SENTINEL); md = g_malloc0(sizeof(*md)); md->key = key; md->ht = cr_new_metadata_hashtable(); if (use_single_chunk) md->chunk = g_string_chunk_new(STRINGCHUNK_SIZE); if (pkglist) { // Create hashtable from pkglist // This hashtable is used for checking if the metadata of the package // should be included. // Purpose is to save memory - We load only metadata about // packages which we will probably use. // This hashtable is modified "on the fly" - If we found and load // a metadata about the package, we remove its record from the hashtable. // So if we met the metadata for this package again we will ignore it. md->pkglist_ht = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); for (GSList *elem = pkglist; elem; elem = g_slist_next(elem)) g_hash_table_insert(md->pkglist_ht, g_strdup(elem->data), NULL); } md->dupaction = CR_HT_DUPACT_KEEPFIRST; return md; } void cr_metadata_free(cr_Metadata *md) { if (!md) return; #ifdef WITH_LIBMODULEMD g_clear_pointer (&(md->moduleindex), g_object_unref); #endif /* WITH_LIBMODULEMD */ cr_destroy_metadata_hashtable(md->ht); if (md->chunk) g_string_chunk_free(md->chunk); if (md->pkglist_ht) g_hash_table_destroy(md->pkglist_ht); g_free(md); } gboolean cr_metadata_set_dupaction(cr_Metadata *md, cr_HashTableKeyDupAction dupaction) { if (!md || dupaction >= CR_HT_DUPACT_SENTINEL) return FALSE; md->dupaction = dupaction; return TRUE; } // Callbacks for XML parsers typedef enum { PARSING_PRI, PARSING_FIL, PARSING_OTH, } cr_ParsingState; typedef struct { GHashTable *ht; GStringChunk *chunk; GHashTable *pkglist_ht; GHashTable *ignored_pkgIds; /*!< If there are multiple packages wich have the same checksum (pkgId) but they are in fact different (they have different basenames, mtimes or sizes), then we want to ignore these packages during loading. 
It's because the pkgId is used to pair metadata from primary.xml with metadata from filelists.xml and other.xml and we want the pkgId to be unique. Key is pkgId and value is NULL. */ cr_ParsingState state; gint64 pkgKey; /*!< basically order of the package */ } cr_CbData; static int primary_newpkgcb(cr_Package **pkg, G_GNUC_UNUSED const char *pkgId, G_GNUC_UNUSED const char *name, G_GNUC_UNUSED const char *arch, void *cbdata, G_GNUC_UNUSED GError **err) { cr_CbData *cb_data = cbdata; assert(*pkg == NULL); if (cb_data->chunk) { *pkg = cr_package_new_without_chunk(); (*pkg)->chunk = cb_data->chunk; (*pkg)->loadingflags |= CR_PACKAGE_SINGLE_CHUNK; } else { *pkg = cr_package_new(); } return CR_CB_RET_OK; } static int primary_pkgcb(cr_Package *pkg, void *cbdata, G_GNUC_UNUSED GError **err) { gboolean store_pkg = TRUE; cr_CbData *cb_data = cbdata; cr_Package *epkg; char *basename = cr_get_filename(pkg->location_href); assert(pkg); assert(pkg->pkgId); if (cb_data->chunk) { // Set pkg internal chunk to NULL, // if global chunk for all packages is used assert(pkg->chunk == cb_data->chunk); pkg->chunk = NULL; } if (cb_data->pkglist_ht && basename) { // If a pkglist was specified, // check if the package should be loaded or not store_pkg = g_hash_table_lookup_extended(cb_data->pkglist_ht, basename, NULL, NULL); } if (store_pkg) { // Check if pkgId is not on the list of blocked Ids if (g_hash_table_lookup_extended(cb_data->ignored_pkgIds, pkg->pkgId, NULL, NULL)) // We should ignore this pkgId (package's hash) store_pkg = FALSE; } if (!store_pkg) { // Drop the currently loaded package cr_package_free(pkg); return CR_CB_RET_OK; } epkg = g_hash_table_lookup(cb_data->ht, pkg->pkgId); if (!epkg) { // Store package into the hashtable pkg->loadingflags |= CR_PACKAGE_FROM_XML; pkg->loadingflags |= CR_PACKAGE_LOADED_PRI; g_hash_table_replace(cb_data->ht, pkg->pkgId, pkg); } else { // Package with the same pkgId (hash) already exists if (epkg->time_file == pkg->time_file && epkg->size_package == pkg->size_package && !g_strcmp0(cr_get_filename(pkg->location_href), basename)) { // The existing package is the same as the current one. // This is ok g_debug("Multiple packages with the same checksum: %s. " "Loading the info only once.", pkg->pkgId); } else { // The existing package is different. We have two different // packages with the same checksum -> drop both of them // and append this checksum to the ignored_pkgIds // XXX: Note that this constrain works only for single repo! // If the same cr_Metadata are loaded from several different // repos, than inter-repo packages with matching checksum // are not checked. g_debug("Multiple different packages (basename, mtime or size " "doesn't match) with the same checksum: %s. " "Ignoring all packages with the checksum.", pkg->pkgId); g_hash_table_remove(cb_data->ht, pkg->pkgId); g_hash_table_replace(cb_data->ignored_pkgIds, g_strdup(pkg->pkgId), NULL); } // Drop the currently loaded package cr_package_free(pkg); return CR_CB_RET_OK; } ++cb_data->pkgKey; pkg->pkgKey = cb_data->pkgKey; return CR_CB_RET_OK; } static int newpkgcb(cr_Package **pkg, const char *pkgId, G_GNUC_UNUSED const char *name, G_GNUC_UNUSED const char *arch, void *cbdata, G_GNUC_UNUSED GError **err) { cr_CbData *cb_data = cbdata; assert(*pkg == NULL); assert(pkgId); *pkg = g_hash_table_lookup(cb_data->ht, pkgId); if (*pkg) { // If package with the pkgId was parsed from primary.xml, then... 
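        // ...check whether its filelists/other data still need to be filled in.
        // The CR_PACKAGE_LOADED_* flags make sure the same package struct is
        // not populated twice when several entries share one pkgId (checksum).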
if (cb_data->state == PARSING_FIL) { if ((*pkg)->loadingflags & CR_PACKAGE_LOADED_FIL) { // For package with this checksum, the filelist was // already loaded. *pkg = NULL; } else { // Make a note that filelist is parsed (*pkg)->loadingflags |= CR_PACKAGE_LOADED_FIL; } } if (cb_data->state == PARSING_OTH) { if ((*pkg)->loadingflags & CR_PACKAGE_LOADED_OTH) { // For package with this checksum, the other (changelogs) were // already loaded. *pkg = NULL; } else { // Make a note that other is parsed (*pkg)->loadingflags |= CR_PACKAGE_LOADED_OTH; } } if (*pkg && cb_data->chunk) { assert(!(*pkg)->chunk); (*pkg)->chunk = cb_data->chunk; } } return CR_CB_RET_OK; } static int pkgcb(cr_Package *pkg, void *cbdata, G_GNUC_UNUSED GError **err) { cr_CbData *cb_data = cbdata; if (cb_data->chunk) { assert(pkg->chunk == cb_data->chunk); pkg->chunk = NULL; } return CR_CB_RET_OK; } static int cr_load_xml_files(GHashTable *hashtable, const char *primary_xml_path, const char *filelists_xml_path, const char *other_xml_path, GStringChunk *chunk, GHashTable *pkglist_ht, GError **err) { cr_CbData cb_data; GError *tmp_err = NULL; assert(hashtable); // Prepare cb data cb_data.state = PARSING_PRI; cb_data.ht = hashtable; cb_data.chunk = chunk; cb_data.pkglist_ht = pkglist_ht; cb_data.ignored_pkgIds = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); cb_data.pkgKey = G_GINT64_CONSTANT(0); cr_xml_parse_primary(primary_xml_path, primary_newpkgcb, &cb_data, primary_pkgcb, &cb_data, cr_warning_cb, "Primary XML parser", (filelists_xml_path) ? 0 : 1, &tmp_err); g_hash_table_destroy(cb_data.ignored_pkgIds); cb_data.ignored_pkgIds = NULL; if (tmp_err) { int code = tmp_err->code; g_debug("primary.xml parsing error: %s", tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "primary.xml parsing: "); return code; } cb_data.state = PARSING_FIL; if (filelists_xml_path) { cr_xml_parse_filelists(filelists_xml_path, newpkgcb, &cb_data, pkgcb, &cb_data, cr_warning_cb, "Filelists XML parser", &tmp_err); if (tmp_err) { int code = tmp_err->code; g_debug("filelists.xml parsing error: %s", tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "filelists.xml parsing: "); return code; } } cb_data.state = PARSING_OTH; if (other_xml_path) { cr_xml_parse_other(other_xml_path, newpkgcb, &cb_data, pkgcb, &cb_data, cr_warning_cb, "Other XML parser", &tmp_err); if (tmp_err) { int code = tmp_err->code; g_debug("other.xml parsing error: %s", tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "other.xml parsing: "); return code; } } return CRE_OK; } static gint module_read_fn (void *data, unsigned char *buffer, size_t size, size_t *size_read) { int ret; GError *tmp_err = NULL; CR_FILE *cr_file = (CR_FILE *)data; ret = cr_read (cr_file, buffer, size, &tmp_err); if (ret == CR_CW_ERR) { g_clear_pointer (&tmp_err, g_error_free); return 0; } *size_read = ret; return 1; } #ifdef WITH_LIBMODULEMD int cr_metadata_load_modulemd(cr_Metadata *md, struct cr_MetadataLocation *ml, GError **err) { int ret; gboolean result; GError *tmp_err = NULL; CR_FILE *modulemd = NULL; g_autoptr (GPtrArray) failures = NULL; md->moduleindex = modulemd_module_index_new(); if (!md->moduleindex) { g_set_error (err, ERR_DOMAIN, CRE_MEMORY, "Could not allocate module index"); return CRE_MEMORY; } cr_Metadatum *modulemd_metadatum = g_slist_find_custom(ml->additional_metadata, "modules", cr_cmp_metadatum_type)->data; /* Open the metadata location */ modulemd = cr_open(modulemd_metadatum->name, CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, &tmp_err); if 
(tmp_err) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", modulemd_metadatum->name); return code; } result = modulemd_module_index_update_from_custom (md->moduleindex, module_read_fn, modulemd, TRUE, &failures, &tmp_err); if (!result) { if (!tmp_err){ g_set_error(err, CRE_MODULEMD, CREATEREPO_C_ERROR, "Unknown error in libmodulemd with %s", modulemd_metadatum->name); }else{ g_propagate_error (err, tmp_err); } return CRE_MODULEMD; } ret = CRE_OK; cr_close(modulemd, &tmp_err); if (tmp_err) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while closing: "); } return ret; } #endif /* WITH_LIBMODULEMD */ int cr_metadata_load_xml(cr_Metadata *md, struct cr_MetadataLocation *ml, GError **err) { int result; GError *tmp_err = NULL; GHashTable *intern_hashtable; // key is checksum (pkgId) cr_HashTableKeyDupAction dupaction = md->dupaction; assert(md); assert(ml); assert(!err || *err == NULL); if (!ml->pri_xml_href) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "primary.xml file is missing"); return CRE_BADARG; } // Load metadata intern_hashtable = cr_new_metadata_hashtable(); result = cr_load_xml_files(intern_hashtable, ml->pri_xml_href, ml->fil_xml_href, ml->oth_xml_href, md->chunk, md->pkglist_ht, &tmp_err); if (result != CRE_OK) { g_critical("%s: Error encountered while parsing", __func__); g_propagate_prefixed_error(err, tmp_err, "Error encountered while parsing:"); cr_destroy_metadata_hashtable(intern_hashtable); return result; } g_debug("%s: Parsed items: %d", __func__, g_hash_table_size(intern_hashtable)); // Fill user hashtable and use user selected key GHashTableIter iter; gpointer p_key, p_value; GHashTable *ignored_keys = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); g_hash_table_iter_init (&iter, intern_hashtable); while (g_hash_table_iter_next (&iter, &p_key, &p_value)) { cr_Package *pkg = (cr_Package *) p_value; cr_Package *epkg; gpointer new_key; switch (md->key) { case CR_HT_KEY_FILENAME: new_key = cr_get_filename(pkg->location_href); break; case CR_HT_KEY_HREF: new_key = cr_get_cleaned_href(pkg->location_href); break; case CR_HT_KEY_HASH: new_key = pkg->pkgId; break; case CR_HT_KEY_NAME: new_key = pkg->name; break; default: // Well, this SHOULD never happend! // (md->key SHOULD be setted only by cr_metadata_new() // and it SHOULD set only valid key values) g_critical("%s: Unknown hash table key selected", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type"); return CRE_ASSERT; } epkg = g_hash_table_lookup(md->ht, new_key); if (epkg) { // Such key already exists if (dupaction == CR_HT_DUPACT_KEEPFIRST) { g_debug("%s: Key \"%s\" already exists in hashtable - Keeping the first occurrence", __func__, (char *) new_key); } else { if (pkg->time_file != epkg->time_file || pkg->size_package != epkg->size_package || g_strcmp0(pkg->pkgId, epkg->pkgId) || g_strcmp0(cr_get_filename(pkg->location_href), cr_get_filename(epkg->location_href)) ) { // We got a key (checksum, filename, pkg name, ..) // which has a multiple occurences which are different. // Ignore such key g_debug("%s: Key \"%s\" is present multiple times and with " "different values. Ignoring all occurrences. 
" "[size_package: %"G_GINT64_FORMAT"|%"G_GINT64_FORMAT "; time_file: %"G_GINT64_FORMAT"|%"G_GINT64_FORMAT "; pkgId: %s|%s; basename: %s|%s]", __func__, (gchar *) new_key, pkg->size_package, epkg->size_package, pkg->time_file, epkg->time_file, pkg->pkgId, epkg->pkgId, cr_get_filename(pkg->location_href), cr_get_filename(epkg->location_href)); g_hash_table_insert(ignored_keys, g_strdup((gchar *) new_key), NULL); } } // Remove the package from the iterator anyway g_hash_table_iter_remove(&iter); } else { g_hash_table_insert(md->ht, new_key, p_value); g_hash_table_iter_steal(&iter); } } // Remove ignored_keys from resulting hashtable g_hash_table_iter_init(&iter, ignored_keys); while (g_hash_table_iter_next(&iter, &p_key, &p_value)) { char *key = (gchar *) p_key; g_hash_table_remove(md->ht, key); } // How much items we really use g_debug("%s: Really usable items: %d", __func__, g_hash_table_size(md->ht)); // Cleanup g_hash_table_destroy(ignored_keys); cr_destroy_metadata_hashtable(intern_hashtable); result = CRE_OK; #ifdef WITH_LIBMODULEMD if (g_slist_find_custom(ml->additional_metadata, "modules", cr_cmp_metadatum_type)){ result = cr_metadata_load_modulemd(md, ml, err); } #endif /* WITH_LIBMODULEMD */ return result; } int cr_metadata_locate_and_load_xml(cr_Metadata *md, const char *repopath, GError **err) { int ret; struct cr_MetadataLocation *ml; GError *tmp_err = NULL; assert(md); assert(repopath); ml = cr_locate_metadata(repopath, TRUE, &tmp_err); if (tmp_err) { g_clear_pointer(&ml, cr_metadatalocation_free); int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } ret = cr_metadata_load_xml(md, ml, err); cr_metadatalocation_free(ml); return ret; } createrepo_c-0.17.0/src/load_metadata.h000066400000000000000000000126241400672373200177620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_LOAD_METADATA_H__ #define __C_CREATEREPOLIB_LOAD_METADATA_H__ #include #include "locate_metadata.h" #ifdef __cplusplus extern "C" { #endif /** \defgroup load_metadata Load metadata API. * * Module for loading yum xml metadata. * * Example: * * \code * int ret; * cr_Metadata *oldmetadata; * GHashTable hashtable; * * // Create new metadata object * oldmetadata = cr_metadata_new(CR_HT_KEY_FILENAME, 1, NULL); * // Load metadata (path to directory which contains repodata/ subdir) * ret = cr_metadata_locate_and_load_xml(oldmetadata, "/foo/bar/repo/") * // Check return code * if (ret != CR_LOAD_METADATA_OK) exit(1); * // Get hash table with all loaded packages (key is package relative path) * hashtable = cr_metadata_hashtable(oldmetadata); * // What to do with hashtable? 
* // See: http://developer.gnome.org/glib/2.30/glib-Hash-Tables.html * \endcode * * \addtogroup load_metadata * @{ */ /** Package attribute used as key in the hashtable. */ typedef enum { CR_HT_KEY_DEFAULT, /*!< default = pkg hash */ CR_HT_KEY_HASH = CR_HT_KEY_DEFAULT, /*!< pkg hash (cr_Package ->pkgId) */ CR_HT_KEY_NAME, /*!< pkg name (cr_Package ->name) */ CR_HT_KEY_FILENAME, /*!< pkg filename (cr_Package ->location_href) */ CR_HT_KEY_HREF, /*!< pkg location */ CR_HT_KEY_SENTINEL, /*!< last element, terminator, .. */ } cr_HashTableKey; /** Internally, and by default, the loaded metadata are * indexed by pkgId (pkg's hash). But when a different attribute is * selected for indexing (see cr_HashTableKey), multiple packages may * end up with the same (key) attribute. * This enum lists the provided methods for resolving such conflicts. */ typedef enum { CR_HT_DUPACT_KEEPFIRST = 0, /*!< First encountered item wins, the others will be removed - Default */ CR_HT_DUPACT_REMOVEALL, /*!< Remove all conflicting items. */ CR_HT_DUPACT_SENTINEL, /*!< Last element, terminator, ... */ } cr_HashTableKeyDupAction; /** Metadata object */ typedef struct _cr_Metadata cr_Metadata; /** Return cr_HashTableKey from a cr_Metadata * @param md cr_Metadata object. * @return Key type */ cr_HashTableKey cr_metadata_key(cr_Metadata *md); /** Return hashtable from a cr_Metadata * @param md cr_Metadata object. * @return Pointer to internal GHashTable. */ GHashTable *cr_metadata_hashtable(cr_Metadata *md); /** Create a new (empty) metadata object. * It is NOT thread safe to load data into a single cr_Metadata * from multiple threads. But non-modifying access to the loaded data * in cr_Metadata is thread safe. * @param key key specifies which value will be used as the key * in the hash table * @param use_single_chunk use only one string chunk (all loaded packages * share one string chunk in the cr_Metadata object) * Packages will not be standalone objects. * This option leads to less memory consumption. * @param pkglist load only packages whose base filename is in this * list. If the param is NULL, all packages are loaded. * @return empty cr_Metadata object */ cr_Metadata *cr_metadata_new(cr_HashTableKey key, int use_single_chunk, GSList *pkglist); /** Set the action for dealing with duplicated items. */ gboolean cr_metadata_set_dupaction(cr_Metadata *md, cr_HashTableKeyDupAction dupaction); /** Destroy metadata. * @param md cr_Metadata object */ void cr_metadata_free(cr_Metadata *md); /** Load metadata from the specified location. * @param md metadata object * @param ml metadata location * @param err GError ** * @return cr_Error code */ int cr_metadata_load_xml(cr_Metadata *md, struct cr_MetadataLocation *ml, GError **err); /** Locate and load metadata from the specified path.
* @param md metadata object * @param repopath path to repo (to directory with repodata/ subdir) * @param err GError ** * @return cr_Error code */ int cr_metadata_locate_and_load_xml(cr_Metadata *md, const char *repopath, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_LOAD_METADATA_H__ */ createrepo_c-0.17.0/src/locate_metadata.c000066400000000000000000000302371400672373200203050ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include "error.h" #include "misc.h" #include "locate_metadata.h" #include "repomd.h" #include "xml_parser.h" #include "cleanup.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define TMPDIR_PATTERN "createrepo_c_tmp_repo_XXXXXX" #define FORMAT_XML 1 #define FORMAT_LEVEL 0 void cr_metadatum_free(cr_Metadatum *m) { if (!m) return; g_free(m->name); g_free(m->type); g_free(m); } void cr_metadatalocation_free(struct cr_MetadataLocation *ml) { if (!ml) return; if (ml->tmp && ml->local_path) { g_debug("%s: Removing %s", __func__, ml->local_path); cr_remove_dir(ml->local_path, NULL); } g_free(ml->pri_xml_href); g_free(ml->fil_xml_href); g_free(ml->oth_xml_href); g_free(ml->pri_sqlite_href); g_free(ml->fil_sqlite_href); g_free(ml->oth_sqlite_href); g_free(ml->repomd); g_free(ml->original_url); g_free(ml->local_path); g_slist_free_full(ml->additional_metadata, (GDestroyNotify) cr_metadatum_free); g_free(ml); } gint cr_cmp_metadatum_type(gconstpointer metadatum, gconstpointer type){ return g_strcmp0(((cr_Metadatum *) metadatum)->type, type); } gint cr_cmp_repomd_record_type(gconstpointer repomd_record, gconstpointer type){ return g_strcmp0(((cr_RepomdRecord *) repomd_record)->type, type); } gchar* cr_copy_metadatum(const gchar *src, const gchar *tmp_out_repo, GError **err) { g_message("Using %s from target repo", cr_get_filename(src)); gchar *metadatum = g_strconcat(tmp_out_repo, cr_get_filename(src), NULL); g_debug("Copy metadatum %s -> %s", src, metadatum); if (!cr_better_copy_file(src, metadatum, err)) { g_critical("Error while copy %s -> %s: %s", src, metadatum, (*err)->message); g_clear_error(err); return 0; } return metadatum; } GSList* cr_insert_additional_metadatum(const gchar *path, const gchar *type, GSList *additional_metadata) { GSList *elem = g_slist_find_custom(additional_metadata, type, cr_cmp_metadatum_type); if (elem){ g_free(((cr_Metadatum *) elem->data)->name); ((cr_Metadatum *) elem->data)->name = g_strdup(path); return additional_metadata; } cr_Metadatum *m = g_malloc0(sizeof(cr_Metadatum)); m->name = g_strdup(path); m->type = g_strdup(type); additional_metadata = g_slist_prepend(additional_metadata, m); g_message("type %s added to list from path: %s ", type, path); return 
additional_metadata; } struct cr_MetadataLocation * cr_parse_repomd(const char *repomd_path, const char *repopath, int ignore_sqlite) { assert(repomd_path); GError *tmp_err = NULL; cr_Repomd *repomd = cr_repomd_new(); cr_xml_parse_repomd(repomd_path, repomd, cr_warning_cb, "Repomd xml parser", &tmp_err); if (tmp_err) { g_critical("%s: %s", __func__, tmp_err->message); g_error_free(tmp_err); cr_repomd_free(repomd); return NULL; } struct cr_MetadataLocation *mdloc; mdloc = g_malloc0(sizeof(struct cr_MetadataLocation)); mdloc->repomd = g_strdup(repomd_path); mdloc->local_path = g_strdup(repopath); for (GSList *elem = repomd->records; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *record = elem->data; gchar *full_location_href = g_build_filename( repopath, (char *) record->location_href, NULL); if (!g_strcmp0(record->type, "primary")) mdloc->pri_xml_href = full_location_href; else if (!g_strcmp0(record->type, "primary_db") && !ignore_sqlite) mdloc->pri_sqlite_href = full_location_href; else if (!g_strcmp0(record->type, "filelists")) mdloc->fil_xml_href = full_location_href; else if (!g_strcmp0(record->type, "filelists_db") && !ignore_sqlite) mdloc->fil_sqlite_href = full_location_href; else if (!g_strcmp0(record->type, "other")) mdloc->oth_xml_href = full_location_href; else if (!g_strcmp0(record->type, "other_db") && !ignore_sqlite) mdloc->oth_sqlite_href = full_location_href; else if ( !g_str_has_prefix(record->type, "primary_" ) && !g_str_has_prefix(record->type, "filelists_" ) && !g_str_has_prefix(record->type, "other_" ) ) { mdloc->additional_metadata = cr_insert_additional_metadatum(full_location_href, record->type, mdloc->additional_metadata); g_free(full_location_href); } else g_free(full_location_href); } cr_repomd_free(repomd); return mdloc; } static struct cr_MetadataLocation * cr_get_local_metadata(const char *repopath, gboolean ignore_sqlite) { _cleanup_free_ gchar *repomd = NULL; struct cr_MetadataLocation *ret = NULL; if (!repopath) return ret; if (!g_file_test(repopath, G_FILE_TEST_EXISTS|G_FILE_TEST_IS_DIR)) { g_warning("%s: %s is not a directory", __func__, repopath); return ret; } // Create path to repomd.xml and check if it exists repomd = g_build_filename(repopath, "repodata", "repomd.xml", NULL); if (!g_file_test(repomd, G_FILE_TEST_EXISTS)) { g_debug("%s: %s doesn't exists", __func__, repomd); return ret; } ret = cr_parse_repomd(repomd, repopath, ignore_sqlite); return ret; } static struct cr_MetadataLocation * cr_get_remote_metadata(const char *repopath, gboolean ignore_sqlite) { CURL *handle = NULL; _cleanup_free_ gchar *tmp_dir = NULL; _cleanup_free_ gchar *tmp_repodata = NULL; _cleanup_free_ gchar *tmp_repomd = NULL; _cleanup_free_ gchar *url = NULL; struct cr_MetadataLocation *r_location = NULL; struct cr_MetadataLocation *ret = NULL; _cleanup_error_free_ GError *tmp_err = NULL; if (!repopath) return ret; // Create temporary repo in /tmp tmp_dir = g_build_filename(g_get_tmp_dir(), TMPDIR_PATTERN, NULL); if (!mkdtemp(tmp_dir)) { g_critical("%s: Cannot create a temporary directory: %s", __func__, g_strerror(errno)); return ret; } g_debug("%s: Using tmp dir: %s", __func__, tmp_dir); // Create repodata subdir in tmp dir tmp_repodata = g_build_filename(tmp_dir, "repodata", NULL); if (g_mkdir (tmp_repodata, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { g_critical("%s: Cannot create a temporary directory", __func__); return ret; } // Prepare temporary repomd.xml filename tmp_repomd = g_build_filename(tmp_repodata, "repomd.xml", NULL); // Prepare repomd.xml URL if 
(g_str_has_suffix(repopath, "/")) url = g_strconcat(repopath, "repodata/repomd.xml", NULL); else url = g_strconcat(repopath, "/repodata/repomd.xml", NULL); // Create and setup CURL handle handle = curl_easy_init(); // Fail on HTTP error (return code >= 400) if (curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1) != CURLE_OK) { g_critical("%s: curl_easy_setopt(CURLOPT_FAILONERROR) error", __func__); goto get_remote_metadata_cleanup; } // Follow redirs if (curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK) { g_critical("%s: curl_easy_setopt(CURLOPT_FOLLOWLOCATION) error", __func__); goto get_remote_metadata_cleanup; } // Maximal number of redirects if (curl_easy_setopt(handle, CURLOPT_MAXREDIRS, 6) != CURLE_OK) { g_critical("%s: curl_easy_setopt(CURLOPT_MAXREDIRS) error", __func__); goto get_remote_metadata_cleanup; } // Download repomd.xml cr_download(handle, url, tmp_repomd, &tmp_err); if (tmp_err) { g_critical("%s: %s", __func__, tmp_err->message); goto get_remote_metadata_cleanup; } // Parse downloaded repomd.xml r_location = cr_parse_repomd(tmp_repomd, repopath, ignore_sqlite); if (!r_location) { g_critical("%s: repomd.xml parser failed on %s", __func__, tmp_repomd); goto get_remote_metadata_cleanup; } // Download all other repofiles if (r_location->pri_xml_href) cr_download(handle, r_location->pri_xml_href, tmp_repodata, &tmp_err); if (!tmp_err && r_location->fil_xml_href) cr_download(handle, r_location->fil_xml_href, tmp_repodata, &tmp_err); if (!tmp_err && r_location->oth_xml_href) cr_download(handle, r_location->oth_xml_href, tmp_repodata, &tmp_err); if (!tmp_err && r_location->pri_sqlite_href) cr_download(handle, r_location->pri_sqlite_href, tmp_repodata, &tmp_err); if (!tmp_err && r_location->fil_sqlite_href) cr_download(handle, r_location->fil_sqlite_href, tmp_repodata, &tmp_err); if (!tmp_err && r_location->oth_sqlite_href) cr_download(handle, r_location->oth_sqlite_href, tmp_repodata, &tmp_err); if (!tmp_err && r_location->additional_metadata){ GSList *element = r_location->additional_metadata; for (; element; element=g_slist_next(element)) { cr_download(handle, ((cr_Metadatum *)element->data)->name, tmp_repodata, &tmp_err); if (tmp_err) break; } } cr_metadatalocation_free(r_location); if (tmp_err) { g_critical("%s: Error while downloadig files: %s", __func__, tmp_err->message); goto get_remote_metadata_cleanup; } g_debug("%s: Remote metadata was successfully downloaded", __func__); // Parse downloaded data ret = cr_get_local_metadata(tmp_dir, ignore_sqlite); if (ret) ret->tmp = 1; get_remote_metadata_cleanup: if (handle) curl_easy_cleanup(handle); if (!ret) cr_remove_dir(tmp_dir, NULL); return ret; } struct cr_MetadataLocation * cr_locate_metadata(const char *repopath, gboolean ignore_sqlite, GError **err) { struct cr_MetadataLocation *ret = NULL; assert(repopath); assert(!err || *err == NULL); if (g_str_has_prefix(repopath, "ftp://") || g_str_has_prefix(repopath, "http://") || g_str_has_prefix(repopath, "https://")) { // Remote metadata - Download them via curl ret = cr_get_remote_metadata(repopath, ignore_sqlite); } else { // Local metadata if (g_str_has_prefix(repopath, "file:///")) repopath += 7; ret = cr_get_local_metadata(repopath, ignore_sqlite); } if (ret) { ret->original_url = g_strdup(repopath); } else { g_set_error(err, ERR_DOMAIN, CRE_IO, "Metadata not found at %s.", repopath); } #ifndef WITH_LIBMODULEMD if (ret) { if (g_slist_find_custom(ret->additional_metadata, "modules", cr_cmp_metadatum_type)) { g_set_error (err, ERR_DOMAIN, CRE_MODULEMD, "Module 
metadata found in repository, but createrepo_c " "was not compiled with libmodulemd support."); } } #endif /* ! WITH_LIBMODULEMD */ return ret; } createrepo_c-0.17.0/src/locate_metadata.h000066400000000000000000000127501400672373200203120ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_LOCATE_METADATA_H__ #define __C_CREATEREPOLIB_LOCATE_METADATA_H__ #include <glib.h> #ifdef __cplusplus extern "C" { #endif /** \defgroup locate_metadata Locate metadata API. * \addtogroup locate_metadata * @{ */ /** Structure representing metadata location. */ struct cr_MetadataLocation { char *pri_xml_href; /*!< path to primary.xml */ char *fil_xml_href; /*!< path to filelists.xml */ char *oth_xml_href; /*!< path to other.xml */ char *pri_sqlite_href; /*!< path to primary.sqlite */ char *fil_sqlite_href; /*!< path to filelists.sqlite */ char *oth_sqlite_href; /*!< path to other.sqlite */ GSList *additional_metadata; /*!< list of cr_Metadatum: paths to additional metadata such as updateinfo, modulemd, .. */ char *repomd; /*!< path to repomd.xml */ char *original_url; /*!< original path of repo from commandline param */ char *local_path; /*!< local path to repo */ int tmp; /*!< if true - metadata were downloaded and will be removed during cr_metadatalocation_free() */ }; /** Structure representing additional metadata location and type. * It is used to hold first the old and later the new location, while keeping * the type information.
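* (For example, a "group" metadatum first points at the comps file inside the source repo and later at the copy placed in the new repodata directory.)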
*/ typedef struct { gchar *name; gchar *type; } cr_Metadatum; struct cr_MetadataLocation * cr_parse_repomd(const char *repomd_path, const char *repopath, int ignore_sqlite); /** Inserts an additional metadatum into the list of * additional metadata; if this type is already * present, it gets overridden. * * @param path Path to metadatum * @param type Type of metadatum * @param additional_metadata List of additional metadata * @return Original list with new element */ GSList* cr_insert_additional_metadatum(const gchar *path, const gchar *type, GSList *additional_metadata); /** Compares type (string) of specified metadatum * with second parameter string (type) * * @param metadatum Cmp type of this metadatum * @param type String value * @return an integer less than, equal to, or greater than zero, * if metadatum type is <, == or > than type (string cmp) */ gint cr_cmp_metadatum_type(gconstpointer metadatum, gconstpointer type); /** Compares type (string) of specified cr_RepomdRecord * with second parameter string (type) * * @param cr_RepomdRecord Cmp type of this cr_RepomdRecord * @param type String value * @return an integer less than, equal to, or greater than zero, * if repomdRecord type is <, == or > than type (string cmp) */ gint cr_cmp_repomd_record_type(gconstpointer repomd_record, gconstpointer type); /** Parses repomd.xml and returns a filled cr_MetadataLocation structure. * Remote repodata (repopath with prefix "ftp://" or "http://") are downloaded * into a temporary directory and removed when the cr_metadatalocation_free() * is called on the cr_MetadataLocation. * @param repopath path to directory with repodata/ subdirectory * @param ignore_sqlite if ignore_sqlite != 0 sqlite dbs are ignored * @param err GError ** * @return filled cr_MetadataLocation structure or NULL */ struct cr_MetadataLocation *cr_locate_metadata(const char *repopath, gboolean ignore_sqlite, GError **err); /** Free cr_MetadataLocation. If repodata were downloaded remove * a temporary directory with repodata. * @param ml MetadataLocation */ void cr_metadatalocation_free(struct cr_MetadataLocation *ml); /** Free cr_Metadatum. * @param m Metadatum */ void cr_metadatum_free(cr_Metadatum *m); /** Copies a metadata file exactly, even a hashed name. * It first constructs the target path (location + name), * then it copies the file to that location. * * Metadatum as in singular of metadata; it is e.g. a groupfile, updateinfo, ... * * @param src Where we are copying from * @param tmp_out_repo Copying destination dir * @param err GError ** * @return Path to copied file */ gchar* cr_copy_metadatum(const gchar *src, const gchar *tmp_out_repo, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_LOCATE_METADATA_H__ */ createrepo_c-0.17.0/src/mergerepo_c.c000066400000000000000000002225061400672373200174670ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include #ifdef WITH_LIBMODULEMD #include #endif /* WITH_LIBMODULEMD */ #include "error.h" #include "createrepo_shared.h" #include "version.h" #include "helpers.h" #include "metadata_internal.h" #include "misc.h" #include "locate_metadata.h" #include "load_metadata.h" #include "package.h" #include "xml_dump.h" #include "repomd.h" #include "sqlite.h" #include "threads.h" #include "xml_file.h" #include "cleanup.h" #include "koji.h" #define DEFAULT_OUTPUTDIR "merged_repo/" #include "mergerepo_c.h" struct CmdOptions _cmd_options = { .db_compression_type = DEFAULT_DB_COMPRESSION_TYPE, .groupfile_compression_type = DEFAULT_GROUPFILE_COMPRESSION_TYPE, .merge_method = MM_DEFAULT, .unique_md_filenames = TRUE, .simple_md_filenames = FALSE, .zck_compression = FALSE, .zck_dict_dir = NULL, }; // TODO: // - develop the architecture into lists, the way mergedrepo does it static GOptionEntry cmd_entries[] = { { "version", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.version), "Show program's version number and exit", NULL }, { "repo", 'r', 0, G_OPTION_ARG_FILENAME_ARRAY, &(_cmd_options.repos), "Repo url", "REPOS" }, { "repo-prefix-search", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.repo_prefix_search), "Repository prefix to be replaced by NEW_PREFIX.", "OLD_PREFIX" }, { "repo-prefix-replace", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.repo_prefix_replace), "Repository prefix URL by which the OLD_PREFIX is replaced.", "NEW_PREFIX" }, { "archlist", 'a', 0, G_OPTION_ARG_STRING, &(_cmd_options.archlist), "Defaults to all arches - otherwise specify arches", "ARCHLIST" }, { "database", 'd', 0, G_OPTION_ARG_NONE, &(_cmd_options.database), "", NULL }, { "no-database", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.no_database), "", NULL }, { "verbose", 'v', 0, G_OPTION_ARG_NONE, &(_cmd_options.verbose), "", NULL }, { "outputdir", 'o', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.outputdir), "Location to create the repository", "OUTPUTDIR" }, { "nogroups", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.nogroups), "Do not merge group (comps) metadata", NULL }, { "noupdateinfo", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.noupdateinfo), "Do not merge updateinfo metadata", NULL }, { "compress-type", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.compress_type), "Which compression type to use", "COMPRESS_TYPE" }, #ifdef WITH_ZCHUNK { "zck", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.zck_compression), "Generate zchunk files as well as the standard repodata.", NULL }, { "zck-dict-dir", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.zck_dict_dir), "Directory containing compression dictionaries for use by zchunk", "ZCK_DICT_DIR" }, #endif { "method", 0, 0, G_OPTION_ARG_STRING, &(_cmd_options.merge_method_str), "Specify merge method for packages with the same name and arch (available" " merge methods: repo (default), ts, nvr)", "MERGE_METHOD" }, { "all", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.all), "Include all packages with the same name and arch if version or release " "is different. 
If used --method argument is ignored!", NULL }, { "noarch-repo", 0, 0, G_OPTION_ARG_FILENAME, &(_cmd_options.noarch_repo_url), "Packages with noarch architecture will be replaced by package from this " "repo if exists in it.", "URL" }, { "unique-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.unique_md_filenames), "Include the file's checksum in the metadata filename, helps HTTP caching (default).", NULL }, { "simple-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.simple_md_filenames), "Do not include the file's checksum in the metadata filename.", NULL }, { "omit-baseurl", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.omit_baseurl), "Don't add a baseurl to packages that don't have one before." , NULL}, // -- Options related to Koji-mergerepos behaviour { "koji", 'k', 0, G_OPTION_ARG_NONE, &(_cmd_options.koji), "Enable koji mergerepos behaviour. (Optionally select simple mode with: --simple)", NULL}, { "simple", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.koji_simple), "Enable koji specific simple merge mode where we keep even packages with " "identical NEVRAs. Only works with combination with --koji/-k.", NULL}, { "pkgorigins", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.pkgorigins), "Enable standard mergerepos behavior while also providing the " "pkgorigins file for koji.", NULL}, { "arch-expand", 0, 0, G_OPTION_ARG_NONE, &(_cmd_options.arch_expand), "Add multilib architectures for specified archlist and expand all of them. " "Only works with combination with --archlist.", NULL}, { "groupfile", 'g', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.groupfile), "Path to groupfile to include in metadata.", "GROUPFILE" }, { "blocked", 'b', 0, G_OPTION_ARG_FILENAME, &(_cmd_options.blocked), "A file containing a list of srpm names to exclude from the merged repo. 
" "Only works with combination with --koji/-k.", "FILE" }, // -- Options related to Koji-mergerepos behaviour - end { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL } }; GSList * append_arch(GSList *list, gchar *arch, gboolean expand) { // Try to find arch in the list for (GSList *elem = list; elem; elem = g_slist_next(elem)) { if (!g_strcmp0(elem->data, arch)) return list; // Arch already exists } list = g_slist_prepend(list, g_strdup(arch)); if (expand) { // expand arch if (!g_strcmp0(arch, "i386")) { list = append_arch(list, "i486", FALSE); list = append_arch(list, "i586", FALSE); list = append_arch(list, "geode", FALSE); list = append_arch(list, "i686", FALSE); list = append_arch(list, "athlon", FALSE); } else if (!g_strcmp0(arch, "x86_64")) { list = append_arch(list, "ia32e", FALSE); list = append_arch(list, "amd64", FALSE); } else if (!g_strcmp0(arch, "ppc64")) { list = append_arch(list, "ppc64pseries", FALSE); list = append_arch(list, "ppc64iseries", FALSE); } else if (!g_strcmp0(arch, "sparc64")) { list = append_arch(list, "sparc64v", FALSE); list = append_arch(list, "sparc64v2", FALSE); } else if (!g_strcmp0(arch, "sparc")) { list = append_arch(list, "sparcv8", FALSE); list = append_arch(list, "sparcv9", FALSE); list = append_arch(list, "sparcv9v", FALSE); list = append_arch(list, "sparcv9v2", FALSE); } else if (!g_strcmp0(arch, "alpha")) { list = append_arch(list, "alphaev4", FALSE); list = append_arch(list, "alphaev45", FALSE); list = append_arch(list, "alphaev5", FALSE); list = append_arch(list, "alphaev56", FALSE); list = append_arch(list, "alphapca56", FALSE); list = append_arch(list, "alphaev6", FALSE); list = append_arch(list, "alphaev67", FALSE); list = append_arch(list, "alphaev68", FALSE); list = append_arch(list, "alphaev7", FALSE); } else if (!g_strcmp0(arch, "armhfp")) { list = append_arch(list, "armv7hl", FALSE); list = append_arch(list, "armv7hnl", FALSE); } else if (!g_strcmp0(arch, "arm")) { list = append_arch(list, "rmv5tel", FALSE); list = append_arch(list, "armv5tejl", FALSE); list = append_arch(list, "armv6l", FALSE); list = append_arch(list, "armv7l", FALSE); } else if (!g_strcmp0(arch, "sh4")) { list = append_arch(list, "sh4a", FALSE); } } // Always include noarch list = append_arch(list, "noarch", FALSE); return list; } GSList * append_multilib_arch(GSList *list, gchar *arch) { if (!g_strcmp0(arch, "x86_64")) list = append_arch(list, "i386", TRUE); else if (!g_strcmp0(arch, "ppc64")) list = append_arch(list, "ppc", TRUE); else if (!g_strcmp0(arch, "s390x")) list = append_arch(list, "s390", TRUE); return list; } gboolean check_arguments(struct CmdOptions *options) { int x; gboolean ret = TRUE; if (options->outputdir){ options->out_dir = cr_normalize_dir_path(options->outputdir); } else { options->out_dir = g_strdup(DEFAULT_OUTPUTDIR); } options->out_repo = g_strconcat(options->out_dir, "repodata/", NULL); options->tmp_out_repo = g_strconcat(options->out_dir, ".repodata/", NULL); // Process repos x = 0; options->repo_list = NULL; while (options->repos && options->repos[x] != NULL) { char *normalized = cr_normalize_dir_path(options->repos[x]); if (normalized) { options->repo_list = g_slist_prepend(options->repo_list, normalized); } x++; } // Reverse come with downloading repos // options->repo_list = g_slist_reverse (options->repo_list); // Process archlist options->arch_list = NULL; if (options->archlist) { x = 0; gchar **arch_set = g_strsplit_set(options->archlist, ",;", -1); while (arch_set && arch_set[x] != NULL) { gchar *arch = arch_set[x]; if (arch[0] 
!= '\0') { // Append (and expand) the arch options->arch_list = append_arch(options->arch_list, arch, options->koji || options->arch_expand); // Support multilib repos if (options->koji || options->arch_expand) options->arch_list = append_multilib_arch(options->arch_list, arch); } x++; } g_strfreev(arch_set); } else if (options->koji) { // Work only with noarch packages if --koji and no archlist specified options->arch_list = append_arch(options->arch_list, "noarch", TRUE); } if (!options->archlist && options->arch_expand){ g_critical("--arch-expand cannot be used without -a/--archlist argument"); ret = FALSE; } // Compress type if (options->compress_type) { cr_CompressionType type; type = cr_compression_type(options->compress_type); if (type == CR_CW_UNKNOWN_COMPRESSION) { g_critical("Compression %s not available: Please choose from: " "gz or bz2 or xz", options->compress_type); ret = FALSE; } else { options->db_compression_type = type; options->groupfile_compression_type = type; } } // Merge method if (options->merge_method_str) { if (options->koji) { g_warning("With -k/--koji argument merge method is ignored (--all is implicitly used)."); } else if (!g_strcmp0(options->merge_method_str, "repo")) { options->merge_method = MM_FIRST_FROM_IDENTICAL_NA; } else if (!g_strcmp0(options->merge_method_str, "ts")) { options->merge_method = MM_NEWEST_FROM_IDENTICAL_NA; } else if (!g_strcmp0(options->merge_method_str, "nvr")) { options->merge_method = MM_WITH_HIGHEST_NEVRA; } else { g_critical("Unknown merge method %s", options->merge_method_str); ret = FALSE; } } // Check simple filenames if (options->simple_md_filenames) { options->unique_md_filenames = FALSE; } if (options->all) options->merge_method = MM_FIRST_FROM_IDENTICAL_NEVRA; // Koji arguments if (options->koji) { options->all = TRUE; if (options->koji_simple) { options->merge_method = MM_ALL_WITH_IDENTICAL_NEVRA; }else{ options->merge_method = MM_FIRST_FROM_IDENTICAL_NEVRA; } } if (options->blocked) { if (!options->koji) { g_critical("-b/--blocked cannot be used without -k/--koji argument"); ret = FALSE; } if (!g_file_test(options->blocked, G_FILE_TEST_EXISTS)) { g_critical("File %s doesn't exists", options->blocked); ret = FALSE; } } if (options->repo_prefix_search || options->repo_prefix_replace) { if (options->repo_prefix_search == NULL) { g_critical("--repo-prefix-replace must be used together with --repo-prefix-search"); ret = FALSE; } else if (options->repo_prefix_replace == NULL) { g_critical("--repo-prefix-search must be used together with --repo-prefix-replace"); ret = FALSE; } else if (*options->repo_prefix_search == '\0') { g_critical("--repo-prefix-search cannot be an empty string."); ret = FALSE; } } // Zchunk options if (options->zck_dict_dir && !options->zck_compression) { g_critical("Cannot use --zck-dict-dir without setting --zck"); ret = FALSE; } if (options->zck_dict_dir) options->zck_dict_dir = cr_normalize_dir_path(options->zck_dict_dir); return ret; } struct CmdOptions * parse_arguments(int *argc, char ***argv) { GError *error = NULL; GOptionContext *context; context = g_option_context_new("--repo=url --repo=url"); g_option_context_set_summary(context, "Take one or more repositories and " "merge their metadata into a new repo"); g_option_context_add_main_entries(context, cmd_entries, NULL); gboolean ret = g_option_context_parse(context, argc, argv, &error); if (!ret) { g_print("Option parsing failed: %s\n", error->message); g_option_context_free(context); g_error_free(error); return NULL; } if (*argc > 1) { 
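// g_option_context_parse() has removed all recognized options from argv, so anything left besides argv[0] is an unexpected positional argument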
g_printerr("Argument parsing failed.\n"); g_print("%s", g_option_context_get_help(context, TRUE, NULL)); return NULL; } g_option_context_free(context); return &(_cmd_options); } void free_options(struct CmdOptions *options) { g_free(options->outputdir); g_free(options->archlist); g_free(options->compress_type); g_free(options->merge_method_str); g_free(options->noarch_repo_url); g_free(options->groupfile); g_free(options->blocked); g_strfreev(options->repos); g_free(options->out_dir); g_free(options->out_repo); g_free(options->tmp_out_repo); GSList *element = NULL; // Free include_pkgs GSList for (element = options->repo_list; element; element = g_slist_next(element)) { g_free( (gchar *) element->data ); } g_slist_free(options->repo_list); // Free include_pkgs GSList for (element = options->arch_list; element; element = g_slist_next(element)) { g_free( (gchar *) element->data ); } g_slist_free(options->arch_list); } void free_merged_values(gpointer data) { GSList *element = (GSList *) data; for (; element; element=g_slist_next(element)) { cr_Package *pkg = (cr_Package *) element->data; cr_package_free(pkg); } g_slist_free((GSList *) data); } GHashTable * new_merged_metadata_hashtable() { GHashTable *hashtable = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, free_merged_values); return hashtable; } void destroy_merged_metadata_hashtable(GHashTable *hashtable) { if (hashtable) { g_hash_table_destroy(hashtable); } } // Merged table structure: {"package_name": [pkg, pkg, pkg, ...], ...} // Return codes: // 0 = Package was not added // 1 = Package was added // 2 = Package replaced old package // 3 = Package was added as a duplicate static int add_package(cr_Package *pkg, gchar *repopath, GHashTable *merged, GSList *arch_list, MergeMethod merge_method, struct KojiMergedReposStuff *koji_stuff, gboolean omit_baseurl, int repoid) { GSList *list, *element; int ret = 1; if (omit_baseurl) repopath = NULL; // Check if the package meet the command line architecture constraints if (arch_list) { gboolean right_arch = FALSE; for (element=arch_list; element; element=g_slist_next(element)) { if (!g_strcmp0(pkg->arch, (gchar *) element->data)) { right_arch = TRUE; } } if (!right_arch) { g_debug("Skip - %s (Bad arch: %s)", pkg->name, pkg->arch); return 0; } } // Koji-mergerepos specific behaviour ----------------------- if (koji_stuff) { if (!koji_allowed(pkg, koji_stuff)) return 0; // For first repo (with --koji) ignore baseURL (RhBug: 1220082) if (repoid == 0) repopath = NULL; } // Koji-mergerepos specific behaviour end -------------------- // Lookup package in the merged list = (GSList *) g_hash_table_lookup(merged, pkg->name); // Key doesn't exist yet if (!list) { list = g_slist_prepend(list, pkg); if ((!pkg->location_base || *pkg->location_base == '\0') && repopath) { pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath); } g_hash_table_insert (merged, (gpointer) g_strdup(pkg->name), (gpointer) list); return 1; } // Check if package with the architecture isn't in the list already for (element=list; element; element=g_slist_next(element)) { cr_Package *c_pkg = (cr_Package *) element->data; if (!g_strcmp0(pkg->arch, c_pkg->arch)) { // Two packages have same name and arch // Use selected merge method to determine which package should // be included switch(merge_method) { // REPO merge method case MM_FIRST_FROM_IDENTICAL_NA: // Package with the same arch already exists g_debug("Package %s (%s) already exists", pkg->name, pkg->arch); return 0; // TS merge method case 
MM_NEWEST_FROM_IDENTICAL_NA: if (pkg->time_file > c_pkg->time_file) { // Remove older package cr_package_free(c_pkg); // Replace package in element if (!pkg->location_base) pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath); element->data = pkg; return 2; } else { g_debug("Newer package %s (%s) already exists", pkg->name, pkg->arch); return 0; } break; // NVR merge method case MM_WITH_HIGHEST_NEVRA: { gboolean pkg_is_newer = FALSE; int epoch_cmp = cr_cmp_version_str(pkg->epoch, c_pkg->epoch); int version_cmp = cr_cmp_version_str(pkg->version, c_pkg->version); int release_cmp = cr_cmp_version_str(pkg->release, c_pkg->release); if (epoch_cmp == 1) pkg_is_newer = TRUE; else if (epoch_cmp == 0 && version_cmp == 1) pkg_is_newer = TRUE; else if (epoch_cmp == 0 && version_cmp == 0 && release_cmp == 1) pkg_is_newer = TRUE; if (pkg_is_newer) { // Remove older package cr_package_free(c_pkg); // Replace package in element if (!pkg->location_base) pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath); element->data = pkg; return 2; } else { g_debug("Newer version of package %s.%s " "(epoch: %s) (ver: %s) (rel: %s) already exists", pkg->name, pkg->arch, pkg->epoch ? pkg->epoch : "0", pkg->version ? pkg->version : "N/A", pkg->release ? pkg->release : "N/A"); return 0; } break; } case MM_FIRST_FROM_IDENTICAL_NEVRA: // Two packages have same name and arch but all param is used // We want to check if two packages are the same. // We already know that name and arch matches. // We need to check version and release and epoch if ((cr_cmp_version_str(pkg->epoch, c_pkg->epoch) == 0) && (cr_cmp_version_str(pkg->version, c_pkg->version) == 0) && (cr_cmp_version_str(pkg->release, c_pkg->release) == 0)) { // Both packages are the same (at least by NEVRA values) g_debug("Same version of package %s.%s " "(epoch: %s) (ver: %s) (rel: %s) already exists", pkg->name, pkg->arch, pkg->epoch ? pkg->epoch : "0", pkg->version ? pkg->version : "N/A", pkg->release ? pkg->release : "N/A"); return 0; } break; case MM_ALL_WITH_IDENTICAL_NEVRA: // We want even duplicates with exact NEVRAs if ((cr_cmp_version_str(pkg->epoch, c_pkg->epoch) == 0) && (cr_cmp_version_str(pkg->version, c_pkg->version) == 0) && (cr_cmp_version_str(pkg->release, c_pkg->release) == 0)) { // Both packages are the same (at least by NEVRA values) // We warn, but do not omit it g_debug("Duplicate rpm %s.%s " "(epoch: %s) (ver: %s) (rel: %s)", pkg->name, pkg->arch, pkg->epoch ? pkg->epoch : "0", pkg->version ? pkg->version : "N/A", pkg->release ? pkg->release : "N/A"); ret = 3; goto add_package; } break; } } } add_package: if (!pkg->location_base) { pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, repopath); } // Add package // XXX: The first list element (pointed from hashtable) must stay first! 
// g_slist_append() is suitable but non effective, insert a new element // right after first element is optimal (at least for now) if (g_slist_insert(list, pkg, 1) != list) { assert(0); } return ret; } long merge_repos(GHashTable *merged, #ifdef WITH_LIBMODULEMD ModulemdModuleIndex **module_index, #endif GSList *repo_list, GSList *arch_list, MergeMethod merge_method, GHashTable *noarch_hashtable, struct KojiMergedReposStuff *koji_stuff, gboolean omit_baseurl, gchar *repo_prefix_search, gchar *repo_prefix_replace) { long loaded_packages = 0; GSList *used_noarch_keys = NULL; #ifdef WITH_LIBMODULEMD g_autoptr(ModulemdModuleIndexMerger) merger = NULL; GError *err = NULL; merger = modulemd_module_index_merger_new(); #endif /* WITH_LIBMODULEMD */ // Load all repos int repoid = 0; GSList *element = NULL; for (element = repo_list; element; element = g_slist_next(element), repoid++) { gchar *repopath; // base url of current repodata cr_Metadata *metadata; // current repodata struct cr_MetadataLocation *ml; // location of current repodata ml = (struct cr_MetadataLocation *) element->data; if (!ml) { g_critical("Bad location!"); break; } metadata = cr_metadata_new(CR_HT_KEY_HASH, 0, NULL); repopath = cr_normalize_dir_path(ml->original_url); // Base paths in output of original createrepo doesn't have trailing '/' if (repopath && strlen(repopath) > 1) repopath[strlen(repopath)-1] = '\0'; // If repo_prefix_search and repo_prefix_replace is set, replace // repo_prefix_search in the repopath by repo_prefix_replace. if (repo_prefix_search && *repo_prefix_search && repo_prefix_replace && g_str_has_prefix(repopath, repo_prefix_search)) { gchar *repo_suffix = repopath + strlen(repo_prefix_search); gchar *new_repopath = g_strconcat(repo_prefix_replace, repo_suffix, NULL); g_free(repopath); repopath = new_repopath; } g_debug("Processing: %s", repopath); if (cr_metadata_load_xml(metadata, ml, NULL) != CRE_OK) { cr_metadata_free(metadata); g_critical("Cannot load repo: \"%s\"", ml->repomd); break; } #ifdef WITH_LIBMODULEMD if (cr_metadata_modulemd(metadata)) { modulemd_module_index_merger_associate_index ( merger, cr_metadata_modulemd (metadata), 0); } #endif /* WITH_LIBMODULEMD */ GHashTableIter iter; gpointer key, value; guint original_size; long repo_loaded_packages = 0; original_size = g_hash_table_size(cr_metadata_hashtable(metadata)); g_hash_table_iter_init (&iter, cr_metadata_hashtable(metadata)); while (g_hash_table_iter_next (&iter, &key, &value)) { int ret; cr_Package *pkg = (cr_Package *) value; // Lookup a package in the noarch_hashtable gboolean noarch_pkg_used = FALSE; if (noarch_hashtable && !g_strcmp0(pkg->arch, "noarch")) { cr_Package *noarch_pkg; noarch_pkg = g_hash_table_lookup(noarch_hashtable, pkg->location_href); if (noarch_pkg) { pkg = noarch_pkg; noarch_pkg_used = TRUE; } } g_debug("Reading metadata for %s (%s-%s.%s)", pkg->name, pkg->version, pkg->release, pkg->arch); // Add package ret = add_package(pkg, repopath, merged, arch_list, merge_method, koji_stuff, omit_baseurl, repoid); if (ret > 0) { if (!noarch_pkg_used) { // Original package was added // => remove only record from hashtable g_hash_table_iter_steal(&iter); } else { // Package from noarch repo was added // => do not remove record, just make note used_noarch_keys = g_slist_prepend(used_noarch_keys, pkg->location_href); g_debug("Package: %s (from: %s) has been replaced by noarch package", pkg->location_href, repopath); } if (ret == 1) { repo_loaded_packages++; // Koji-mergerepos specific behaviour ----------- if (koji_stuff && 
koji_stuff->pkgorigins) { _cleanup_free_ gchar *nvra = cr_package_nvra(pkg); _cleanup_free_ gchar *url = cr_prepend_protocol(ml->original_url); cr_printf(NULL, koji_stuff->pkgorigins, "%s\t%s\n", nvra, url); } // Koji-mergerepos specific behaviour - end ----- } } } loaded_packages += repo_loaded_packages; cr_metadata_free(metadata); g_debug("Repo: %s (Loaded: %ld Used: %ld)", repopath, (unsigned long) original_size, repo_loaded_packages); g_free(repopath); } #ifdef WITH_LIBMODULEMD g_autoptr(ModulemdModuleIndex) moduleindex = modulemd_module_index_merger_resolve (merger, &err); g_auto (GStrv) module_names = modulemd_module_index_get_module_names_as_strv (moduleindex); if (moduleindex && g_strv_length(module_names) == 0) { /* If the final module index is empty, free it so it won't get * output in dump_merged_metadata() */ g_clear_pointer (&moduleindex, g_object_unref); } *module_index = g_steal_pointer(&moduleindex); #endif // Steal used keys from noarch_hashtable for (element = used_noarch_keys; element; element = g_slist_next(element)) g_hash_table_steal(noarch_hashtable, (gconstpointer) element->data); g_slist_free(used_noarch_keys); return loaded_packages; } int package_cmp(gconstpointer a_p, gconstpointer b_p) { int ret; const cr_Package *pkg_a = a_p; const cr_Package *pkg_b = b_p; ret = g_strcmp0(pkg_a->location_href, pkg_b->location_href); if (ret) return ret; return g_strcmp0(pkg_a->location_base, pkg_b->location_base); } #ifdef WITH_LIBMODULEMD static gint modulemd_write_handler (void *data, unsigned char *buffer, size_t size) { int ret; CR_FILE *cr_file = (CR_FILE *)data; g_autoptr (GError) err = NULL; ret = cr_write (cr_file, buffer, size, &err); if (ret < 1) { g_warning ("Could not write modulemd: %s", err->message); return 0; } return 1; } #endif /* WITH_LIBMODULEMD */ int dump_merged_metadata(GHashTable *merged_hashtable, long packages, gchar *groupfile, #ifdef WITH_LIBMODULEMD ModulemdModuleIndex *module_index, #endif /* WITH_LIBMODULEMD */ struct CmdOptions *cmd_options) { GError *tmp_err = NULL; // Create/Open output xml files cr_ContentStat *pri_stat = cr_contentstat_new(CR_CHECKSUM_SHA256, NULL); cr_ContentStat *fil_stat = cr_contentstat_new(CR_CHECKSUM_SHA256, NULL); cr_ContentStat *oth_stat = cr_contentstat_new(CR_CHECKSUM_SHA256, NULL); cr_XmlFile *pri_f; cr_XmlFile *fil_f; cr_XmlFile *oth_f; gchar *pri_zck_filename = NULL; gchar *fil_zck_filename = NULL; gchar *oth_zck_filename = NULL; cr_XmlFile *pri_cr_zck = NULL; cr_XmlFile *fil_cr_zck = NULL; cr_XmlFile *oth_cr_zck = NULL; cr_ContentStat *pri_zck_stat = cr_contentstat_new(CR_CHECKSUM_SHA256, NULL); cr_ContentStat *fil_zck_stat = cr_contentstat_new(CR_CHECKSUM_SHA256, NULL); cr_ContentStat *oth_zck_stat = cr_contentstat_new(CR_CHECKSUM_SHA256, NULL); gchar *pri_dict = NULL; gchar *fil_dict = NULL; gchar *oth_dict = NULL; size_t pri_dict_size = 0; size_t fil_dict_size = 0; size_t oth_dict_size = 0; gchar *pri_dict_file = NULL; gchar *fil_dict_file = NULL; gchar *oth_dict_file = NULL; if (cmd_options->zck_dict_dir) { pri_dict_file = cr_get_dict_file(cmd_options->zck_dict_dir, "primary.xml"); fil_dict_file = cr_get_dict_file(cmd_options->zck_dict_dir, "filelists.xml"); oth_dict_file = cr_get_dict_file(cmd_options->zck_dict_dir, "other.xml"); if (pri_dict_file && !g_file_get_contents(pri_dict_file, &pri_dict, &pri_dict_size, &tmp_err)) { g_critical("Error reading zchunk primary dict %s: %s", pri_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } if (fil_dict_file && 
!g_file_get_contents(fil_dict_file, &fil_dict, &fil_dict_size, &tmp_err)) { g_critical("Error reading zchunk filelists dict %s: %s", fil_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } if (oth_dict_file && !g_file_get_contents(oth_dict_file, &oth_dict, &oth_dict_size, &tmp_err)) { g_critical("Error reading zchunk other dict %s: %s", oth_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } } const char *groupfile_suffix = cr_compression_suffix( cmd_options->groupfile_compression_type); gchar *pri_xml_filename = g_strconcat(cmd_options->tmp_out_repo, "/primary.xml.gz", NULL); gchar *fil_xml_filename = g_strconcat(cmd_options->tmp_out_repo, "/filelists.xml.gz", NULL); gchar *oth_xml_filename = g_strconcat(cmd_options->tmp_out_repo, "/other.xml.gz", NULL); gchar *update_info_filename = NULL; if (!cmd_options->noupdateinfo) update_info_filename = g_strconcat(cmd_options->tmp_out_repo, "/updateinfo.xml", groupfile_suffix, NULL); pri_f = cr_xmlfile_sopen_primary(pri_xml_filename, CR_CW_GZ_COMPRESSION, pri_stat, &tmp_err); if (tmp_err) { g_critical("Cannot open %s: %s", pri_xml_filename, tmp_err->message); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); cr_contentstat_free(oth_stat, NULL); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); g_free(update_info_filename); g_error_free(tmp_err); return 0; } fil_f = cr_xmlfile_sopen_filelists(fil_xml_filename, CR_CW_GZ_COMPRESSION, fil_stat, &tmp_err); if (tmp_err) { g_critical("Cannot open %s: %s", fil_xml_filename, tmp_err->message); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); cr_contentstat_free(oth_stat, NULL); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); g_free(update_info_filename); cr_xmlfile_close(pri_f, NULL); g_error_free(tmp_err); return 0; } oth_f = cr_xmlfile_sopen_other(oth_xml_filename, CR_CW_GZ_COMPRESSION, oth_stat, &tmp_err); if (tmp_err) { g_critical("Cannot open %s: %s", oth_xml_filename, tmp_err->message); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); cr_contentstat_free(oth_stat, NULL); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); g_free(update_info_filename); cr_xmlfile_close(fil_f, NULL); cr_xmlfile_close(pri_f, NULL); g_error_free(tmp_err); return 0; } cr_xmlfile_set_num_of_pkgs(pri_f, packages, NULL); cr_xmlfile_set_num_of_pkgs(fil_f, packages, NULL); cr_xmlfile_set_num_of_pkgs(oth_f, packages, NULL); if (cmd_options->zck_compression) { g_debug("Creating .xml.zck files"); pri_zck_filename = g_strconcat(cmd_options->tmp_out_repo, "/primary.xml.zck", NULL); fil_zck_filename = g_strconcat(cmd_options->tmp_out_repo, "/filelists.xml.zck", NULL); oth_zck_filename = g_strconcat(cmd_options->tmp_out_repo, "/other.xml.zck", NULL); pri_cr_zck = cr_xmlfile_sopen_primary(pri_zck_filename, CR_CW_ZCK_COMPRESSION, pri_zck_stat, &tmp_err); assert(pri_cr_zck || tmp_err); if (!pri_cr_zck) { g_critical("Cannot open file %s: %s", pri_zck_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_zck_stat, NULL); g_free(pri_zck_filename); 
g_free(fil_zck_filename); g_free(oth_zck_filename); exit(EXIT_FAILURE); } cr_set_dict(pri_cr_zck->f, pri_dict, pri_dict_size, &tmp_err); if (tmp_err) { g_critical("Error reading setting primary dict %s: %s", pri_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(pri_dict); fil_cr_zck = cr_xmlfile_sopen_filelists(fil_zck_filename, CR_CW_ZCK_COMPRESSION, fil_zck_stat, &tmp_err); assert(fil_cr_zck || tmp_err); if (!fil_cr_zck) { g_critical("Cannot open file %s: %s", fil_zck_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); cr_xmlfile_close(pri_cr_zck, NULL); exit(EXIT_FAILURE); } cr_set_dict(fil_cr_zck->f, fil_dict, fil_dict_size, &tmp_err); if (tmp_err) { g_critical("Error reading setting filelists dict %s: %s", fil_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(fil_dict); oth_cr_zck = cr_xmlfile_sopen_other(oth_zck_filename, CR_CW_ZCK_COMPRESSION, oth_zck_stat, &tmp_err); assert(oth_cr_zck || tmp_err); if (!oth_cr_zck) { g_critical("Cannot open file %s: %s", oth_zck_filename, tmp_err->message); g_clear_error(&tmp_err); cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); cr_xmlfile_close(fil_cr_zck, NULL); cr_xmlfile_close(pri_cr_zck, NULL); exit(EXIT_FAILURE); } cr_set_dict(oth_cr_zck->f, oth_dict, oth_dict_size, &tmp_err); if (tmp_err) { g_critical("Error reading setting other dict %s: %s", oth_dict_file, tmp_err->message); g_clear_error(&tmp_err); exit(EXIT_FAILURE); } g_free(oth_dict); // Set number of packages g_debug("Setting number of packages"); cr_xmlfile_set_num_of_pkgs(pri_cr_zck, packages, NULL); cr_xmlfile_set_num_of_pkgs(fil_cr_zck, packages, NULL); cr_xmlfile_set_num_of_pkgs(oth_cr_zck, packages, NULL); } // Prepare sqlite if needed cr_SqliteDb *pri_db = NULL; cr_SqliteDb *fil_db = NULL; cr_SqliteDb *oth_db = NULL; if (!cmd_options->no_database) { gchar *pri_db_filename = NULL; gchar *fil_db_filename = NULL; gchar *oth_db_filename = NULL; pri_db_filename = g_strconcat(cmd_options->tmp_out_repo, "/primary.sqlite", NULL); fil_db_filename = g_strconcat(cmd_options->tmp_out_repo, "/filelists.sqlite", NULL); oth_db_filename = g_strconcat(cmd_options->tmp_out_repo, "/other.sqlite", NULL); pri_db = cr_db_open_primary(pri_db_filename, NULL); fil_db = cr_db_open_filelists(fil_db_filename, NULL); oth_db = cr_db_open_other(oth_db_filename, NULL); g_free(pri_db_filename); g_free(fil_db_filename); g_free(oth_db_filename); } // Dump hashtable GList *keys, *key; keys = g_hash_table_get_keys(merged_hashtable); keys = g_list_sort(keys, (GCompareFunc) g_strcmp0); char *prev_srpm = NULL; for (key = keys; key; key = g_list_next(key)) { gpointer value = g_hash_table_lookup(merged_hashtable, key->data); GSList *element = (GSList *) value; element = g_slist_sort(element, package_cmp); for (; element; element=g_slist_next(element)) { struct cr_XmlStruct res; cr_Package *pkg; pkg = (cr_Package *) element->data; res = cr_xml_dump(pkg, NULL); g_debug("Writing metadata for %s (%s-%s.%s)", pkg->name, pkg->version, pkg->release, pkg->arch); if (cmd_options->zck_compression && (!prev_srpm || !pkg->rpm_sourcerpm || strlen(prev_srpm) != strlen(pkg->rpm_sourcerpm) || strncmp(pkg->rpm_sourcerpm, prev_srpm, strlen(prev_srpm)) != 0)) { 
cr_end_chunk(pri_cr_zck->f, NULL); cr_end_chunk(fil_cr_zck->f, NULL); cr_end_chunk(oth_cr_zck->f, NULL); g_free(prev_srpm); if (pkg->rpm_sourcerpm) prev_srpm = g_strdup(pkg->rpm_sourcerpm); else prev_srpm = NULL; } cr_xmlfile_add_chunk(pri_f, (const char *) res.primary, NULL); cr_xmlfile_add_chunk(fil_f, (const char *) res.filelists, NULL); cr_xmlfile_add_chunk(oth_f, (const char *) res.other, NULL); if (cmd_options->zck_compression) { cr_xmlfile_add_chunk(pri_cr_zck, (const char *) res.primary, NULL); cr_xmlfile_add_chunk(fil_cr_zck, (const char *) res.filelists, NULL); cr_xmlfile_add_chunk(oth_cr_zck, (const char *) res.other, NULL); } if (!cmd_options->no_database) { cr_db_add_pkg(pri_db, pkg, NULL); cr_db_add_pkg(fil_db, pkg, NULL); cr_db_add_pkg(oth_db, pkg, NULL); } free(res.primary); free(res.filelists); free(res.other); } } g_free(prev_srpm); g_list_free(keys); // Close files cr_xmlfile_close(pri_f, NULL); cr_xmlfile_close(fil_f, NULL); cr_xmlfile_close(oth_f, NULL); if (cmd_options->zck_compression) { cr_xmlfile_close(pri_cr_zck, NULL); cr_xmlfile_close(fil_cr_zck, NULL); cr_xmlfile_close(oth_cr_zck, NULL); } // Write updateinfo.xml // TODO if (!cmd_options->noupdateinfo) { CR_FILE *update_info = cr_open(update_info_filename, CR_CW_MODE_WRITE, cmd_options->groupfile_compression_type, &tmp_err); if (update_info) { cr_puts(update_info, "\n\n", NULL); cr_close(update_info, NULL); } else { g_warning("Cannot open %s: %s", update_info_filename, tmp_err->message); g_error_free(tmp_err); } } #ifdef WITH_LIBMODULEMD // Write modulemd g_autofree gchar *modulemd_filename = NULL; if (module_index) { gboolean ret; modulemd_filename = g_strconcat(cmd_options->tmp_out_repo, "/modules.yaml.gz", NULL); CR_FILE *modulemd = cr_open(modulemd_filename, CR_CW_MODE_WRITE, CR_CW_GZ_COMPRESSION, &tmp_err); if (modulemd) { ret = modulemd_module_index_dump_to_custom(module_index, modulemd_write_handler, modulemd, &tmp_err); if (!ret) { g_warning("Could not write module metadata: %s", tmp_err->message); } cr_close(modulemd, NULL); } else { g_warning("Cannot open %s: %s", modulemd_filename, tmp_err->message); g_error_free(tmp_err); } } #endif // Prepare repomd records cr_RepomdRecord *pri_xml_rec = cr_repomd_record_new("primary", pri_xml_filename); cr_RepomdRecord *fil_xml_rec = cr_repomd_record_new("filelists", fil_xml_filename); cr_RepomdRecord *oth_xml_rec = cr_repomd_record_new("other", oth_xml_filename); cr_RepomdRecord *pri_db_rec = NULL; cr_RepomdRecord *fil_db_rec = NULL; cr_RepomdRecord *oth_db_rec = NULL; cr_RepomdRecord *pri_zck_rec = NULL; cr_RepomdRecord *fil_zck_rec = NULL; cr_RepomdRecord *oth_zck_rec = NULL; cr_RepomdRecord *groupfile_rec = NULL; cr_RepomdRecord *compressed_groupfile_rec = NULL; cr_RepomdRecord *groupfile_zck_rec = NULL; cr_RepomdRecord *update_info_rec = NULL; cr_RepomdRecord *update_info_zck_rec = NULL; cr_RepomdRecord *pkgorigins_rec = NULL; cr_RepomdRecord *pkgorigins_zck_rec = NULL; #ifdef WITH_LIBMODULEMD cr_RepomdRecord *modulemd_rec = NULL; cr_RepomdRecord *modulemd_zck_rec = NULL; if (module_index) { modulemd_rec = cr_repomd_record_new("modules", modulemd_filename); } #endif /* WITH_LIBMODULEMD */ // XML cr_repomd_record_load_contentstat(pri_xml_rec, pri_stat); cr_repomd_record_load_contentstat(fil_xml_rec, fil_stat); cr_repomd_record_load_contentstat(oth_xml_rec, oth_stat); cr_contentstat_free(pri_stat, NULL); cr_contentstat_free(fil_stat, NULL); cr_contentstat_free(oth_stat, NULL); GThreadPool *fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, 
FALSE, NULL); cr_RepomdRecordFillTask *pri_fill_task; cr_RepomdRecordFillTask *fil_fill_task; cr_RepomdRecordFillTask *oth_fill_task; pri_fill_task = cr_repomdrecordfilltask_new(pri_xml_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, pri_fill_task, NULL); fil_fill_task = cr_repomdrecordfilltask_new(fil_xml_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, fil_fill_task, NULL); oth_fill_task = cr_repomdrecordfilltask_new(oth_xml_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, oth_fill_task, NULL); #ifdef WITH_LIBMODULEMD cr_RepomdRecordFillTask *mmd_fill_task; if (module_index) { mmd_fill_task = cr_repomdrecordfilltask_new(modulemd_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, mmd_fill_task, NULL); if (cmd_options->zck_compression) { modulemd_zck_rec = cr_repomd_record_new("modules_zck", NULL); cr_repomd_record_compress_and_fill(modulemd_rec, modulemd_zck_rec, CR_CHECKSUM_SHA256, CR_CW_ZCK_COMPRESSION, NULL, NULL); } } #endif /* WITH_LIBMODULEMD */ // Groupfile if (groupfile) { groupfile_rec = cr_repomd_record_new("group", groupfile); compressed_groupfile_rec = cr_repomd_record_new("group_gz", NULL); cr_repomd_record_compress_and_fill(groupfile_rec, compressed_groupfile_rec, CR_CHECKSUM_SHA256, cmd_options->groupfile_compression_type, NULL, NULL); if (cmd_options->zck_compression) { groupfile_zck_rec = cr_repomd_record_new("group_zck", NULL); cr_repomd_record_compress_and_fill(groupfile_rec, groupfile_zck_rec, CR_CHECKSUM_SHA256, CR_CW_ZCK_COMPRESSION, NULL, NULL); } } // Update info if (!cmd_options->noupdateinfo) { update_info_rec = cr_repomd_record_new("updateinfo", update_info_filename); cr_repomd_record_fill(update_info_rec, CR_CHECKSUM_SHA256, NULL); if (cmd_options->zck_compression) { update_info_zck_rec = cr_repomd_record_new("updateinfo_zck", NULL); cr_repomd_record_compress_and_fill(update_info_rec, update_info_zck_rec, CR_CHECKSUM_SHA256, CR_CW_ZCK_COMPRESSION, NULL, NULL); } } // Pkgorigins if (cmd_options->koji || cmd_options->pkgorigins) { gchar *pkgorigins_path = g_strconcat(cmd_options->tmp_out_repo, "pkgorigins.gz", NULL); pkgorigins_rec = cr_repomd_record_new("origin", pkgorigins_path); cr_repomd_record_fill(pkgorigins_rec, CR_CHECKSUM_SHA256, NULL); if (cmd_options->zck_compression) { pkgorigins_zck_rec = cr_repomd_record_new("origin_zck", NULL); cr_repomd_record_compress_and_fill(pkgorigins_rec, pkgorigins_zck_rec, CR_CHECKSUM_SHA256, CR_CW_ZCK_COMPRESSION, NULL, NULL); } g_free(pkgorigins_path); } // Wait till repomd record fill task of xml files ends. 
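// (g_thread_pool_free() with immediate=FALSE and wait_=TRUE blocks until every queued fill task has finished.)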
g_thread_pool_free(fill_pool, FALSE, TRUE); cr_repomdrecordfilltask_free(pri_fill_task, NULL); cr_repomdrecordfilltask_free(fil_fill_task, NULL); cr_repomdrecordfilltask_free(oth_fill_task, NULL); #ifdef WITH_LIBMODULEMD if (module_index) { cr_repomdrecordfilltask_free(mmd_fill_task, NULL); } #endif // Sqlite db if (!cmd_options->no_database) { const char *db_suffix = cr_compression_suffix(cmd_options->db_compression_type); // Insert XML checksums into the dbs cr_db_dbinfo_update(pri_db, pri_xml_rec->checksum, NULL); cr_db_dbinfo_update(fil_db, fil_xml_rec->checksum, NULL); cr_db_dbinfo_update(oth_db, oth_xml_rec->checksum, NULL); cr_db_close(pri_db, NULL); cr_db_close(fil_db, NULL); cr_db_close(oth_db, NULL); // Compress dbs gchar *pri_db_filename = g_strconcat(cmd_options->tmp_out_repo, "/primary.sqlite", NULL); gchar *fil_db_filename = g_strconcat(cmd_options->tmp_out_repo, "/filelists.sqlite", NULL); gchar *oth_db_filename = g_strconcat(cmd_options->tmp_out_repo, "/other.sqlite", NULL); gchar *pri_db_c_filename = g_strconcat(pri_db_filename, db_suffix, NULL); gchar *fil_db_c_filename = g_strconcat(fil_db_filename, db_suffix, NULL); gchar *oth_db_c_filename = g_strconcat(oth_db_filename, db_suffix, NULL); GThreadPool *compress_pool = g_thread_pool_new(cr_compressing_thread, NULL, 3, FALSE, NULL); cr_CompressionTask *pri_db_task; cr_CompressionTask *fil_db_task; cr_CompressionTask *oth_db_task; pri_db_task = cr_compressiontask_new(pri_db_filename, pri_db_c_filename, cmd_options->db_compression_type, CR_CHECKSUM_SHA256, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, pri_db_task, NULL); fil_db_task = cr_compressiontask_new(fil_db_filename, fil_db_c_filename, cmd_options->db_compression_type, CR_CHECKSUM_SHA256, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, fil_db_task, NULL); oth_db_task = cr_compressiontask_new(oth_db_filename, oth_db_c_filename, cmd_options->db_compression_type, CR_CHECKSUM_SHA256, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, oth_db_task, NULL); g_thread_pool_free(compress_pool, FALSE, TRUE); // Prepare repomd records pri_db_rec = cr_repomd_record_new("primary_db", pri_db_c_filename); fil_db_rec = cr_repomd_record_new("filelists_db", fil_db_c_filename); oth_db_rec = cr_repomd_record_new("other_db", oth_db_c_filename); g_free(pri_db_filename); g_free(fil_db_filename); g_free(oth_db_filename); g_free(pri_db_c_filename); g_free(fil_db_c_filename); g_free(oth_db_c_filename); cr_repomd_record_load_contentstat(pri_db_rec, pri_db_task->stat); cr_repomd_record_load_contentstat(fil_db_rec, fil_db_task->stat); cr_repomd_record_load_contentstat(oth_db_rec, oth_db_task->stat); cr_compressiontask_free(pri_db_task, NULL); cr_compressiontask_free(fil_db_task, NULL); cr_compressiontask_free(oth_db_task, NULL); fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, FALSE, NULL); cr_RepomdRecordFillTask *pri_db_fill_task; cr_RepomdRecordFillTask *fil_db_fill_task; cr_RepomdRecordFillTask *oth_db_fill_task; pri_db_fill_task = cr_repomdrecordfilltask_new(pri_db_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, pri_db_fill_task, NULL); fil_db_fill_task = cr_repomdrecordfilltask_new(fil_db_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, fil_db_fill_task, NULL); oth_db_fill_task = cr_repomdrecordfilltask_new(oth_db_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, oth_db_fill_task, NULL); g_thread_pool_free(fill_pool, FALSE, TRUE); cr_repomdrecordfilltask_free(pri_db_fill_task, NULL); 
cr_repomdrecordfilltask_free(fil_db_fill_task, NULL); cr_repomdrecordfilltask_free(oth_db_fill_task, NULL); } // Zchunk if (cmd_options->zck_compression) { // Prepare repomd records pri_zck_rec = cr_repomd_record_new("primary_zck", pri_zck_filename); fil_zck_rec = cr_repomd_record_new("filelists_zck", fil_zck_filename); oth_zck_rec = cr_repomd_record_new("other_zck", oth_zck_filename); g_free(pri_zck_filename); g_free(fil_zck_filename); g_free(oth_zck_filename); cr_repomd_record_load_zck_contentstat(pri_zck_rec, pri_zck_stat); cr_repomd_record_load_zck_contentstat(fil_zck_rec, fil_zck_stat); cr_repomd_record_load_zck_contentstat(oth_zck_rec, oth_zck_stat); fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, FALSE, NULL); cr_RepomdRecordFillTask *pri_zck_fill_task; cr_RepomdRecordFillTask *fil_zck_fill_task; cr_RepomdRecordFillTask *oth_zck_fill_task; pri_zck_fill_task = cr_repomdrecordfilltask_new(pri_zck_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, pri_zck_fill_task, NULL); fil_zck_fill_task = cr_repomdrecordfilltask_new(fil_zck_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, fil_zck_fill_task, NULL); oth_zck_fill_task = cr_repomdrecordfilltask_new(oth_zck_rec, CR_CHECKSUM_SHA256, NULL); g_thread_pool_push(fill_pool, oth_zck_fill_task, NULL); g_thread_pool_free(fill_pool, FALSE, TRUE); cr_repomdrecordfilltask_free(pri_zck_fill_task, NULL); cr_repomdrecordfilltask_free(fil_zck_fill_task, NULL); cr_repomdrecordfilltask_free(oth_zck_fill_task, NULL); } cr_contentstat_free(pri_zck_stat, NULL); cr_contentstat_free(fil_zck_stat, NULL); cr_contentstat_free(oth_zck_stat, NULL); // Add checksums into files names if (cmd_options->unique_md_filenames) { cr_repomd_record_rename_file(pri_xml_rec, NULL); cr_repomd_record_rename_file(fil_xml_rec, NULL); cr_repomd_record_rename_file(oth_xml_rec, NULL); cr_repomd_record_rename_file(pri_db_rec, NULL); cr_repomd_record_rename_file(fil_db_rec, NULL); cr_repomd_record_rename_file(oth_db_rec, NULL); cr_repomd_record_rename_file(pri_zck_rec, NULL); cr_repomd_record_rename_file(fil_zck_rec, NULL); cr_repomd_record_rename_file(oth_zck_rec, NULL); cr_repomd_record_rename_file(groupfile_rec, NULL); cr_repomd_record_rename_file(compressed_groupfile_rec, NULL); cr_repomd_record_rename_file(groupfile_zck_rec, NULL); cr_repomd_record_rename_file(update_info_rec, NULL); cr_repomd_record_rename_file(update_info_zck_rec, NULL); cr_repomd_record_rename_file(pkgorigins_rec, NULL); cr_repomd_record_rename_file(pkgorigins_zck_rec, NULL); #ifdef WITH_LIBMODULEMD cr_repomd_record_rename_file(modulemd_rec, NULL); cr_repomd_record_rename_file(modulemd_zck_rec, NULL); #endif /* WITH_LIBMODULEMD */ } // Gen repomd.xml content cr_Repomd *repomd_obj = cr_repomd_new(); cr_repomd_set_record(repomd_obj, pri_xml_rec); cr_repomd_set_record(repomd_obj, fil_xml_rec); cr_repomd_set_record(repomd_obj, oth_xml_rec); cr_repomd_set_record(repomd_obj, pri_db_rec); cr_repomd_set_record(repomd_obj, fil_db_rec); cr_repomd_set_record(repomd_obj, oth_db_rec); cr_repomd_set_record(repomd_obj, pri_zck_rec); cr_repomd_set_record(repomd_obj, fil_zck_rec); cr_repomd_set_record(repomd_obj, oth_zck_rec); cr_repomd_set_record(repomd_obj, groupfile_rec); cr_repomd_set_record(repomd_obj, compressed_groupfile_rec); cr_repomd_set_record(repomd_obj, groupfile_zck_rec); cr_repomd_set_record(repomd_obj, update_info_rec); cr_repomd_set_record(repomd_obj, update_info_zck_rec); cr_repomd_set_record(repomd_obj, pkgorigins_rec); cr_repomd_set_record(repomd_obj, 
pkgorigins_zck_rec); #ifdef WITH_LIBMODULEMD cr_repomd_set_record(repomd_obj, modulemd_rec); cr_repomd_set_record(repomd_obj, modulemd_zck_rec); #endif /* WITH_LIBMODULEMD */ char *repomd_xml = cr_xml_dump_repomd(repomd_obj, NULL); cr_repomd_free(repomd_obj); if (repomd_xml) { gchar *repomd_path = g_strconcat(cmd_options->tmp_out_repo, "repomd.xml", NULL); FILE *frepomd = fopen(repomd_path, "w"); if (frepomd) { fputs(repomd_xml, frepomd); fclose(frepomd); } else g_critical("Cannot open file: %s", repomd_path); g_free(repomd_path); } else g_critical("Generate of repomd.xml failed"); // Move files from out_repo into tmp_out_repo g_debug("Moving data from %s", cmd_options->out_repo); if (g_file_test(cmd_options->out_repo, G_FILE_TEST_EXISTS)) { // Delete old metadata g_debug("Removing old metadata from %s", cmd_options->out_repo); cr_remove_metadata_classic(cmd_options->out_dir, 0, NULL); // Move files from out_repo to tmp_out_repo GDir *dirp; dirp = g_dir_open (cmd_options->out_repo, 0, NULL); if (!dirp) { g_critical("Cannot open directory: %s", cmd_options->out_repo); exit(1); } const gchar *filename; while ((filename = g_dir_read_name(dirp))) { gchar *full_path = g_strconcat(cmd_options->out_repo, filename, NULL); gchar *new_full_path = g_strconcat(cmd_options->tmp_out_repo, filename, NULL); // Do not override new file with the old one if (g_file_test(new_full_path, G_FILE_TEST_EXISTS)) { g_debug("Skip move of: %s -> %s (the destination file already exists)", full_path, new_full_path); g_debug("Removing: %s", full_path); g_remove(full_path); g_free(full_path); g_free(new_full_path); continue; } if (g_rename(full_path, new_full_path) == -1) g_critical("Cannot move file %s -> %s", full_path, new_full_path); else g_debug("Moved %s -> %s", full_path, new_full_path); g_free(full_path); g_free(new_full_path); } g_dir_close(dirp); // Remove out_repo if (g_rmdir(cmd_options->out_repo) == -1) { g_critical("Cannot remove %s", cmd_options->out_repo); } else { g_debug("Old out repo %s removed", cmd_options->out_repo); } } // Rename tmp_out_repo to out_repo if (g_rename(cmd_options->tmp_out_repo, cmd_options->out_repo) == -1) { g_critical("Cannot rename %s -> %s", cmd_options->tmp_out_repo, cmd_options->out_repo); } else { g_debug("Renamed %s -> %s", cmd_options->tmp_out_repo, cmd_options->out_repo); } // Clean up g_free(repomd_xml); g_free(pri_xml_filename); g_free(fil_xml_filename); g_free(oth_xml_filename); g_free(update_info_filename); return 1; } int main(int argc, char **argv) { _cleanup_error_free_ GError *tmp_err = NULL; // Parse arguments struct CmdOptions *cmd_options; cmd_options = parse_arguments(&argc, &argv); if (!cmd_options) { return 1; } // Set logging cr_setup_logging(FALSE, cmd_options->verbose); // Check arguments if (!check_arguments(cmd_options)) { free_options(cmd_options); return 1; } if (cmd_options->version) { printf("Version: %s\n", cr_version_string_with_features()); free_options(cmd_options); exit(0); } if (g_slist_length(cmd_options->repo_list) < 1) { free_options(cmd_options); g_printerr("Usage: %s [OPTION...] --repo=url --repo=url\n\n" "%s: take 2 or more repositories and merge their " "metadata into a new repo\n\n", cr_get_filename(argv[0]), cr_get_filename(argv[0])); return 1; } g_debug("Version: %s", cr_version_string_with_features()); // Prepare out_repo if (g_file_test(cmd_options->tmp_out_repo, G_FILE_TEST_EXISTS)) { g_critical("Temporary repodata directory: %s already exists! 
(" "Another createrepo process is running?)", cmd_options->tmp_out_repo); free_options(cmd_options); return 1; } if (g_mkdir_with_parents (cmd_options->tmp_out_repo, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { g_critical("Error while creating temporary repodata directory %s: %s", cmd_options->tmp_out_repo, g_strerror(errno)); free_options(cmd_options); return 1; } // Download repos GSList *local_repos = NULL; GSList *element = NULL; gchar *groupfile = NULL; gboolean cr_download_failed = FALSE; for (element = cmd_options->repo_list; element; element = g_slist_next(element)) { struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) element->data, TRUE, NULL); if (!loc) { g_warning("Downloading of repodata failed: %s", (gchar *) element->data); cr_download_failed = TRUE; break; } local_repos = g_slist_prepend(local_repos, loc); } if (cr_download_failed) { // Remove downloaded metadata and free structures for (element = local_repos; element; element = g_slist_next(element)) { struct cr_MetadataLocation *loc = (struct cr_MetadataLocation *) element->data; cr_metadatalocation_free(loc); } return 1; } // Groupfile // XXX: There must be a better logic if (!cmd_options->groupfile) { // Use first groupfile you find for (element = local_repos; element; element = g_slist_next(element)) { struct cr_MetadataLocation *loc; loc = (struct cr_MetadataLocation *) element->data; if (!groupfile){ if (loc->additional_metadata){ GSList *loc_groupfile = (g_slist_find_custom(loc->additional_metadata, "group", cr_cmp_metadatum_type)); if (loc_groupfile) { cr_Metadatum *g = loc_groupfile->data; if (cr_copy_file(g->name, cmd_options->tmp_out_repo, &tmp_err)) { groupfile = g_strconcat(cmd_options->tmp_out_repo, cr_get_filename(g->name), NULL); g_debug("Using groupfile: %s", groupfile); break; } else { g_warning("Groupfile %s from repo: %s cannot be used: %s\n", g->name, loc->original_url, tmp_err->message); g_clear_error(&tmp_err); } } } } } } else { // Use groupfile specified by user if (cr_copy_file(cmd_options->groupfile, cmd_options->tmp_out_repo, &tmp_err)) { groupfile = g_strconcat(cmd_options->tmp_out_repo, cr_get_filename(cmd_options->groupfile), NULL); g_debug("Using user specified groupfile: %s", groupfile); } else { g_critical("Cannot copy groupfile %s: %s", cmd_options->groupfile, tmp_err->message); return 1; } } // Load noarch repo cr_Metadata *noarch_metadata = NULL; // cr_metadata_hashtable(noarch_metadata): // Key: CR_HT_KEY_FILENAME aka pkg->location_href // Value: package if (cmd_options->noarch_repo_url) { struct cr_MetadataLocation *noarch_ml; noarch_ml = cr_locate_metadata(cmd_options->noarch_repo_url, TRUE, NULL); if (!noarch_ml) { g_critical("Cannot locate noarch repo: %s", cmd_options->noarch_repo_url); return 1; } noarch_metadata = cr_metadata_new(CR_HT_KEY_FILENAME, 0, NULL); // Base paths in output of original createrepo doesn't have trailing '/' gchar *noarch_repopath = cr_normalize_dir_path(noarch_ml->original_url); if (noarch_repopath && strlen(noarch_repopath) > 1) { noarch_repopath[strlen(noarch_repopath)-1] = '\0'; } g_debug("Loading noarch_repo: %s", noarch_repopath); if (cr_metadata_load_xml(noarch_metadata, noarch_ml, NULL) != CRE_OK) { g_critical("Cannot load noarch repo: \"%s\"", noarch_ml->repomd); cr_metadata_free(noarch_metadata); // TODO cleanup cr_metadatalocation_free(noarch_ml); return 1; } // Fill basepath - set proper base path for all packages in noarch hastable GHashTableIter iter; gpointer p_key, p_value; g_hash_table_iter_init (&iter, 
cr_metadata_hashtable(noarch_metadata)); while (g_hash_table_iter_next (&iter, &p_key, &p_value)) { cr_Package *pkg = (cr_Package *) p_value; if (!pkg->location_base) pkg->location_base = g_string_chunk_insert(pkg->chunk, noarch_repopath); } g_free(noarch_repopath); cr_metadatalocation_free(noarch_ml); } // Prepare Koji stuff if needed struct KojiMergedReposStuff *koji_stuff = NULL; if (cmd_options->koji) koji_stuff_prepare(&koji_stuff, cmd_options, local_repos); else if (cmd_options->pkgorigins) pkgorigins_prepare(&koji_stuff, cmd_options->tmp_out_repo); // Load metadata long loaded_packages; GHashTable *merged_hashtable = new_merged_metadata_hashtable(); // merged_hashtable: // Key: pkg->name // Value: GSList with packages with the same name #ifdef WITH_LIBMODULEMD g_autoptr(ModulemdModuleIndex) merged_index = NULL; #endif loaded_packages = merge_repos(merged_hashtable, #ifdef WITH_LIBMODULEMD &merged_index, #endif /* WITH_LIBMODULEMD */ local_repos, cmd_options->arch_list, cmd_options->merge_method, noarch_metadata ? cr_metadata_hashtable(noarch_metadata) : NULL, koji_stuff, cmd_options->omit_baseurl, cmd_options->repo_prefix_search, cmd_options->repo_prefix_replace ); // Destroy koji stuff - we have to close pkgorigins file before dump if (cmd_options->koji || cmd_options->pkgorigins) koji_stuff_destroy(&koji_stuff); // Dump metadata dump_merged_metadata(merged_hashtable, loaded_packages, groupfile, #ifdef WITH_LIBMODULEMD merged_index, #endif cmd_options); // Remove downloaded repos and free repo location structures for (element = local_repos; element; element = g_slist_next(element)) { struct cr_MetadataLocation *loc = (struct cr_MetadataLocation *) element->data; cr_metadatalocation_free(loc); } g_slist_free (local_repos); // Cleanup g_free(groupfile); cr_metadata_free(noarch_metadata); destroy_merged_metadata_hashtable(merged_hashtable); free_options(cmd_options); return 0; } createrepo_c-0.17.0/src/mergerepo_c.h000066400000000000000000000047371400672373200175000ustar00rootroot00000000000000/* * Copyright (C) 2018 Red Hat, Inc. * * Licensed under the GNU Lesser General Public License Version 2.1 * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __C_CREATEREPOLIB_MERGEREPO_C_H__ #define __C_CREATEREPOLIB_MERGEREPO_C_H__ #ifdef __cplusplus extern "C" { #endif #include "compression_wrapper.h" #define DEFAULT_DB_COMPRESSION_TYPE CR_CW_BZ2_COMPRESSION #define DEFAULT_GROUPFILE_COMPRESSION_TYPE CR_CW_GZ_COMPRESSION typedef enum { MM_DEFAULT, // NA == Name, Arch MM_FIRST_FROM_IDENTICAL_NA = MM_DEFAULT, MM_NEWEST_FROM_IDENTICAL_NA, MM_WITH_HIGHEST_NEVRA, MM_FIRST_FROM_IDENTICAL_NEVRA, MM_ALL_WITH_IDENTICAL_NEVRA } MergeMethod; struct CmdOptions { // Items filled by cmd option parser gboolean version; char **repos; char *repo_prefix_search; char *repo_prefix_replace; char *archlist; gboolean database; gboolean no_database; gboolean verbose; char *outputdir; char *outputrepo; gboolean nogroups; gboolean noupdateinfo; char *compress_type; gboolean zck_compression; char *zck_dict_dir; char *merge_method_str; gboolean all; char *noarch_repo_url; gboolean unique_md_filenames; gboolean simple_md_filenames; gboolean omit_baseurl; // Koji mergerepos specific options gboolean koji; gboolean koji_simple; gboolean pkgorigins; gboolean arch_expand; char *groupfile; char *blocked; // Items filled by check_arguments() char *out_dir; char *out_repo; char *tmp_out_repo; GSList *repo_list; GSList *arch_list; cr_CompressionType db_compression_type; cr_CompressionType groupfile_compression_type; MergeMethod merge_method; }; #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_MERGEREPO_C_H__ */ createrepo_c-0.17.0/src/metadata_internal.h000066400000000000000000000025331400672373200206550ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_METADATA_INTERNAL_H__ #define __C_CREATEREPOLIB_METADATA_INTERNAL_H__ #ifdef __cplusplus extern "C" { #endif #ifdef WITH_LIBMODULEMD #include #include "load_metadata.h" /** Return module metadata from a cr_Metadata * @param md cr_Metadata object. * @return Pointer to internal ModulemdModuleIndex. 
*/ ModulemdModuleIndex *cr_metadata_modulemd(cr_Metadata *md); #endif /* WITH_LIBMODULEMD */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_METADATA_INTERNAL_H__ */ createrepo_c-0.17.0/src/misc.c000066400000000000000000001204011400672373200161220ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #define _XOPEN_SOURCE 500 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cleanup.h" #include "error.h" #include "misc.h" #include "version.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define BUFFER_SIZE 4096 #define xstr(s) str(s) #define str(s) #s const char * cr_flag_to_str(gint64 flags) { flags &= 0xf; switch(flags) { case 0: return NULL; case 2: return "LT"; case 4: return "GT"; case 8: return "EQ"; case 10: return "LE"; case 12: return "GE"; default: return NULL; } } /* * BE CAREFUL! * * In case chunk param is NULL: * Returned structure had all strings malloced!!! * Be so kind and don't forget use free() for all its element, before end of * structure lifecycle. * * In case chunk is pointer to a GStringChunk: * Returned structure had all string inserted in the passed chunk. 
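 *
 * Illustrative examples (added for clarity, not part of the original
 * comment) of what the parsing below produces:
 *   cr_str_to_evr("1:2.0-3.fc35", NULL) -> epoch "1", version "2.0", release "3.fc35"
 *   cr_str_to_evr("2.0", NULL)          -> epoch "0", version "2.0", release NULL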
* */ cr_EVR * cr_str_to_evr(const char *string, GStringChunk *chunk) { cr_EVR *evr = g_new0(cr_EVR, 1); evr->epoch = NULL; evr->version = NULL; evr->release = NULL; if (!string || !(strlen(string))) { return evr; } const char *ptr; // These names are totally self explaining const char *ptr2; // // Epoch gboolean bad_epoch = FALSE; ptr = strstr(string, ":"); if (ptr) { // Check if epoch str is a number char *p = NULL; strtol(string, &p, 10); if (p == ptr) { // epoch str seems to be a number size_t len = ptr - string; if (len) { if (chunk) { evr->epoch = g_string_chunk_insert_len(chunk, string, len); } else { evr->epoch = g_strndup(string, len); } } } else { // Bad (non-numerical) epoch bad_epoch = TRUE; } } else { // There is no epoch ptr = (char*) string-1; } if (!evr->epoch && !bad_epoch) { if (chunk) { evr->epoch = g_string_chunk_insert_const(chunk, "0"); } else { evr->epoch = g_strdup("0"); } } // Version + release ptr2 = strstr(ptr+1, "-"); if (ptr2) { // Version size_t version_len = ptr2 - (ptr+1); if (chunk) { evr->version = g_string_chunk_insert_len(chunk, ptr+1, version_len); } else { evr->version = g_strndup(ptr+1, version_len); } // Release size_t release_len = strlen(ptr2+1); if (release_len) { if (chunk) { evr->release = g_string_chunk_insert_len(chunk, ptr2+1, release_len); } else { evr->release = g_strndup(ptr2+1, release_len); } } } else { // Release is not here, just version if (chunk) { evr->version = g_string_chunk_insert_const(chunk, ptr+1); } else { evr->version = g_strdup(ptr+1); } } return evr; } void cr_evr_free(cr_EVR *evr) { if (!evr) return; g_free(evr->epoch); g_free(evr->version); g_free(evr->release); g_free(evr); } /* inline int cr_is_primary(const char *filename) { This optimal piece of code cannot be used because of yum... We must match any string that contains "bin/" in dirname Response to my question from packaging team: .... It must still contain that. Atm. it's defined as taking anything with 'bin/' in the path. The idea was that it'd match /usr/kerberos/bin/ and /opt/blah/sbin. So that is what all versions of createrepo generate, and what yum all versions of yum expect to be generated. We can't change one side, without breaking the expectation of the other. There have been plans to change the repodata, and one of the changes would almost certainly be in how files are represented ... likely via. lists of "known" paths, that can be computed at createrepo time. if (!strncmp(filename, "/bin/", 5)) { return 1; } if (!strncmp(filename, "/sbin/", 6)) { return 1; } if (!strncmp(filename, "/etc/", 5)) { return 1; } if (!strncmp(filename, "/usr/", 5)) { if (!strncmp(filename+5, "bin/", 4)) { return 1; } if (!strncmp(filename+5, "sbin/", 5)) { return 1; } if (!strcmp(filename+5, "lib/sendmail")) { return 1; } } if (!strncmp(filename, "/etc/", 5)) { return 1; } if (!strcmp(filename, "/usr/lib/sendmail")) { return 1; } if (strstr(filename, "bin/")) { return 1; } return 0; } */ #define VAL_LEN 4 // Len of numeric values in rpm struct cr_HeaderRangeStruct cr_get_header_byte_range(const char *filename, GError **err) { /* Values readed by fread are 4 bytes long and stored as big-endian. * So there is htonl function to convert this big-endian number into host * byte order. 
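 *
 * Worked example of the offset arithmetic below (illustrative, made-up
 * values): with sigindex = 4 and sigdata = 100 the signature index takes
 * 4 * 16 = 64 bytes, sigsize = 100 + 64 = 164, 164 % 8 = 4 so 4 padding
 * bytes are added, and the header starts at 112 + 164 + 4 = 280.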
*/ struct cr_HeaderRangeStruct results; assert(!err || *err == NULL); results.start = 0; results.end = 0; // Open file FILE *fp = fopen(filename, "rb"); if (!fp) { g_debug("%s: Cannot open file %s (%s)", __func__, filename, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open %s: %s", filename, g_strerror(errno)); return results; } // Get header range if (fseek(fp, 104, SEEK_SET) != 0) { g_debug("%s: fseek fail on %s (%s)", __func__, filename, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot seek over %s: %s", filename, g_strerror(errno)); fclose(fp); return results; } unsigned int sigindex = 0; unsigned int sigdata = 0; if (fread(&sigindex, VAL_LEN, 1, fp) != 1) { g_set_error(err, ERR_DOMAIN, CRE_IO, "fread() error on %s: %s", filename, g_strerror(errno)); fclose(fp); return results; } sigindex = htonl(sigindex); if (fread(&sigdata, VAL_LEN, 1, fp) != 1) { g_set_error(err, ERR_DOMAIN, CRE_IO, "fread() error on %s: %s", filename, g_strerror(errno)); fclose(fp); return results; } sigdata = htonl(sigdata); unsigned int sigindexsize = sigindex * 16; unsigned int sigsize = sigdata + sigindexsize; unsigned int disttoboundary = sigsize % 8; if (disttoboundary) { disttoboundary = 8 - disttoboundary; } unsigned int hdrstart = 112 + sigsize + disttoboundary; fseek(fp, hdrstart, SEEK_SET); fseek(fp, 8, SEEK_CUR); unsigned int hdrindex = 0; unsigned int hdrdata = 0; if (fread(&hdrindex, VAL_LEN, 1, fp) != 1) { g_set_error(err, ERR_DOMAIN, CRE_IO, "fread() error on %s: %s", filename, g_strerror(errno)); fclose(fp); return results; } hdrindex = htonl(hdrindex); if (fread(&hdrdata, VAL_LEN, 1, fp) != 1) { g_set_error(err, ERR_DOMAIN, CRE_IO, "fread() error on %s: %s", filename, g_strerror(errno)); fclose(fp); return results; } hdrdata = htonl(hdrdata); unsigned int hdrindexsize = hdrindex * 16; unsigned int hdrsize = hdrdata + hdrindexsize + 16; unsigned int hdrend = hdrstart + hdrsize; fclose(fp); // Check sanity if (hdrend < hdrstart) { g_debug("%s: sanity check fail on %s (%d > %d))", __func__, filename, hdrstart, hdrend); g_set_error(err, ERR_DOMAIN, CRE_ERROR, "sanity check error on %s (hdrstart: %d > hdrend: %d)", filename, hdrstart, hdrend); return results; } results.start = hdrstart; results.end = hdrend; return results; } char * cr_get_filename(const char *filepath) { char *filename; if (!filepath) return NULL; filename = (char *) filepath; size_t x = 0; while (filepath[x] != '\0') { if (filepath[x] == '/') { filename = (char *) filepath+(x+1); } x++; } return filename; } char * cr_get_cleaned_href(const char *filepath) { char *filename; if (!filepath) return NULL; filename = (char *) filepath; while (filename[0] == '.' 
&& filename[1] == '/') filename += 2; return filename; } gboolean cr_copy_file(const char *src, const char *in_dst, GError **err) { size_t readed; char buf[BUFFER_SIZE]; _cleanup_free_ gchar *dst = NULL; _cleanup_file_fclose_ FILE *orig = NULL; _cleanup_file_fclose_ FILE *new = NULL; assert(src); assert(in_dst); assert(!err || *err == NULL); // If destination is dir use filename from src if (g_str_has_suffix(in_dst, "/")) dst = g_strconcat(in_dst, cr_get_filename(src), NULL); else dst = g_strdup(in_dst); // Open src file if ((orig = fopen(src, "rb")) == NULL) { g_debug("%s: Cannot open source file %s (%s)", __func__, src, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open file %s: %s", src, g_strerror(errno)); return FALSE; } // Open dst file if ((new = fopen(dst, "wb")) == NULL) { g_debug("%s: Cannot open destination file %s (%s)", __func__, dst, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open file %s: %s", dst, g_strerror(errno)); return FALSE; } // Copy content from src -> dst while ((readed = fread(buf, 1, BUFFER_SIZE, orig)) > 0) { if (readed != BUFFER_SIZE && ferror(orig)) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Error while read %s: %s", src, g_strerror(errno)); return FALSE; } if (fwrite(buf, 1, readed, new) != readed) { g_debug("%s: Error while copy %s -> %s (%s)", __func__, src, dst, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Error while write %s: %s", dst, g_strerror(errno)); return FALSE; } } return TRUE; } int cr_compress_file_with_stat(const char *src, const char *in_dst, cr_CompressionType compression, cr_ContentStat *stat, const char *zck_dict_dir, gboolean zck_auto_chunk, GError **err) { int ret = CRE_OK; int readed; char buf[BUFFER_SIZE]; CR_FILE *orig = NULL; CR_FILE *new = NULL; gchar *dst = (gchar *) in_dst; GError *tmp_err = NULL; assert(src); assert(!err || *err == NULL); const char *c_suffix = cr_compression_suffix(compression); // Src must be a file NOT a directory if (!g_file_test(src, G_FILE_TEST_IS_REGULAR)) { g_debug("%s: Source (%s) must be a regular file!", __func__, src); g_set_error(err, ERR_DOMAIN, CRE_NOFILE, "Not a regular file: %s", src); return CRE_NOFILE; } if (!dst) { // If destination is NULL, use src + compression suffix dst = g_strconcat(src, c_suffix, NULL); } else if (g_str_has_suffix(dst, "/")) { // If destination is dir use filename from src + compression suffix dst = g_strconcat(dst, cr_get_filename(src), c_suffix, NULL); } else if (c_suffix && !g_str_has_suffix(dst, c_suffix)) { // If destination is missing compression suffix or has a different one, use specified compression suffix cr_CompressionType old_type = cr_detect_compression(dst, &tmp_err); if (tmp_err) { g_debug("%s: Unable to detect compression type of %s, using the filename as is.", __func__, dst); g_clear_error(&tmp_err); } else if (old_type == CR_CW_NO_COMPRESSION) { dst = g_strconcat(dst, c_suffix, NULL); } else { _cleanup_free_ gchar *tmp_file = g_strndup(dst, strlen(dst) - strlen(cr_compression_suffix(old_type))); dst = g_strconcat(tmp_file, c_suffix, NULL); } } int mode = CR_CW_AUTO_DETECT_COMPRESSION; orig = cr_open(src, CR_CW_MODE_READ, mode, &tmp_err); if (!orig) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", src); return ret; } _cleanup_free_ gchar *dict = NULL; size_t dict_size = 0; if (compression == CR_CW_ZCK_COMPRESSION && zck_dict_dir) { /* Find zdict */ _cleanup_free_ gchar *file_basename = NULL; if (dst) { _cleanup_free_ gchar *dict_base = NULL; if (g_str_has_suffix(dst, 
".zck")) dict_base = g_strndup(dst, strlen(dst)-4); else dict_base = g_strdup(dst); file_basename = g_path_get_basename(dict_base); } else { file_basename = g_path_get_basename(src); } _cleanup_free_ gchar *dict_file = cr_get_dict_file(zck_dict_dir, file_basename); /* Read dictionary from file */ if (dict_file && !g_file_get_contents(dict_file, &dict, &dict_size, &tmp_err)) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Error reading zchunk dict %s: %s", dict_file, tmp_err->message); ret = CRE_IO; goto compress_file_cleanup; } } new = cr_sopen(dst, CR_CW_MODE_WRITE, compression, stat, &tmp_err); if (tmp_err) { g_debug("%s: Cannot open destination file %s", __func__, dst); g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", dst); ret = CRE_IO; goto compress_file_cleanup; } if (compression == CR_CW_ZCK_COMPRESSION) { if (dict && cr_set_dict(new, dict, dict_size, &tmp_err) != CRE_OK) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Unable to set zdict for %s: ", dst); goto compress_file_cleanup; } if (zck_auto_chunk && cr_set_autochunk(new, TRUE, &tmp_err) != CRE_OK) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Unable to set auto-chunking for %s: ", dst); goto compress_file_cleanup; } } while ((readed = cr_read(orig, buf, BUFFER_SIZE, &tmp_err)) > 0) { cr_write(new, buf, readed, &tmp_err); if (tmp_err) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Unable to write to %s: ", dst); goto compress_file_cleanup; } } compress_file_cleanup: if (dst != in_dst) g_free(dst); if (orig) cr_close(orig, NULL); if (new) cr_close(new, NULL); return ret; } int cr_decompress_file_with_stat(const char *src, const char *in_dst, cr_CompressionType compression, cr_ContentStat *stat, GError **err) { int ret = CRE_OK; int readed; char buf[BUFFER_SIZE]; FILE *new = NULL; CR_FILE *orig = NULL; gchar *dst = (gchar *) in_dst; GError *tmp_err = NULL; assert(src); assert(!err || *err == NULL); // Src must be a file NOT a directory if (!g_file_test(src, G_FILE_TEST_IS_REGULAR)) { g_debug("%s: Source (%s) must be a regular file!", __func__, src); g_set_error(err, ERR_DOMAIN, CRE_NOFILE, "Not a regular file: %s", src); return CRE_NOFILE; } if (compression == CR_CW_AUTO_DETECT_COMPRESSION || compression == CR_CW_UNKNOWN_COMPRESSION) { compression = cr_detect_compression(src, NULL); } if (compression == CR_CW_UNKNOWN_COMPRESSION) { g_set_error(err, ERR_DOMAIN, CRE_UNKNOWNCOMPRESSION, "Cannot detect compression type"); return CRE_UNKNOWNCOMPRESSION; } const char *c_suffix = cr_compression_suffix(compression); if (!in_dst || g_str_has_suffix(in_dst, "/")) { char *filename = cr_get_filename(src); if (g_str_has_suffix(filename, c_suffix)) { filename = g_strndup(filename, strlen(filename) - strlen(c_suffix)); } else { filename = g_strconcat(filename, ".decompressed", NULL); } if (!in_dst) { // in_dst is NULL, use same dir as src char *src_dir = g_strndup(src, strlen(src) - strlen(cr_get_filename(src))); dst = g_strconcat(src_dir, filename, NULL); g_free(src_dir); } else { // in_dst is dir dst = g_strconcat(in_dst, filename, NULL); } g_free(filename); } orig = cr_sopen(src, CR_CW_MODE_READ, compression, stat, &tmp_err); if (orig == NULL) { g_debug("%s: Cannot open source file %s", __func__, src); g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", src); ret = CRE_IO; goto compress_file_cleanup; } new = fopen(dst, "wb"); if (!new) { g_debug("%s: Cannot open destination file %s (%s)", __func__, dst, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open 
%s: %s", src, g_strerror(errno)); ret = CRE_IO; goto compress_file_cleanup; } while ((readed = cr_read(orig, buf, BUFFER_SIZE, &tmp_err)) > 0) { if (tmp_err) { g_debug("%s: Error while copy %s -> %s (%s)", __func__, src, dst, tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "Error while read %s: ", src); ret = CRE_IO; goto compress_file_cleanup; } if (fwrite(buf, 1, readed, new) != (size_t) readed) { g_debug("%s: Error while copy %s -> %s (%s)", __func__, src, dst, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Error while write %s: %s", dst, g_strerror(errno)); ret = CRE_IO; goto compress_file_cleanup; } } compress_file_cleanup: if (dst != in_dst) g_free(dst); if (orig) cr_close(orig, NULL); if (new) fclose(new); return ret; } int cr_download(CURL *in_handle, const char *url, const char *in_dst, GError **err) { CURL *handle = NULL; CURLcode rcode; char errorbuf[CURL_ERROR_SIZE]; _cleanup_free_ gchar *dst = NULL; _cleanup_file_fclose_ FILE *file = NULL; assert(in_handle); assert(!err || *err == NULL); // If destination is dir use filename from src if (g_str_has_suffix(in_dst, "/")) dst = g_strconcat(in_dst, cr_get_filename(url), NULL); else if (g_file_test(in_dst, G_FILE_TEST_IS_DIR)) dst = g_strconcat(in_dst, "/", cr_get_filename(url), NULL); else dst = g_strdup(in_dst); // Open dst file file = fopen(dst, "wb"); if (!file) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open %s: %s", dst, g_strerror(errno)); remove(dst); return CRE_IO; } // Dup the input handle handle = curl_easy_duphandle(in_handle); // Set error buffer errorbuf[0] = '\0'; rcode = curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errorbuf); if (rcode != CURLE_OK) { g_set_error(err, ERR_DOMAIN, CRE_CURL, "curl_easy_setopt failed(CURLOPT_ERRORBUFFER): %s", curl_easy_strerror(rcode)); return CRE_CURL; } // Set URL rcode = curl_easy_setopt(handle, CURLOPT_URL, url); if (rcode != CURLE_OK) { g_set_error(err, ERR_DOMAIN, CRE_CURL, "curl_easy_setopt failed(CURLOPT_URL): %s", curl_easy_strerror(rcode)); remove(dst); return CRE_CURL; } // Set output file descriptor rcode = curl_easy_setopt(handle, CURLOPT_WRITEDATA, file); if (rcode != CURLE_OK) { g_set_error(err, ERR_DOMAIN, CRE_CURL, "curl_easy_setopt(CURLOPT_WRITEDATA) failed: %s", curl_easy_strerror(rcode)); remove(dst); return CRE_CURL; } // Download the file rcode = curl_easy_perform(handle); if (rcode != CURLE_OK) { g_set_error(err, ERR_DOMAIN, CRE_CURL, "curl_easy_perform failed: %s: %s", curl_easy_strerror(rcode), errorbuf); remove(dst); return CRE_CURL; } g_debug("%s: Successfully downloaded: %s", __func__, dst); return CRE_OK; } gboolean cr_better_copy_file(const char *src, const char *in_dst, GError **err) { GError *tmp_err = NULL; assert(!err || *err == NULL); if (!strstr(src, "://")) // Probably local path return cr_copy_file(src, in_dst, err); CURL *handle = curl_easy_init(); cr_download(handle, src, in_dst, &tmp_err); curl_easy_cleanup(handle); if (tmp_err) { g_debug("%s: Error while downloading %s: %s", __func__, src, tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "Error while downloading %s: ", src); return FALSE; } return TRUE; } int cr_remove_dir_cb(const char *fpath, G_GNUC_UNUSED const struct stat *sb, G_GNUC_UNUSED int typeflag, G_GNUC_UNUSED struct FTW *ftwbuf) { int rv = remove(fpath); if (rv) g_warning("%s: Cannot remove: %s: %s", __func__, fpath, g_strerror(errno)); return rv; } int cr_remove_dir(const char *path, GError **err) { int ret; assert(!err || *err == NULL); ret = nftw(path, cr_remove_dir_cb, 64, FTW_DEPTH | 
FTW_PHYS); if (ret != 0) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot remove dir %s: %s", path, g_strerror(errno)); return CRE_IO; } return CRE_OK; } // Return path with exactly one trailing '/' char * cr_normalize_dir_path(const char *path) { char *normalized = NULL; if (!path) return normalized; int i = strlen(path); if (i == 0) { return g_strdup("./"); } do { // Skip all trailing '/' i--; } while (i >= 0 && path[i] == '/'); normalized = g_strndup(path, i+2); if (normalized[i+1] != '/') { normalized[i+1] = '/'; } return normalized; } struct cr_Version cr_str_to_version(const char *str) { char *endptr; const char *ptr = str; struct cr_Version ver; ver.major = 0; ver.minor = 0; ver.patch = 0; ver.suffix = NULL; if (!str || str[0] == '\0') { return ver; } // Major chunk ver.major = strtol(ptr, &endptr, 10); if (!endptr || endptr[0] == '\0') { // Whole string has been converted successfully return ver; } else { if (endptr[0] == '.') { // '.' is supposed to be delimiter -> skip it and go to next chunk ptr = endptr+1; } else { ver.suffix = g_strdup(endptr); return ver; } } // Minor chunk ver.minor = strtol(ptr, &endptr, 10); if (!endptr || endptr[0] == '\0') { // Whole string has been converted successfully return ver; } else { if (endptr[0] == '.') { // '.' is supposed to be delimiter -> skip it and go to next chunk ptr = endptr+1; } else { ver.suffix = g_strdup(endptr); return ver; } } // Patch chunk ver.patch = strtol(ptr, &endptr, 10); if (!endptr || endptr[0] == '\0') { // Whole string has been converted successfully return ver; } else { if (endptr[0] == '.') { // '.' is supposed to be delimiter -> skip it and go to next chunk ptr = endptr+1; } else { ver.suffix = g_strdup(endptr); return ver; } } return ver; } static int cr_compare_values(const char *str1, const char *str2) { if (!str1 && !str2) return 0; else if (str1 && !str2) return 1; else if (!str1 && str2) return -1; return rpmvercmp(str1, str2); } // Return values: // 0 - versions are same // 1 - first string is bigger version // 2 - second string is bigger version // Examples: // "6.3.2azb" > "6.3.2abc" // "2.1" < "2.1.3" int cr_cmp_version_str(const char* str1, const char *str2) { int rc = cr_compare_values(str1 ? str1 : "", str2 ? 
str2 : ""); if (rc == -1) rc = 2; return rc; } void cr_null_log_fn(G_GNUC_UNUSED const gchar *log_domain, G_GNUC_UNUSED GLogLevelFlags log_level, G_GNUC_UNUSED const gchar *message, G_GNUC_UNUSED gpointer user_data) { return; } void cr_log_fn(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { gint hidden_log_levels = GPOINTER_TO_INT(user_data); if (log_level & hidden_log_levels) return; switch(log_level) { case G_LOG_LEVEL_ERROR: if (log_domain) g_printerr("%s: ", log_domain); g_printerr("Error: %s\n", message); break; case G_LOG_LEVEL_CRITICAL: if (log_domain) g_printerr("%s: ", log_domain); g_printerr("Critical: %s\n", message); break; case G_LOG_LEVEL_WARNING: if (log_domain) g_printerr("%s: ", log_domain); g_printerr("Warning: %s\n", message); break; case G_LOG_LEVEL_DEBUG: { time_t rawtime; struct tm * timeinfo; char buffer[80]; time ( &rawtime ); timeinfo = localtime ( &rawtime ); strftime (buffer, 80, "%H:%M:%S", timeinfo); //if (log_domain) g_printerr("%s: ", log_domain); g_printerr("%s: %s\n", buffer, message); break; } default: printf("%s\n", message); } return; } void cr_slist_free_full(GSList *list, GDestroyNotify free_f) { g_slist_free_full(list, free_f); } void cr_queue_free_full(GQueue *queue, GDestroyNotify free_f) { g_queue_free_full(queue, free_f); } cr_NEVRA * cr_split_rpm_filename(const char *filename) { cr_NEVRA *nevra = NULL; gchar *str, *epoch = NULL; size_t len; filename = cr_get_filename(filename); if (!filename) return NULL; str = g_strdup(filename); // N-V-R.rpm:E if (strchr(str, ':')) { gchar **filename_epoch = g_strsplit(str, ":", 2); if (g_str_has_suffix(filename_epoch[0], ".rpm")) { g_free(str); str = filename_epoch[0]; epoch = filename_epoch[1]; } else { g_strfreev(filename_epoch); } } len = strlen(str); // Get rid off .rpm suffix if (len >= 4 && !strcmp(str+(len-4), ".rpm")) { len -= 4; str[len] = '\0'; } nevra = cr_str_to_nevra(str); g_free(str); if (epoch) { g_free(nevra->epoch); nevra->epoch = epoch; } return nevra; } /** Split N-V-R:E or E:N-V-R or N-E:V-R */ cr_NEVR * cr_str_to_nevr(const char *instr) { gchar *nvr = NULL; gchar *epoch = NULL; gchar **nvr_epoch_list = NULL; cr_NEVR *nevr = NULL; size_t len; int i; if (!instr) return NULL; // 1) // Try to split by ':' // If we have N-V-R:E or E:N-V-R then nvr and epoch will be filled // If we have N-E:V-R or N-V-R then only nvr will be filed nvr_epoch_list = g_strsplit(instr, ":", 2); if (!nvr_epoch_list || !(*nvr_epoch_list)) { g_strfreev(nvr_epoch_list); return NULL; } nvr = nvr_epoch_list[0]; epoch = nvr_epoch_list[1]; // May be NULL if (epoch && strchr(epoch, '-')) { if (!strchr(nvr, '-')) { // Switch nvr and epoch char *tmp = nvr; nvr = epoch; epoch = tmp; } else { // Probably the N-E:V-R format, handle it after the split g_free(nvr); g_free(epoch); nvr = g_strdup(instr); epoch = NULL; } } g_free(nvr_epoch_list); // 2) // Now split the nvr by the '-' into three parts nevr = g_new0(cr_NEVR, 1); len = strlen(nvr); // Get release for (i = len-1; i >= 0; i--) if (nvr[i] == '-') { nevr->release = g_strdup(nvr+i+1); nvr[i] = '\0'; len = i; break; } // Get version for (i = len-1; i >= 0; i--) if (nvr[i] == '-') { nevr->version = g_strdup(nvr+i+1); nvr[i] = '\0'; len = i; break; } // Get name nevr->name = g_strdup(nvr); g_free(nvr); // 3) // Now split the E:V if (epoch == NULL && (nevr->version && strchr(nevr->version, ':'))) { gchar **epoch_version = g_strsplit(nevr->version, ":", 2); g_free(nevr->version); nevr->epoch = epoch_version[0]; nevr->version = 
epoch_version[1]; g_free(epoch_version); } else { nevr->epoch = epoch; } return nevr; } void cr_nevr_free(cr_NEVR *nevr) { if (!nevr) return; g_free(nevr->name); g_free(nevr->epoch); g_free(nevr->version); g_free(nevr->release); g_free(nevr); } cr_NEVRA * cr_str_to_nevra(const char *instr) { cr_NEVR *nevr; cr_NEVRA *nevra = NULL; gchar *str, *epoch = NULL; size_t len; int i; if (!instr) return NULL; nevra = g_new0(cr_NEVRA, 1); str = g_strdup(instr); // N-V-R.A:E if (strchr(str, ':')) { gchar **nvra_epoch = g_strsplit(str, ":", 2); char *epoch_candidate = nvra_epoch[1]; if (epoch_candidate && !strchr(epoch_candidate, '-') && !strchr(epoch_candidate, '.')) { // Strip epoch from the very end epoch = epoch_candidate; str = nvra_epoch[0]; } else { g_strfreev(nvra_epoch); } } len = strlen(str); // Get arch for (i = len-1; i >= 0; i--) if (str[i] == '.') { nevra->arch = g_strdup(str+i+1); str[i] = '\0'; len = i; break; } if (nevra->arch && strchr(nevra->arch, '-')) { g_warning("Invalid arch %s", nevra->arch); cr_nevra_free(nevra); g_free(str); return NULL; } nevr = cr_str_to_nevr(str); nevra->name = nevr->name; nevra->epoch = nevr->epoch; nevra->version = nevr->version; nevra->release = nevr->release; g_free(nevr); g_free(str); if (epoch) { g_free(nevra->epoch); nevra->epoch = epoch; } return nevra; } void cr_nevra_free(cr_NEVRA *nevra) { if (!nevra) return; g_free(nevra->name); g_free(nevra->epoch); g_free(nevra->version); g_free(nevra->release); g_free(nevra->arch); g_free(nevra); } int cr_cmp_evr(const char *e1, const char *v1, const char *r1, const char *e2, const char *v2, const char *r2) { int rc; if (e1 == NULL) e1 = "0"; if (e2 == NULL) e2 = "0"; rc = cr_compare_values(e1, e2); if (rc) return rc; rc = cr_compare_values(v1, v2); if (rc) return rc; rc = cr_compare_values(r1, r2); return rc; } int cr_warning_cb(G_GNUC_UNUSED cr_XmlParserWarningType type, char *msg, void *cbdata, G_GNUC_UNUSED GError **err) { g_warning("%s: %s", (char *) cbdata, msg); return CR_CB_RET_OK; } gboolean cr_write_to_file(GError **err, gchar *filename, const char *format, ...) 
{ assert(filename); assert(!err || *err == NULL); if (!format) return TRUE; FILE *f = fopen(filename, "w"); if (!f) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot open %s: %s", filename, g_strerror(errno)); return FALSE; } va_list args; va_start(args, format); vfprintf (f, format, args); va_end(args); gboolean ret = TRUE; if (ferror(f)) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot write content to %s: %s", filename, g_strerror(errno)); ret = FALSE; } fclose(f); return ret; } static gboolean cr_run_command(char **cmd, const char *working_dir, GError **err) { assert(cmd); assert(!err || *err == NULL); GError *tmp_err = NULL; gint status = 0; gchar *error_str = NULL; int spawn_flags = G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL; g_spawn_sync(working_dir, cmd, NULL, // envp spawn_flags, NULL, // child setup function NULL, // user data for child setup NULL, // stdout &error_str, // stderr &status, &tmp_err); if (tmp_err) { g_free(error_str); g_propagate_error(err, tmp_err); return FALSE; } gboolean ret = cr_spawn_check_exit_status(status, &tmp_err); if (!ret && error_str) { // Remove newlines from error message for (char *ptr = error_str; *ptr; ptr++) if (*ptr == '\n') *ptr = ' '; g_propagate_prefixed_error(err, tmp_err, "%s: ", error_str); } g_free(error_str); return ret; } gboolean cr_cp(const char *src, const char *dst, cr_CpFlags flags, const char *working_dir, GError **err) { assert(src); assert(dst); assert(!err || *err == NULL); GPtrArray *argv_array = g_ptr_array_new(); g_ptr_array_add(argv_array, "cp"); if (flags & CR_CP_RECURSIVE) g_ptr_array_add(argv_array, "-r"); if (flags & CR_CP_PRESERVE_ALL) g_ptr_array_add(argv_array, "--preserve=all"); g_ptr_array_add(argv_array, (char *) src); g_ptr_array_add(argv_array, (char *) dst); g_ptr_array_add(argv_array, (char *) NULL); gboolean ret = cr_run_command((char **) argv_array->pdata, working_dir, err); g_ptr_array_free(argv_array, TRUE); return ret; } gboolean cr_rm(const char *path, cr_RmFlags flags, const char *working_dir, GError **err) { assert(path); assert(!err || *err == NULL); GPtrArray *argv_array = g_ptr_array_new(); g_ptr_array_add(argv_array, "rm"); if (flags & CR_RM_RECURSIVE) g_ptr_array_add(argv_array, "-r"); if (flags & CR_RM_FORCE) g_ptr_array_add(argv_array, "-f"); g_ptr_array_add(argv_array, (char *) path); g_ptr_array_add(argv_array, (char *) NULL); gboolean ret = cr_run_command((char **) argv_array->pdata, working_dir, err); g_ptr_array_free(argv_array, TRUE); return ret; } gchar * cr_append_pid_and_datetime(const char *str, const char *suffix) { struct tm * timeinfo; struct timeval tv; char datetime[80]; gettimeofday(&tv, NULL); timeinfo = localtime (&(tv.tv_sec)); strftime(datetime, 80, "%Y%m%d%H%M%S", timeinfo); gchar *result = g_strdup_printf("%s%jd.%s.%ld%s", str ? str : "", (intmax_t) getpid(), datetime, tv.tv_usec, suffix ? 
suffix : ""); return result; } gboolean cr_spawn_check_exit_status(gint exit_status, GError **err) { assert(!err || *err == NULL); if (WIFEXITED(exit_status)) { if (WEXITSTATUS(exit_status) == 0) { // Exit code == 0 means success return TRUE; } else { g_set_error (err, ERR_DOMAIN, CRE_SPAWNERRCODE, "Child process exited with code %ld", (long) WEXITSTATUS(exit_status)); } } else if (WIFSIGNALED(exit_status)) { g_set_error (err, ERR_DOMAIN, CRE_SPAWNKILLED, "Child process killed by signal %ld", (long) WTERMSIG(exit_status)); } else if (WIFSTOPPED(exit_status)) { g_set_error (err, ERR_DOMAIN, CRE_SPAWNSTOPED, "Child process stopped by signal %ld", (long) WSTOPSIG(exit_status)); } else { g_set_error (err, ERR_DOMAIN, CRE_SPAWNABNORMAL, "Child process exited abnormally"); } return FALSE; } gboolean cr_identical_files(const gchar *fn1, const gchar *fn2, gboolean *identical, GError **err) { int rc; GStatBuf buf1, buf2; *identical = FALSE; // Stat old file rc = g_stat(fn1, &buf1); if (rc == -1) { if (errno == ENOENT) // The first file doesn't exist return TRUE; g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot stat %s: %s", fn1, g_strerror(errno)); return FALSE; } // Stat new file rc = g_stat(fn2, &buf2); if (rc == -1) { if (errno == ENOENT) // The second file doesn't exist return TRUE; g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot stat %s: %s", fn2, g_strerror(errno)); return FALSE; } // Check if both paths point to the same file if (buf1.st_ino == buf2.st_ino) *identical = TRUE; return TRUE; } gchar * cr_cut_dirs(gchar *path, gint cut_dirs) { if (!path) return NULL; if (cut_dirs < 1) return path; gchar *last_component = NULL; for (gchar *p = path; *p; p++) { if (*p == '/') last_component = p; } if (last_component == NULL) return path; gchar *cut = path; gint n = 0; gint state = 0; for (gchar *p = path; p <= last_component; p++) { if (state == 0) { if (*p == '/') { cut = p; } else { state = 1; if (n == cut_dirs) break; } } else if (state == 1) { if (*p == '/') { cut = p; state = 0; n++; } } } return cut+1; } const gchar * cr_version_string_with_features(void) { return (xstr(CR_VERSION_MAJOR) "." xstr(CR_VERSION_MINOR) "." xstr(CR_VERSION_PATCH) " (Features: " #ifdef CR_DELTA_RPM_SUPPORT "DeltaRPM " #endif #ifdef ENABLE_LEGACY_WEAKDEPS "LegacyWeakdeps " #endif #ifdef ENABLE_THREADED_XZ_ENCODER "ThreadedXzEncoder " #endif ")"); } gchar * cr_get_dict_file(const gchar *dir, const gchar *file) { gchar *dict_file = malloc(strlen(file) + 7); assert(dict_file); snprintf(dict_file, strlen(file) + 7, "%s.zdict", file); gchar *full_path = g_build_path("/", dir, dict_file, NULL); assert(full_path); free(dict_file); if (!g_file_test(full_path, G_FILE_TEST_EXISTS)) { g_warning("%s: Zchunk dict %s doesn't exist", __func__, full_path); free(full_path); return NULL; } return full_path; } createrepo_c-0.17.0/src/misc.h000066400000000000000000000460071400672373200161400ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_MISC_H__ #define __C_CREATEREPOLIB_MISC_H__ #ifdef __cplusplus extern "C" { #endif #include #include #include #include "compression_wrapper.h" #include "xml_parser.h" /** \defgroup misc Miscellaneous useful functions and macros. * \addtogroup misc * @{ */ /** Lenght of static string (including last '\0' byte) */ #define CR_STATICSTRLEN(s) (sizeof(s)/sizeof(s[0])) /* Length of static defined array. */ #define CR_ARRAYLEN(x) ((sizeof(x)/sizeof(0[x])) / ((size_t)(!(sizeof(x) % sizeof(0[x]))))) /** Convert flags from RPM header to a string representation. * @param flags flags * @return flags as constant string */ const char *cr_flag_to_str(gint64 flags); /** Epoch-Version-Release representation. */ typedef struct { char *epoch; /*!< epoch */ char *version; /*!< version */ char *release; /*!< release */ } cr_EVR; typedef struct { char *name; char *epoch; char *version; char *release; } cr_NEVR; typedef struct { char *name; char *epoch; char *version; char *release; char *arch; } cr_NEVRA; /** Version representation * e.g. for openssl-devel-1.0.0i = version: 1, release: 0, patch: 0, suffix: i */ struct cr_Version { long major; /*!< version */ long minor; /*!< release */ long patch; /*!< patch */ char *suffix; /*!< rest of version string after conversion */ }; /** Convert epoch-version-release string into cr_EVR structure. * If no GStringChunk passed, all non NULL items in returned structure * are malloced and in that case, you have to free all non-NULL element * yourself. * @param string NULL terminated n-v-r string * @param chunk string chunk for strings (optional - could be NULL) * @return filled NVR */ cr_EVR *cr_str_to_evr(const char *string, GStringChunk *chunk); /** Free cr_EVR * Warning: Do not use this function when a string chunk was * used in the cr_str_to_evr! In that case use only g_free on * the cr_EVR pointer. * @param evr cr_EVR structure */ void cr_evr_free(cr_EVR *evr); /** Check if the filename match pattern for primary files (files listed * in primary.xml). * @param filename full path to file * @return 1 if it is primary file, otherwise 0 */ static inline int cr_is_primary(const char *filename) { if (!strncmp(filename, "/etc/", 5)) return 1; if (!strcmp(filename, "/usr/lib/sendmail")) return 1; if (strstr(filename, "bin/")) return 1; return 0; }; /** Header range */ struct cr_HeaderRangeStruct { unsigned int start; /*!< First byte of header */ unsigned int end; /*!< Last byte of header */ }; /** Return header byte range. * @param filename filename * @param err GError ** * @return header range (start = end = 0 on error) */ struct cr_HeaderRangeStruct cr_get_header_byte_range(const char *filename, GError **err); /** Return pointer to the rest of string after last '/'. * (e.g. for "/foo/bar" returns "bar") * @param filepath path * @return pointer into the path */ char *cr_get_filename(const char *filepath); /** Return pointer to the rest of string after './' prefix. * (e.g. for "././foo/bar" returns "foo/bar") * @param filepath path * @return pointer into the path */ char *cr_get_cleaned_href(const char *filepath); /** Download a file from the URL into the in_dst via curl handle. 
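 *
 * Illustrative usage sketch (added; the URL and destination below are
 * placeholders):
 *   CURL *handle = curl_easy_init();
 *   GError *err = NULL;
 *   cr_download(handle, "http://example.com/repodata/repomd.xml", "/tmp/", &err);
 *   curl_easy_cleanup(handle);
 *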
* @param handle CURL handle * @param url source url * @param destination destination (if destination is dir, filename from the * url is used) * @param err GError ** * @return cr_Error */ int cr_download(CURL *handle, const char *url, const char *destination, GError **err); /** Copy file. * @param src source filename * @param dst destination (if dst is dir, filename of src is used) * @param err GError ** * @return TRUE on success, FALSE if an error occured */ gboolean cr_copy_file(const char *src, const char *dst, GError **err); /** Compress file. * @param SRC source filename * @param DST destination (If dst is dir, filename of src + * compression suffix is used. * If dst is NULL, src + compression suffix is used) * @param COMTYPE type of compression * @param ZCK_DICT_DIR Location of zchunk zdicts (if zchunk is enabled) * @param ZCK_AUTO_CHUNK Whether zchunk file should be auto-chunked * @param ERR GError ** * @return cr_Error return code */ #define cr_compress_file(SRC, DST, COMTYPE, ZCK_DICT_DIR, ZCK_AUTO_CHUNK, ERR) \ cr_compress_file_with_stat(SRC, DST, COMTYPE, NULL, ZCK_DICT_DIR, \ ZCK_AUTO_CHUNK, ERR) /** Compress file. * @param src source filename * @param dst destination (If dst is dir, filename of src + * compression suffix is used. * If dst is NULL, src + compression suffix is used) * @param comtype type of compression * @param stat pointer to cr_ContentStat or NULL * @param zck_dict_dir Location of zchunk zdicts (if zchunk is enabled) * @param zck_auto_chunk Whether zchunk file should be auto-chunked * @param err GError ** * @return cr_Error return code */ int cr_compress_file_with_stat(const char *src, const char *dst, cr_CompressionType comtype, cr_ContentStat *stat, const char *zck_dict_dir, gboolean zck_auto_chunk, GError **err); /** Decompress file. * @param SRC source filename * @param DST destination (If dst is dir, filename of src without * compression suffix (if present) is used. * If dst is NULL, src without compression suffix is used) * Otherwise ".decompressed" suffix is used * @param COMTYPE type of compression * @param ERR GError ** * @return cr_Error return code */ #define cr_decompress_file(SRC, DST, COMTYPE, ERR) \ cr_decompress_file_with_stat(SRC, DST, COMTYPE, NULL, ERR) /** Decompress file. * @param src source filename * @param dst destination (If dst is dir, filename of src without * compression suffix (if present) is used. * If dst is NULL, src without compression suffix is used) * Otherwise ".decompressed" suffix is used * @param comtype type of compression * @param stat pointer to cr_ContentStat or NULL * @param err GError ** * @return cr_Error return code */ int cr_decompress_file_with_stat(const char *src, const char *dst, cr_CompressionType comtype, cr_ContentStat *stat, GError **err); /** Better copy file. Source (src) could be remote address (http:// or ftp://). * @param src source filename * @param dst destination (if dst is dir, filename of src is used) * @param err GError ** * @return TRUE on success, FALSE if an error occured */ gboolean cr_better_copy_file(const char *src, const char *dst, GError **err); /** Recursively remove directory. * @param path filepath * @param err GError ** * @return cr_Error return code */ int cr_remove_dir(const char *path, GError **err); /** Normalize path (Path with exactly one trailing '/'). *@param path path *@return mallocated string with normalized path or NULL */ char *cr_normalize_dir_path(const char *path); /** Convert version string into cr_Version struct. 
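 *
 * Illustrative examples (added; reflecting the implementation in misc.c):
 *   "1.2.3"  -> major 1, minor 2, patch 3, suffix NULL
 *   "1.2.3i" -> major 1, minor 2, patch 3, suffix "i"
 *   "2.1"    -> major 2, minor 1, patch 0, suffix NULL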
* @param str version string * @return cr_Version */ struct cr_Version cr_str_to_version(const char *str); /** Compare two version string. * @param str1 first version string * @param str2 second version string * @return 0 - versions are same, 1 - first string is bigger * version, 2 - second string is bigger version */ int cr_cmp_version_str(const char* str1, const char *str2); /** Logging function with no output. * @param log_domain logging domain * @param log_level logging level * @param message message * @param user_data user data */ void cr_null_log_fn(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); /** Createrepo_c library standard logging function. * @param log_domain logging domain * @param log_level logging level * @param message message * @param user_data user data */ void cr_log_fn(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data); /** Frees all the memory used by a GSList, and calls the specified destroy * function on every element's data. * This is the same function as g_slist_free_full(). The original function * is implemented in glib since 2.28 but we need to support the older glib too. * @param list pointer to GSList * @param free_f the function to be called to free each element's data */ void cr_slist_free_full(GSList *list, GDestroyNotify free_f); /** Convenience method, which frees all the memory used by a GQueue, * and calls the specified destroy function on every element's data. * This is the same function as g_queue_free_full(). The original function * is implemented in glib since 2.32 but we need to support the older glib too. * @param queue a pointer to a GQueue * @param the function to be called to free each element's data */ void cr_queue_free_full(GQueue *queue, GDestroyNotify free_f); /** Split filename into the NEVRA. * Supported formats: * [path/]N-V-R:E.A[.rpm] * [path/]E:N-V-R.A[.rpm] * [path/]N-E:V-R.A[.rpm] * [path/]N-V-R.A[.rpm]:E * @param filename filename * @return cr_NEVRA */ cr_NEVRA *cr_split_rpm_filename(const char *filename); /** Compare evr of two cr_NEVRA. Name and arch are ignored. * @param A pointer to first cr_NEVRA * @param B pointer to second cr_NEVRA * @return 0 = same, 1 = first is newer, -1 = second is newer */ #define cr_cmp_nevra(A, B) (cr_cmp_evr((A)->epoch, (A)->version, (A)->release,\ (B)->epoch, (B)->version, (B)->release)) /** Compare two version strings splited into evr chunks. * @param e1 1. epoch * @param v1 1. version * @param r1 1. release * @param e2 2. epoch * @param v2 2. version * @param r2 2. release * @return 0 = same, 1 = first is newer, -1 = second is newer */ int cr_cmp_evr(const char *e1, const char *v1, const char *r1, const char *e2, const char *v2, const char *r2); /** Safe insert into GStringChunk. * @param chunk a GStringChunk * @param str string to add or NULL * @return pointer to the copy of str or NULL if str is NULL */ static inline gchar * cr_safe_string_chunk_insert(GStringChunk *chunk, const char *str) { if (!str) return NULL; return g_string_chunk_insert(chunk, str); } /** Safe insert into GStringChunk with free the str afterwards. * @param chunk a GStringChunk * @param str string to add or NULL * @return pointer to the copy of str on NULL if str was NULL */ static inline gchar * cr_safe_string_chunk_insert_and_free(GStringChunk *chunk, char *str) { if (!str) return NULL; gchar *copy = g_string_chunk_insert(chunk, str); g_free(str); return copy; } /** Safe insert into GStringChunk. 
If str is NULL or "\0" inserts nothing and * returns NULL. * @param chunk a GStringChunk * @param str string to add or NULL * @return pointer to the copy of str or NULL if str is NULL */ static inline gchar * cr_safe_string_chunk_insert_null(GStringChunk *chunk, const char *str) { if (!str || *str == '\0') return NULL; return g_string_chunk_insert(chunk, str); } /** Safe const insert into GStringChunk. * @param chunk a GStringChunk * @param str string to add or NULL * @return pointer to the copy of str or NULL if str is NULL */ static inline gchar * cr_safe_string_chunk_insert_const(GStringChunk *chunk, const char *str) { if (!str) return NULL; return g_string_chunk_insert_const(chunk, str); } static inline gboolean cr_key_file_get_boolean_default(GKeyFile *key_file, const gchar *group_name, const gchar *key, gboolean default_value, GError **error) { GError *tmp_err = NULL; gboolean ret = g_key_file_get_boolean(key_file, group_name, key, &tmp_err); if (tmp_err) { g_propagate_error(error, tmp_err); return default_value; } return ret; } /** Warning callback for xml parser warnings. * For use in xml parsers like primary, filelists, other or repomd parser. * Name of the parser should be passed as a string via * warning callback data (warningcb_data) argument of the parser. */ int cr_warning_cb(cr_XmlParserWarningType type, char *msg, void *cbdata, GError **err); /** Open file and write content. * @param err GError ** * @param filename Filename * @param format Format string * @param ... Arguments */ gboolean cr_write_to_file(GError **err, gchar *filename, const char *format, ...); typedef enum { CR_CP_DEFAULT = (1<<0), /*!< No attributes - default */ CR_CP_RECURSIVE = (1<<1), /*!< Copy directories recursively */ CR_CP_PRESERVE_ALL = (1<<2), /*!< preserve the all attributes (if possible) */ } cr_CpFlags; /** Recursive copy of directory (works on files as well) * @param src Source (supports wildcards) * @param dst Destination (supports wildcards) * @param flags Flags * @param working_dir Working directory * @param err GError ** */ gboolean cr_cp(const char *src, const char *dst, cr_CpFlags flags, const char *working_directory, GError **err); typedef enum { CR_RM_DEFAULT = (1<<0), /*!< No attributes - default */ CR_RM_RECURSIVE = (1<<1), /*!< Copy directories recursively */ CR_RM_FORCE = (1<<2), /*!< Use force */ } cr_RmFlags; /** Wrapper over rm command * @param path Path (supports wildcards) * @param flags Flags * @param working_dir Working directory * @param err GError ** */ gboolean cr_rm(const char *path, cr_RmFlags flags, const char *working_dir, GError **err); /** Append "YYYYmmddHHMMSS.MICROSECONDS.PID" suffix to the str. 
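 *
 * Illustrative example (added; values are made up): as implemented in
 * misc.c the appended part has the form "<PID>.<YYYYmmddHHMMSS>.<microseconds>",
 * so cr_append_pid_and_datetime("repodata_", ".tmp") could return e.g.
 * "repodata_12345.20210101120000.123456.tmp".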
* @param str String or NULL * @param suffix Another string that will be appended or NULL * @param return Newly allocated string */ gchar * cr_append_pid_and_datetime(const char *str, const char *suffix); /** Createrepo_c's reimplementation of convinient * g_spawn_check_exit_status() function which is available since * glib 2.34 (createrepo_c is currently compatible with glib >= 2.28) * @param exit_status An exit code as returned from g_spawn_sync() * @param error GError ** * @returns TRUE if child exited successfully, * FALSE otherwise (and error will be set) */ gboolean cr_spawn_check_exit_status(gint exit_status, GError **error); /** Parse E:N-V-R or N-V-R:E or N-E:V-R string * @param str NEVR string * @returns Malloced cr_NEVR or NULL on error */ cr_NEVR * cr_str_to_nevr(const char *str); /** Free cr_NEVR * @param nevr cr_NEVR structure */ void cr_nevr_free(cr_NEVR *nevr); /** Parse E:N-V-R.A, N-V-R:E.A, N-E:V-R.A or N-V-R.A:E string. * @param str NEVRA string * @returns Malloced cr_NEVRA or NULL on error */ cr_NEVRA * cr_str_to_nevra(const char *str); /** Free cr_NEVRA * @param nevra cr_NEVRA structure */ void cr_nevra_free(cr_NEVRA *nevra); /** Are the files identical? * Different paths could point to the same file. * This functions checks if both paths point to the same file or not. * If one of the files doesn't exists, the funcion doesn't fail * and just put FALSE into "indentical" value and returns. * @param fn1 First path * @param fn2 Second path * @param identical Are the files same or not * @param err GError ** * @return FALSE if an error was encountered, TRUE otherwise */ gboolean cr_identical_files(const gchar *fn1, const gchar *fn2, gboolean *identical, GError **err); /** Cut first N components of path. * Note: Basename is never cut out. */ gchar * cr_cut_dirs(gchar *path, gint cut_dirs); /** Return string with createrepo_c lib version and available features * @return String with version and list of features */ const gchar * cr_version_string_with_features(void); /** Get dict file from dict directory * This functions returns a zchunk dictionary file from the zchunk dictionary * directory that matches the passed filename. If no zchunk dictionary file * exists or no dictionary directory is set, this function returns NULL * * The zchunk dictionary file must be the same as the passed filename with a * ".zdict" extension * * @param dir Zchunk dictionary directory * @param file File being zchunked * @return NULL if no matching file exists, or the full path to the * file otherwise */ gchar * cr_get_dict_file(const gchar *dir, const gchar *file); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_MISC_H__ */ createrepo_c-0.17.0/src/modifyrepo_c.c000066400000000000000000000265341400672373200176620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include #include "error.h" #include "version.h" #include "compression_wrapper.h" #include "createrepo_shared.h" #include "misc.h" #include "locate_metadata.h" #include "load_metadata.h" #include "package.h" #include "repomd.h" #include "sqlite.h" #include "xml_file.h" #include "modifyrepo_shared.h" #define ERR_DOMAIN CREATEREPO_C_ERROR typedef struct { gboolean version; gchar *mdtype; gchar *remove; gboolean compress; gboolean no_compress; gchar *compress_type; gchar *checksum; gboolean unique_md_filenames; gboolean simple_md_filenames; gboolean verbose; gchar *batchfile; gchar *new_name; gboolean zck; gchar *zck_dict_dir; } RawCmdOptions; static gboolean parse_arguments(int *argc, char ***argv, RawCmdOptions *options, GError **err) { const GOptionEntry cmd_entries[] = { { "version", 0, 0, G_OPTION_ARG_NONE, &(options->version), "Show program's version number and exit.", NULL }, { "mdtype", 0, 0, G_OPTION_ARG_STRING, &(options->mdtype), "Specific datatype of the metadata, will be derived from " "the filename if not specified.", "MDTYPE" }, { "remove", 0, 0, G_OPTION_ARG_STRING, &(options->remove), "Remove specified file from repodata.", NULL }, { "compress", 0, 0, G_OPTION_ARG_NONE, &(options->compress), "Compress the new repodata before adding it to the repo. " "(default)", NULL }, { "no-compress", 0, 0, G_OPTION_ARG_NONE, &(options->no_compress), "Do not compress the new repodata before adding it to the repo.", NULL }, { "compress-type", 0, 0, G_OPTION_ARG_STRING, &(options->compress_type), "Compression format to use.", "COMPRESS_TYPE" }, { "checksum", 's', 0, G_OPTION_ARG_STRING, &(options->checksum), "Specify the checksum type to use. (default: sha256)", "SUMTYPE" }, { "unique-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(options->unique_md_filenames), "Include the file's checksum in the filename, helps with proxies. " "(default)", NULL }, { "simple-md-filenames", 0, 0, G_OPTION_ARG_NONE, &(options->simple_md_filenames), "Do not include the file's checksum in the filename.", NULL }, { "verbose", 0, 0, G_OPTION_ARG_NONE, &(options->verbose), "Verbose output.", NULL}, { "batchfile", 'f', 0, G_OPTION_ARG_STRING, &(options->batchfile), "Batch file.", "BATCHFILE" }, { "new-name", 0, 0, G_OPTION_ARG_STRING, &(options->new_name), "New filename for the file", "NEWFILENAME"}, #ifdef WITH_ZCHUNK { "zck", 0, 0, G_OPTION_ARG_NONE, &(options->zck), "Generate zchunk files as well as the standard repodata.", NULL }, { "zck-dict-dir", 0, 0, G_OPTION_ARG_FILENAME, &(options->zck_dict_dir), "Directory containing compression dictionaries for use by zchunk", "ZCK_DICT_DIR" }, #endif { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }, }; // Frstly, set default values options->version = FALSE; options->mdtype = NULL; options->remove = NULL; options->compress = TRUE; options->no_compress = FALSE; options->compress_type = NULL; options->checksum = NULL; options->unique_md_filenames = TRUE; options->simple_md_filenames = FALSE; options->verbose = FALSE; options->batchfile = NULL; options->new_name = NULL; options->zck = FALSE; options->zck_dict_dir = NULL; GOptionContext *context; context = g_option_context_new(" \n" " modifyrepo_c --remove \n" " modifyrepo_c [OPTION...] 
--batchfile "); g_option_context_set_summary(context, "Modify a repository's repomd.xml"); g_option_context_add_main_entries(context, cmd_entries, NULL); gboolean ret = g_option_context_parse(context, argc, argv, err); g_option_context_free(context); return ret; } static gboolean check_arguments(RawCmdOptions *options, GError **err) { // --no-compress if (options->no_compress) { options->compress = FALSE; if (options->compress_type) { g_warning("Use --compress-type simultaneously with --no-compress " "doesn't make a sense"); } } // --compress-type if (options->compress_type && cr_compression_type(options->compress_type) == \ CR_CW_UNKNOWN_COMPRESSION) { g_set_error(err, ERR_DOMAIN, CRE_ERROR, "Unknown compression type \"%s\"", options->compress_type); return FALSE; } // -s/--checksum if (options->checksum && cr_checksum_type(options->checksum) == CR_CHECKSUM_UNKNOWN) { g_set_error(err, ERR_DOMAIN, CRE_ERROR, "Unknown checksum type \"%s\"", options->checksum); return FALSE; } // --unique_md_filenames && --simple_md_filenames if (options->simple_md_filenames) { options->unique_md_filenames = FALSE; } // -f/--batchfile if (options->batchfile && !g_file_test(options->batchfile, G_FILE_TEST_IS_REGULAR)) { g_set_error(err, ERR_DOMAIN, CRE_ERROR, "File \"%s\" doesn't exist", options->batchfile); return FALSE; } // Zchunk options if (options->zck_dict_dir && !options->zck) { g_set_error(err, ERR_DOMAIN, CRE_ERROR, "Cannot use --zck-dict-dir without setting --zck"); return FALSE; } if (options->zck_dict_dir) options->zck_dict_dir = cr_normalize_dir_path(options->zck_dict_dir); return TRUE; } static void print_usage(void) { g_printerr( "Usage: modifyrepo_c [options] \n" "Usage: modifyrepo_c --remove \n" "Usage: modifyrepo_c [options] --batchfile " " \n"); } static gboolean cmd_options_to_task(GSList **modifyrepotasks, RawCmdOptions *options, gchar *metadatapath, GError **err) { assert(modifyrepotasks); assert(!err || *err == NULL); if (!options) return TRUE; //assert(metadatapath || options->remove); if (options->remove) g_debug("Preparing remove-task for: %s", options->remove); else g_debug("Preparing task for: %s", metadatapath); if (metadatapath && !g_file_test(metadatapath, G_FILE_TEST_IS_REGULAR)) { g_set_error(err, ERR_DOMAIN, CRE_ERROR, "File \"%s\" is not regular file or doesn't exists", metadatapath); return FALSE; } if (options->remove) metadatapath = options->remove; cr_ModifyRepoTask *task = cr_modifyrepotask_new(); task->path = cr_safe_string_chunk_insert_null(task->chunk, metadatapath); task->type = cr_safe_string_chunk_insert_null(task->chunk, options->mdtype); task->remove = (options->remove) ? 
TRUE : FALSE; task->compress = options->compress; task->compress_type = cr_compression_type(options->compress_type); task->unique_md_filenames = options->unique_md_filenames; task->checksum_type = cr_checksum_type(options->checksum); task->new_name = cr_safe_string_chunk_insert_null(task->chunk, options->new_name); task->zck = options->zck; task->zck_dict_dir = options->zck_dict_dir; *modifyrepotasks = g_slist_append(*modifyrepotasks, task); g_debug("Task: [path: %s, type: %s, remove: %d, compress: %d, " "compress_type: %d (%s), unique_md_filenames: %d, " "checksum_type: %d (%s), new_name: %s]", task->path, task->type, task->remove, task->compress, task->compress_type, cr_compression_suffix(task->compress_type), task->unique_md_filenames, task->checksum_type, cr_checksum_name_str(task->checksum_type), task->new_name); return TRUE; } int main(int argc, char **argv) { gboolean ret = TRUE; RawCmdOptions options; GError *err = NULL; // Parse arguments parse_arguments(&argc, &argv, &options, &err); if (err) { g_printerr("%s\n", err->message); print_usage(); g_error_free(err); exit(EXIT_FAILURE); } // Set logging cr_setup_logging(FALSE, options.verbose); // Print version if required if (options.version) { printf("Version: %s\n", cr_version_string_with_features()); exit(EXIT_SUCCESS); } // Check arguments check_arguments(&options, &err); if (err) { g_printerr("%s\n", err->message); print_usage(); g_error_free(err); exit(EXIT_FAILURE); } // Emit debug message with version g_debug("Version: %s", cr_version_string_with_features()); // Prepare list of tasks to do gchar *repodatadir = NULL; GSList *modifyrepotasks = NULL; if (!options.batchfile && !options.remove && argc == 3) { // three arguments (prog, metadata, repodata_dir) repodatadir = argv[2]; ret = cmd_options_to_task(&modifyrepotasks, &options, argv[1], &err); } else if (options.batchfile && argc == 2) { // two arguments (prog, repodata_dir) repodatadir = argv[1]; ret = cr_modifyrepo_parse_batchfile(options.batchfile, &modifyrepotasks, &err); } else if (!options.batchfile && options.remove && argc == 2) { // two arguments (prog, repodata_dir) repodatadir = argv[1]; ret = cmd_options_to_task(&modifyrepotasks, &options, NULL, &err); } else { // Bad arguments print_usage(); exit(EXIT_FAILURE); } if (!ret) { g_printerr("%s\n", err->message); g_error_free(err); exit(EXIT_FAILURE); } // Process the tasks ret = cr_modifyrepo(modifyrepotasks, repodatadir, &err); cr_slist_free_full(modifyrepotasks, (GDestroyNotify)cr_modifyrepotask_free); if (!ret) { g_printerr("%s\n", err->message); g_error_free(err); exit(EXIT_FAILURE); } exit(EXIT_SUCCESS); } createrepo_c-0.17.0/src/modifyrepo_shared.c000066400000000000000000000446161400672373200207070ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include "cleanup.h" #include "error.h" #include "misc.h" #include "checksum.h" #include "modifyrepo_shared.h" #include "compression_wrapper.h" #include "threads.h" #include "xml_dump.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define DEFAULT_COMPRESSION CR_CW_GZ_COMPRESSION #define DEFAULT_CHECKSUM CR_CHECKSUM_SHA256 cr_ModifyRepoTask * cr_modifyrepotask_new(void) { cr_ModifyRepoTask *task = g_new0(cr_ModifyRepoTask, 1); task->chunk = g_string_chunk_new(16); return task; } void cr_modifyrepotask_free(cr_ModifyRepoTask *task) { if (!task) return; g_string_chunk_free(task->chunk); g_free(task); } gchar * cr_remove_compression_suffix_if_present(gchar* name, GError **err) { cr_CompressionType src_fn_com_type = cr_detect_compression(name, err); if (src_fn_com_type != CR_CW_NO_COMPRESSION && src_fn_com_type != CR_CW_UNKNOWN_COMPRESSION){ const gchar *src_suffix = cr_compression_suffix(src_fn_com_type); if (src_suffix){ if (g_str_has_suffix(name, src_suffix)){ int name_len = strlen(name); int suffix_len = strlen(src_suffix); return g_strndup(name, name_len - suffix_len); } } } return g_strdup(name); } gchar * cr_write_file(gchar *repopath, cr_ModifyRepoTask *task, cr_CompressionType compress_type, GError **err) { const gchar *suffix = NULL; if (task->compress) suffix = cr_compression_suffix(compress_type); gchar *src_fn = task->path; // Shortcut gchar *dst_fn = NULL; char* sufixless_src_fn = cr_remove_compression_suffix_if_present(task->path, err); // Prepare dst filename - Get basename _cleanup_free_ gchar *filename = NULL; if (task->new_name) filename = g_path_get_basename(task->new_name); else filename = g_path_get_basename(sufixless_src_fn); g_free(sufixless_src_fn); // Prepare dst filename - Add suffix if (suffix) { gchar *tmp_fn = g_strconcat(filename, suffix, NULL); g_free(filename); filename = tmp_fn; } // Prepare dst filename - Full path dst_fn = g_build_filename(repopath, filename, NULL); task->dst_fn = g_string_chunk_insert(task->chunk, dst_fn); // Check if the src and dst is the same file gboolean identical = FALSE; if (!cr_identical_files(src_fn, dst_fn, &identical, err)) return NULL; if (identical) { // Source and destination file is the same file g_debug("Using already existing file: %s", dst_fn); } else { // Check if the file already exist if (g_file_test(dst_fn, G_FILE_TEST_EXISTS) && g_str_has_suffix(dst_fn, cr_compression_suffix(compress_type))) { g_warning("Destination file \"%s\" already exists and will be " "overwritten", dst_fn); } // Do the copy g_debug("%s: Copy & compress operation %s -> %s", __func__, src_fn, dst_fn); if (cr_compress_file(src_fn, dst_fn, compress_type, task->zck_dict_dir, TRUE, err) != CRE_OK) { g_debug("%s: Copy & compress operation failed", __func__); return NULL; } } return dst_fn; } gboolean cr_modifyrepo(GSList *modifyrepotasks, gchar *repopath, GError **err) { assert(!err || *err == NULL); if (!modifyrepotasks) { g_debug("%s: No tasks to process", __func__); return TRUE; } // Parse repomd.xml gchar *repomd_path = g_build_filename(repopath, "repomd.xml", NULL); if (!g_file_test(repomd_path, G_FILE_TEST_IS_REGULAR)) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Regular file \"%s\" doesn't exists", repomd_path); g_free(repomd_path); return FALSE; } cr_Repomd *repomd = cr_repomd_new(); int rc = 
cr_xml_parse_repomd(repomd_path, repomd, cr_warning_cb, "Repomd XML parser", err); if (rc != CRE_OK) { g_debug("%s: Error while parsing repomd.xml", __func__); cr_repomd_free(repomd); g_free(repomd_path); return FALSE; } // TODO: // (?) Autodetect used checksum_type // (?) Autodetect if unique_md_filenames are used // Prepare tasks for (GSList *elem = modifyrepotasks; elem; elem = g_slist_next(elem)) { cr_ModifyRepoTask *task = elem->data; if (!task->type) { // If type is not specified, derive it from path or new name gchar *basename; if (task->new_name) basename = g_path_get_basename(task->new_name); else basename = g_path_get_basename(task->path); // Split at first '.' in filename and use only first part for (gchar *tmp=basename; *tmp; tmp++) { if (*tmp == '.') { *tmp = '\0'; break; } } task->type = cr_safe_string_chunk_insert_null(task->chunk, basename); g_debug("%s: Use derived type \"%s\" (%s)", __func__, task->type, basename); g_free(basename); } if (task->remove) continue; if (task->compress && task->compress_type == CR_CW_UNKNOWN_COMPRESSION) // If compression enabled but type not specified, use default task->compress_type = DEFAULT_COMPRESSION; if (task->checksum_type == CR_CHECKSUM_UNKNOWN) // If no checksum type specified, use default task->checksum_type = DEFAULT_CHECKSUM; } // Check tasks for (GSList *elem = modifyrepotasks; elem; elem = g_slist_next(elem)) { cr_ModifyRepoTask *task = elem->data; if (task->remove) { // Check if metadata of a type that should be removed // exists in repomd if (!cr_repomd_get_record(repomd, task->type)) g_warning("Record of type \"%s\", which should be removed, " "doesn't exist in repomd.xml", task->path); if (task->new_name) g_warning("Use remove with new_name doesn't make a sense"); } else { // Check if file exists if (!g_file_test(task->path, G_FILE_TEST_IS_REGULAR)) { g_debug("%s: Regular file \"%s\" doesn't exist", __func__, task->path); cr_repomd_free(repomd); g_free(repomd_path); return FALSE; } // Check if new_name is not empty string if (task->new_name) { if (!g_strcmp0(task->new_name, "")) { g_debug("%s: New name cannot be empty", __func__); cr_repomd_free(repomd); g_free(repomd_path); return FALSE; } } // Check if record with this name doesn't exists yet if (cr_repomd_get_record(repomd, task->type)) g_warning("Record with type \"%s\" already exists " "in repomd.xml", task->type); } } // // Modifications of the target repository starts here // // Add (copy) new metadata to repodata/ directory for (GSList *elem = modifyrepotasks; elem; elem = g_slist_next(elem)) { cr_ModifyRepoTask *task = elem->data; _cleanup_free_ gchar *dst_fn = NULL; if (task->remove) // Skip removing task continue; cr_CompressionType compress_type = CR_CW_NO_COMPRESSION; if (task->compress) compress_type = task->compress_type; dst_fn = cr_write_file(repopath, task, compress_type, err); if (dst_fn == NULL) { cr_repomd_free(repomd); g_free(repomd_path); return FALSE; } task->repopath = cr_safe_string_chunk_insert_null(task->chunk, dst_fn); #ifdef WITH_ZCHUNK if (task->zck) { free(dst_fn); dst_fn = cr_write_file(repopath, task, CR_CW_ZCK_COMPRESSION, err); if (dst_fn == NULL) { cr_repomd_free(repomd); g_free(repomd_path); return FALSE; } task->zck_repopath = cr_safe_string_chunk_insert_null(task->chunk, dst_fn); } #endif } // Prepare new repomd records GSList *repomdrecords = NULL; GSList *repomdrecords_uniquefn = NULL; GSList *repomdrecordfilltasks = NULL; GThreadPool *fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 5, FALSE, NULL); for (GSList 
*elem = modifyrepotasks; elem; elem = g_slist_next(elem)) { cr_ModifyRepoTask *task = elem->data; if (task->remove) continue; cr_RepomdRecord *rec = cr_repomd_record_new(task->type, task->repopath); cr_RepomdRecordFillTask *filltask = cr_repomdrecordfilltask_new(rec, task->checksum_type, NULL); g_thread_pool_push(fill_pool, filltask, NULL); repomdrecords = g_slist_append(repomdrecords, rec); if (task->unique_md_filenames) repomdrecords_uniquefn = g_slist_prepend(repomdrecords_uniquefn, rec); repomdrecordfilltasks = g_slist_prepend(repomdrecordfilltasks, filltask); if (task->zck) { _cleanup_free_ gchar *type = g_strconcat(task->type, "_zck", NULL); rec = cr_repomd_record_new(type, task->zck_repopath); filltask = cr_repomdrecordfilltask_new(rec, task->checksum_type, NULL); g_thread_pool_push(fill_pool, filltask, NULL); repomdrecords = g_slist_append(repomdrecords, rec); if (task->unique_md_filenames) repomdrecords_uniquefn = g_slist_prepend(repomdrecords_uniquefn, rec); repomdrecordfilltasks = g_slist_prepend(repomdrecordfilltasks, filltask); } } g_thread_pool_free(fill_pool, FALSE, TRUE); // Wait for (GSList *elem = repomdrecordfilltasks; elem; elem = g_slist_next(elem)) { // Clean up tasks cr_RepomdRecordFillTask *filltask = elem->data; cr_repomdrecordfilltask_free(filltask, NULL); } g_slist_free(repomdrecordfilltasks); // Detach records from repomd GSList *recordstoremove = NULL; for (GSList *elem = modifyrepotasks; elem; elem = g_slist_next(elem)) { cr_ModifyRepoTask *task = elem->data; // Remove both, records that will be removed but also // records with types that will be added. cr_RepomdRecord *rec = cr_repomd_get_record(repomd, task->type); if (rec) { g_debug("%s: Removing record \"%s\" from repomd.xml", __func__, task->type); recordstoremove = g_slist_prepend(recordstoremove, rec); cr_repomd_detach_record(repomd, rec); if (task->zck) { _cleanup_free_ gchar *type = g_strconcat(task->type, "_zck", NULL); cr_RepomdRecord *rec = cr_repomd_get_record(repomd, type); if (rec) { g_debug("%s: Removing record \"%s\" from repomd.xml", __func__, type); recordstoremove = g_slist_prepend(recordstoremove, rec); cr_repomd_detach_record(repomd, rec); } } } } // Prepend checksum for (GSList *elem = repomdrecords_uniquefn; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *rec = elem->data; cr_repomd_record_rename_file(rec, NULL); } g_slist_free(repomdrecords_uniquefn); // Add records into repomd for (GSList *elem = repomdrecords; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *rec = elem->data; g_debug("Adding record \"%s\"", rec->type); cr_repomd_set_record(repomd, rec); } g_slist_free(repomdrecords); // Write repomd.xml cr_repomd_sort_records(repomd); gchar *repomd_xml = cr_xml_dump_repomd(repomd, NULL); g_debug("Generated repomd.xml:\n%s", repomd_xml); g_debug("%s: Writing modified %s", __func__, repomd_path); gboolean ret = cr_write_to_file(err, repomd_path, "%s", repomd_xml); g_free(repomd_xml); g_free(repomd_path); if (!ret) { assert(!err || *err); cr_repomd_free(repomd); return FALSE; } // Delete files of removed records for (GSList *elem = recordstoremove; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *rec = elem->data; if (rec->location_base) // Do not even try to remove records with base location continue; // Construct filename _cleanup_free_ gchar *realpath = g_build_filename(repopath, "../", rec->location_href, NULL); // Firstly check if the file that should be deleted isn't // really used by other record anymore. 
// It could happen if the user adds a file that already exists in repodata. Then we don't want to remove this file. gboolean remove_this = TRUE; // Check if a file that is referenced by a record that should // be removed belongs to any other record (a record that // shouldn't be removed). for (GSList *e = repomd->records; e; e = g_slist_next(e)) { cr_RepomdRecord *lrec = e->data; _cleanup_free_ gchar *lrealpath = NULL; // Construct filename lrealpath = g_build_filename(repopath, "../", lrec->location_href, NULL); // Check if files are identical gboolean identical = FALSE; if (!cr_identical_files(realpath, lrealpath, &identical, err)) return FALSE; // If yes, do not remove it if (identical) { remove_this = FALSE; break; } } if (!remove_this) continue; g_debug("%s: Removing \"%s\"", __func__, realpath); if (remove(realpath) == -1) g_warning("Cannot remove \"%s\": %s", realpath, g_strerror(errno)); } cr_slist_free_full(recordstoremove, (GDestroyNotify)cr_repomd_record_free); cr_repomd_free(repomd); return TRUE; } gboolean cr_modifyrepo_parse_batchfile(const gchar *path, GSList **modifyrepotasks, GError **err) { assert(!err || *err == NULL); if (!path) return TRUE; GKeyFile *keyfile = g_key_file_new(); gboolean ret = TRUE; ret = g_key_file_load_from_file(keyfile, path, G_KEY_FILE_NONE, err); if (!ret) { g_debug("%s: Parsing of modifyrepo batchfile failed", __func__); return FALSE; } gsize length; gchar **groups = g_key_file_get_groups(keyfile, &length); GSList *tasks = NULL; gboolean success = TRUE; for (gsize x = 0; x < length; x++) { gchar *group = groups[x]; assert(group); g_debug("%s: Group: \"%s\"", __func__, group); cr_ModifyRepoTask *task = cr_modifyrepotask_new(); tasks = g_slist_append(tasks, task); gchar *tmp_str; // As the path, use the path option value or the group // name if no path option is specified task->path = cr_safe_string_chunk_insert_and_free(task->chunk, g_key_file_get_string(keyfile, group, "path", NULL)); if (!task->path) task->path = cr_safe_string_chunk_insert(task->chunk, group); task->type = cr_safe_string_chunk_insert_and_free(task->chunk, g_key_file_get_string(keyfile, group, "type", NULL)); task->remove = cr_key_file_get_boolean_default(keyfile, group, "remove", FALSE, NULL); task->compress = cr_key_file_get_boolean_default(keyfile, group, "compress", TRUE, NULL); tmp_str = g_key_file_get_string(keyfile, group, "compress-type", NULL); task->compress_type = cr_compression_type(tmp_str); g_free(tmp_str); task->unique_md_filenames = cr_key_file_get_boolean_default(keyfile, group, "unique-md-filenames", TRUE, NULL); tmp_str = g_key_file_get_string(keyfile, group, "checksum", NULL); task->checksum_type = cr_checksum_type(tmp_str); g_free(tmp_str); task->new_name = cr_safe_string_chunk_insert_and_free(task->chunk, g_key_file_get_string(keyfile, group, "new-name", NULL)); g_debug("Task: [path: %s, type: %s, remove: %d, compress: %d, " "compress_type: %d (%s), unique_md_filenames: %d, " "checksum_type: %d (%s), new_name: %s]", task->path, task->type, task->remove, task->compress, task->compress_type, cr_compression_suffix(task->compress_type), task->unique_md_filenames, task->checksum_type, cr_checksum_name_str(task->checksum_type), task->new_name); } g_strfreev(groups); if (success) { *modifyrepotasks = g_slist_concat(*modifyrepotasks, tasks); } else { cr_slist_free_full(tasks, (GDestroyNotify)cr_modifyrepotask_free); } g_key_file_free(keyfile); return success; } 
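// Usage sketch (illustrative only, not part of the upstream sources): a
// minimal example of driving the modifyrepo_shared API above directly from C.
// The metadata path, repodata directory and function name are hypothetical
// placeholders and error handling is trimmed; when tasks come from a batch
// file, cr_modifyrepo_parse_batchfile() can be used to build the list instead.

#include <glib.h>
#include "modifyrepo_shared.h"
#include "misc.h"

static gboolean
example_add_updateinfo(GError **err)
{
    GSList *tasks = NULL;
    cr_ModifyRepoTask *task = cr_modifyrepotask_new();

    // Strings stored in task->chunk are released by cr_modifyrepotask_free()
    task->path = cr_safe_string_chunk_insert(task->chunk, "/tmp/updateinfo.xml");
    task->type = cr_safe_string_chunk_insert(task->chunk, "updateinfo");
    task->compress = TRUE;
    task->compress_type = CR_CW_GZ_COMPRESSION;   // same as cr_modifyrepo() default
    task->unique_md_filenames = TRUE;
    task->checksum_type = CR_CHECKSUM_SHA256;     // same as cr_modifyrepo() default
    tasks = g_slist_append(tasks, task);

    // Copies/compresses the file into repodata/ and rewrites repomd.xml there
    gchar repodata_dir[] = "/path/to/repo/repodata";
    gboolean ok = cr_modifyrepo(tasks, repodata_dir, err);

    cr_slist_free_full(tasks, (GDestroyNotify) cr_modifyrepotask_free);
    return ok;
}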
createrepo_c-0.17.0/src/modifyrepo_shared.h000066400000000000000000000043001400672373200206760ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_MODIFYREPO_SHARED_H__ #define __C_CREATEREPOLIB_MODIFYREPO_SHARED_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" #include "compression_wrapper.h" #include "package.h" /** \defgroup modifyrepo_shared Modifyrepo API. * * Module with modifyrepo API * * \addtogroup modifyrepo_shared * @{ */ typedef struct { gchar *path; gchar *type; gboolean remove; gboolean compress; cr_CompressionType compress_type; gboolean unique_md_filenames; cr_ChecksumType checksum_type; gchar *new_name; gboolean zck; gchar *zck_dict_dir; // Internal use gchar *repopath; gchar *zck_repopath; gchar *dst_fn; GStringChunk *chunk; } cr_ModifyRepoTask; cr_ModifyRepoTask * cr_modifyrepotask_new(void); void cr_modifyrepotask_free(cr_ModifyRepoTask *task); gchar * cr_write_file(gchar *repopath, cr_ModifyRepoTask *task, cr_CompressionType compress_type, GError **err); gboolean cr_modifyrepo(GSList *modifyrepotasks, gchar *repopath, GError **err); gboolean cr_modifyrepo_parse_batchfile(const gchar *path, GSList **modifyrepotasks, GError **err); gchar * cr_remove_compression_suffix_if_present(gchar* name, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_MODIFYREPO_SHARED__ */ createrepo_c-0.17.0/src/package.c000066400000000000000000000173171400672373200165750ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * Copyright (C) 2006 Seth Vidal * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include "package.h" #include "misc.h" #define PACKAGE_CHUNK_SIZE 2048 cr_Dependency * cr_dependency_new(void) { cr_Dependency *dep; dep = g_new0 (cr_Dependency, 1); return dep; } cr_PackageFile * cr_package_file_new(void) { cr_PackageFile *file; file = g_new0 (cr_PackageFile, 1); return file; } cr_ChangelogEntry * cr_changelog_entry_new(void) { cr_ChangelogEntry *entry; entry = g_new0 (cr_ChangelogEntry, 1); return entry; } cr_BinaryData * cr_binary_data_new(void) { return (cr_BinaryData *) g_new0(cr_BinaryData, 1); } cr_Package * cr_package_new(void) { cr_Package *package; package = g_new0 (cr_Package, 1); package->chunk = g_string_chunk_new (PACKAGE_CHUNK_SIZE); return package; } cr_Package * cr_package_new_without_chunk(void) { return g_new0(cr_Package, 1); } void cr_package_free(cr_Package *package) { if (!package) return; if (package->chunk && !(package->loadingflags & CR_PACKAGE_SINGLE_CHUNK)) g_string_chunk_free (package->chunk); /* Note: Since glib 2.28 * g_slist_foreach && g_slist_free could be replaced with one function: * g_slist_free_full() */ if (package->requires) { g_slist_free_full(package->requires, g_free); } if (package->provides) { g_slist_free_full(package->provides, g_free); } if (package->conflicts) { g_slist_free_full(package->conflicts, g_free); } if (package->obsoletes) { g_slist_free_full(package->obsoletes, g_free); } if (package->suggests) { g_slist_free_full(package->suggests, g_free); } if (package->enhances) { g_slist_free_full(package->enhances, g_free); } if (package->recommends) { g_slist_free_full(package->recommends, g_free); } if (package->supplements) { g_slist_free_full(package->supplements, g_free); } if (package->files) { g_slist_free_full(package->files, g_free); } if (package->changelogs) { g_slist_free_full(package->changelogs, g_free); } g_free(package->siggpg); g_free(package->sigpgp); g_free (package); } gchar * cr_package_nvra(cr_Package *package) { return g_strdup_printf("%s-%s-%s.%s", package->name, package->version, package->release, package->arch); } gchar * cr_package_nevra(cr_Package *package) { char *epoch; if (package->epoch && strlen(package->epoch)) epoch = package->epoch; else epoch = "0"; return g_strdup_printf("%s-%s:%s-%s.%s", package->name, epoch, package->version, package->release, package->arch); } static GSList * cr_dependency_dup(GStringChunk *chunk, GSList *orig) { GSList *list = NULL; for (GSList *elem = orig; elem; elem = g_slist_next(elem)) { cr_Dependency *odep = elem->data; cr_Dependency *ndep = cr_dependency_new(); ndep->name = cr_safe_string_chunk_insert(chunk, odep->name); ndep->flags = cr_safe_string_chunk_insert(chunk, odep->flags); ndep->epoch = cr_safe_string_chunk_insert(chunk, odep->epoch); ndep->version = cr_safe_string_chunk_insert(chunk, odep->version); ndep->release = cr_safe_string_chunk_insert(chunk, odep->release); ndep->pre = odep->pre; list = g_slist_prepend(list, ndep); } return g_slist_reverse(list); } cr_Package * cr_package_copy(cr_Package *orig) { cr_Package *pkg = cr_package_new(); pkg->pkgKey = orig->pkgKey; pkg->pkgId = cr_safe_string_chunk_insert(pkg->chunk, orig->pkgId); pkg->name = cr_safe_string_chunk_insert(pkg->chunk, orig->name); pkg->arch = cr_safe_string_chunk_insert(pkg->chunk, orig->arch); pkg->version = cr_safe_string_chunk_insert(pkg->chunk, orig->version); pkg->epoch = cr_safe_string_chunk_insert(pkg->chunk, orig->epoch); pkg->release = cr_safe_string_chunk_insert(pkg->chunk, orig->release); pkg->summary = cr_safe_string_chunk_insert(pkg->chunk, orig->summary); 
pkg->description = cr_safe_string_chunk_insert(pkg->chunk, orig->description); pkg->url = cr_safe_string_chunk_insert(pkg->chunk, orig->url); pkg->time_file = orig->time_file; pkg->time_build = orig->time_build; pkg->rpm_license = cr_safe_string_chunk_insert(pkg->chunk, orig->rpm_license); pkg->rpm_vendor = cr_safe_string_chunk_insert(pkg->chunk, orig->rpm_vendor); pkg->rpm_group = cr_safe_string_chunk_insert(pkg->chunk, orig->rpm_group); pkg->rpm_buildhost = cr_safe_string_chunk_insert(pkg->chunk, orig->rpm_buildhost); pkg->rpm_sourcerpm = cr_safe_string_chunk_insert(pkg->chunk, orig->rpm_sourcerpm); pkg->rpm_header_start = orig->rpm_header_start; pkg->rpm_header_end = orig->rpm_header_end; pkg->rpm_packager = cr_safe_string_chunk_insert(pkg->chunk, orig->rpm_packager); pkg->size_package = orig->size_package; pkg->size_installed = orig->size_installed; pkg->size_archive = orig->size_archive; pkg->location_href = cr_safe_string_chunk_insert(pkg->chunk, orig->location_href); pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, orig->location_base); pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk, orig->checksum_type); pkg->requires = cr_dependency_dup(pkg->chunk, orig->requires); pkg->provides = cr_dependency_dup(pkg->chunk, orig->provides); pkg->conflicts = cr_dependency_dup(pkg->chunk, orig->conflicts); pkg->obsoletes = cr_dependency_dup(pkg->chunk, orig->obsoletes); pkg->suggests = cr_dependency_dup(pkg->chunk, orig->suggests); pkg->enhances = cr_dependency_dup(pkg->chunk, orig->enhances); pkg->recommends = cr_dependency_dup(pkg->chunk, orig->recommends); pkg->supplements = cr_dependency_dup(pkg->chunk, orig->supplements); for (GSList *elem = orig->files; elem; elem = g_slist_next(elem)) { cr_PackageFile *orig_file = elem->data; cr_PackageFile *file = cr_package_file_new(); file->type = cr_safe_string_chunk_insert(pkg->chunk, orig_file->type); file->path = cr_safe_string_chunk_insert(pkg->chunk, orig_file->path); file->name = cr_safe_string_chunk_insert(pkg->chunk, orig_file->name); pkg->files = g_slist_prepend(pkg->files, file); } for (GSList *elem = orig->changelogs; elem; elem = g_slist_next(elem)) { cr_ChangelogEntry *orig_log = elem->data; cr_ChangelogEntry *log = cr_changelog_entry_new(); log->author = cr_safe_string_chunk_insert(pkg->chunk, orig_log->author); log->date = orig_log->date; log->changelog = cr_safe_string_chunk_insert(pkg->chunk, orig_log->changelog); pkg->changelogs = g_slist_prepend(pkg->changelogs, log); } return pkg; } createrepo_c-0.17.0/src/package.h000066400000000000000000000163331400672373200165770ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * Copyright (C) 2007 James Bowes * Copyright (C) 2006 Seth Vidal * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef __C_CREATEREPOLIB_PACKAGE_H__ #define __C_CREATEREPOLIB_PACKAGE_H__ #ifdef __cplusplus extern "C" { #endif #include /** \defgroup package Package representation. * \addtogroup package * @{ */ typedef enum { CR_PACKAGE_FROM_HEADER = (1<<1), /*!< Metadata parsed from header */ CR_PACKAGE_FROM_XML = (1<<2), /*!< Metadata parsed xml */ /* Some values are reserved (for sqlite, solv, etc..) */ CR_PACKAGE_LOADED_PRI = (1<<10), /*!< Primary metadata was loaded */ CR_PACKAGE_LOADED_FIL = (1<<11), /*!< Filelists metadata was loaded */ CR_PACKAGE_LOADED_OTH = (1<<12), /*!< Other metadata was loaded */ CR_PACKAGE_SINGLE_CHUNK = (1<<13), /*!< Package uses single chunk */ } cr_PackageLoadingFlags; /** Dependency (Provides, Conflicts, Obsoletes, Requires). */ typedef struct { char *name; /*!< name */ char *flags; /*!< flags (value returned by cr_flag_to_str() from misc module) */ char *epoch; /*!< epoch */ char *version; /*!< version */ char *release; /*!< release */ gboolean pre; /*!< preinstall */ } cr_Dependency; /** File in package. */ typedef struct { char *type; /*!< one of "" (regular file), "dir", "ghost" */ char *path; /*!< path to file */ char *name; /*!< filename */ } cr_PackageFile; /** Changelog entry. */ typedef struct { char *author; /*!< author of changelog */ gint64 date; /*!< date of changelog - seconds since epoch */ char *changelog; /*!< text of changelog */ } cr_ChangelogEntry; /** Binary data. */ typedef struct { void *data; gsize size; } cr_BinaryData; /** Package */ typedef struct { gint64 pkgKey; /*!< used while inserting into sqlite db */ char *pkgId; /*!< package hash */ char *name; /*!< name */ char *arch; /*!< architecture */ char *version; /*!< version */ char *epoch; /*!< epoch */ char *release; /*!< release */ char *summary; /*!< summary */ char *description; /*!< description */ char *url; /*!< package homepage */ gint64 time_file; /*!< mtime of file */ gint64 time_build; /*!< build time (from rpm header) */ char *rpm_license; /*!< license */ char *rpm_vendor; /*!< vendor */ char *rpm_group; /*!< group (one value from /usr/share/doc/rpm- (your_rpm_version)/GROUPS) */ char *rpm_buildhost; /*!< hostname of machine which builds the package */ char *rpm_sourcerpm; /*!< name of srpms */ gint64 rpm_header_start; /*!< start byte of header in rpm */ gint64 rpm_header_end; /*!< end byte of header in rpm */ char *rpm_packager; /*!< packager of package */ gint64 size_package; /*!< size of rpm package */ gint64 size_installed; /*!< size of installed files */ gint64 size_archive; /*!< size of archive (I have no idea what does it mean) */ char *location_href; /*!< file location inside repository */ char *location_base; /*!< location (url) of repository */ char *checksum_type; /*!< type of checksum used ("sha1", "sha256", "md5", ..) 
*/ GSList *requires; /*!< requires (list of cr_Dependency structs) */ GSList *provides; /*!< provides (list of cr_Dependency structs) */ GSList *conflicts; /*!< conflicts (list of cr_Dependency structs) */ GSList *obsoletes; /*!< obsoletes (list of cr_Dependency structs) */ GSList *suggests; /*!< suggests (list of cr_Dependency structs) */ GSList *enhances; /*!< enhances (list of cr_Dependency structs) */ GSList *recommends; /*!< recommends (list of cr_Dependency structs) */ GSList *supplements; /*!< supplements (list of cr_Dependency structs) */ GSList *files; /*!< files in the package (list of cr_PackageFile structs) */ GSList *changelogs; /*!< changelogs (list of cr_ChangelogEntry structs) */ char *hdrid; cr_BinaryData *siggpg; cr_BinaryData *sigpgp; GStringChunk *chunk; /*!< string chunk for store all package strings on the single place */ cr_PackageLoadingFlags loadingflags; /*!< Bitfield flags with information about package loading */ } cr_Package; /** Create new (empty) dependency structure. * @return new empty cr_Dependency */ cr_Dependency *cr_dependency_new(void); /** Create new (empty) package file structure. * @return new emtpy cr_PackageFile */ cr_PackageFile *cr_package_file_new(void); /** Create new (empty) changelog structure. * @return new empty cr_ChangelogEntry */ cr_ChangelogEntry *cr_changelog_entry_new(void); /** Create new (empty) structure for binary data * @return new mepty cr_BinaryData */ cr_BinaryData *cr_binary_data_new(void); /** Create new (empty) package structure. * @return new empty cr_Package */ cr_Package *cr_package_new(void); /** Create new (empty) package structure without initialized string chunk. * @return new empty cr_Package */ cr_Package *cr_package_new_without_chunk(void); /** Free package structure and all its structures. * @param package cr_Package */ void cr_package_free(cr_Package *package); /** Get NVRA package string * @param package cr_Package * @return nvra string */ gchar *cr_package_nvra(cr_Package *package); /** Get NEVRA package string * @param package cr_Package * @return nevra string */ gchar *cr_package_nevra(cr_Package *package); /** Create a standalone copy of the package. * @param package cr_Package * @return copy of the package */ cr_Package *cr_package_copy(cr_Package *package); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_PACKAGE_H__ */ createrepo_c-0.17.0/src/parsehdr.c000066400000000000000000000613461400672373200170130ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include "parsehdr.h" #include "xml_dump.h" #include "misc.h" #include "cleanup.h" #if defined(RPMTAG_SUGGESTS) && defined(RPMTAG_ENHANCES) \ && defined(RPMTAG_RECOMMENDS) && defined(RPMTAG_SUPPLEMENTS) #define RPM_WEAK_DEPS_SUPPORT 1 #endif #ifdef ENABLE_LEGACY_WEAKDEPS #define RPMSENSE_STRONG (1 << 27) #define RPMSENSE_MISSINGOK (1 << 19) #endif typedef enum DepType_e { DEP_PROVIDES, DEP_CONFLICTS, DEP_OBSOLETES, DEP_REQUIRES, DEP_SUGGESTS, DEP_ENHANCES, DEP_RECOMMENDS, DEP_SUPPLEMENTS, #ifdef ENABLE_LEGACY_WEAKDEPS DEP_OLDSUGGESTS, DEP_OLDENHANCES, #endif DEP_SENTINEL } DepType; typedef struct DepItem_s { DepType type; int nametag; int flagstag; int versiontag; } DepItem; // Keep this list sorted in the same order as the enum DepType_e! static DepItem dep_items[] = { { DEP_PROVIDES, RPMTAG_PROVIDENAME, RPMTAG_PROVIDEFLAGS, RPMTAG_PROVIDEVERSION }, { DEP_CONFLICTS, RPMTAG_CONFLICTNAME, RPMTAG_CONFLICTFLAGS, RPMTAG_CONFLICTVERSION }, { DEP_OBSOLETES, RPMTAG_OBSOLETENAME, RPMTAG_OBSOLETEFLAGS, RPMTAG_OBSOLETEVERSION }, { DEP_REQUIRES, RPMTAG_REQUIRENAME, RPMTAG_REQUIREFLAGS, RPMTAG_REQUIREVERSION }, #ifdef RPM_WEAK_DEPS_SUPPORT { DEP_SUGGESTS, RPMTAG_SUGGESTNAME, RPMTAG_SUGGESTFLAGS, RPMTAG_SUGGESTVERSION }, { DEP_ENHANCES, RPMTAG_ENHANCENAME, RPMTAG_ENHANCEFLAGS, RPMTAG_ENHANCEVERSION }, { DEP_RECOMMENDS, RPMTAG_RECOMMENDNAME, RPMTAG_RECOMMENDFLAGS, RPMTAG_RECOMMENDVERSION }, { DEP_SUPPLEMENTS, RPMTAG_SUPPLEMENTNAME, RPMTAG_SUPPLEMENTFLAGS, RPMTAG_SUPPLEMENTVERSION }, #ifdef ENABLE_LEGACY_WEAKDEPS { DEP_OLDSUGGESTS, RPMTAG_OLDSUGGESTSNAME, RPMTAG_OLDSUGGESTSFLAGS, RPMTAG_OLDSUGGESTSVERSION }, { DEP_OLDENHANCES, RPMTAG_OLDENHANCESNAME, RPMTAG_OLDENHANCESFLAGS, RPMTAG_OLDENHANCESVERSION }, #endif #endif { DEP_SENTINEL, 0, 0, 0 }, }; static inline int cr_compare_dependency(const char *dep1, const char *dep2) { /* Compares two dependency by name * NOTE: The function assume first parts must be same! * libc.so.6() < libc.so.6(GLIBC_2.3.4)(64 bit) < libc.so.6(GLIBC_2.4) * Return values: 0 - same; 1 - first is bigger; 2 - second is bigger, * -1 - error */ int ret1; char *ver1, *ver2, *ver1_e, *ver2_e; if (dep1 == dep2) return 0; ver1 = strchr(dep1, '('); // libc.so.6(... ver2 = strchr(dep2, '('); // verX ^ // There is no '(' if (!ver1 && !ver2) return 0; if (!ver1) return 2; if (!ver2) return 1; ver1_e = strchr(ver1, ')'); // libc.so.6(xxx)... ver2_e = strchr(ver2, ')'); // verX_e ^ // If there is no ')' if (!ver1_e && !ver2_e) return -1; if (!ver1_e) return 2; if (!ver2_e) return 1; // Go to char next to '(' ver1++; // libc.so.6(... ver2++; // verX ^ // If parentheses have no content - libc.so.6()... == libc.so.6()... if (ver1 == ver1_e && ver2 == ver2_e) return 0; if (ver1 == ver1_e) return 2; if (ver2 == ver2_e) return 1; // Go to first number for (; *ver1 && (*ver1 < '0' || *ver1 > '9'); ver1++); // libc.so.6(GLIBC_2... for (; *ver2 && (*ver2 < '0' || *ver2 > '9'); ver2++); // verX ^ // Too far // libc.so.6(xxx)(64bit) // verX ^ if (ver1 > ver1_e && ver2 > ver2_e) return 0; if (ver1 > ver1_e) return 2; if (ver2 > ver2_e) return 1; /* XXX: This piece of code could be removed in future // Check if version is really version and not an architecture // case: libc.so.6(64bit) = 64 is not a version! 
ret1 = strncmp(ver1, "64bit", 5); ret2 = strncmp(ver2, "64bit", 5); if (!ret1 && !ret2) return 0; if (!ret1) return 2; if (!ret2) return 1; */ // Get version string ver1 = g_strndup(ver1, (ver1_e - ver1)); ver2 = g_strndup(ver2, (ver2_e - ver2)); // Compare versions ret1 = rpmvercmp(ver1, ver2); if (ret1 == -1) ret1 = 2; g_free(ver1); g_free(ver2); return ret1; } cr_Package * cr_package_from_header(Header hdr, int changelog_limit, cr_HeaderReadingFlags hdrrflags, G_GNUC_UNUSED GError **err) { cr_Package *pkg; assert(hdr); assert(!err || *err == NULL); // Create new package structure pkg = cr_package_new(); pkg->loadingflags |= CR_PACKAGE_FROM_HEADER; pkg->loadingflags |= CR_PACKAGE_LOADED_PRI; pkg->loadingflags |= CR_PACKAGE_LOADED_FIL; pkg->loadingflags |= CR_PACKAGE_LOADED_OTH; // Create rpm tag data container rpmtd td = rpmtdNew(); headerGetFlags flags = HEADERGET_MINMEM | HEADERGET_EXT; // Fill package structure pkg->name = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_NAME)); gint64 is_src = headerGetNumber(hdr, RPMTAG_SOURCEPACKAGE); if (is_src) { pkg->arch = cr_safe_string_chunk_insert(pkg->chunk, "src"); } else { pkg->arch = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_ARCH)); } pkg->version = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_VERSION)); #define MAX_STR_INT_LEN 24 char tmp_epoch[MAX_STR_INT_LEN]; if (snprintf(tmp_epoch, MAX_STR_INT_LEN, "%llu", (long long unsigned int) headerGetNumber(hdr, RPMTAG_EPOCH)) <= 0) { tmp_epoch[0] = '\0'; } pkg->epoch = g_string_chunk_insert_len(pkg->chunk, tmp_epoch, MAX_STR_INT_LEN); pkg->release = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_RELEASE)); pkg->summary = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_SUMMARY)); pkg->description = cr_safe_string_chunk_insert_null(pkg->chunk, headerGetString(hdr, RPMTAG_DESCRIPTION)); pkg->url = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_URL)); if (headerGet(hdr, RPMTAG_BUILDTIME, td, flags)) { pkg->time_build = rpmtdGetNumber(td); } pkg->rpm_license = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_LICENSE)); pkg->rpm_vendor = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_VENDOR)); pkg->rpm_group = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_GROUP)); pkg->rpm_buildhost = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_BUILDHOST)); pkg->rpm_sourcerpm = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_SOURCERPM)); pkg->rpm_packager = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_PACKAGER)); // RPMTAG_LONGSIZE is allways present (is emulated for small packages because HEADERGET_EXT flag was used) if (headerGet(hdr, RPMTAG_LONGSIZE, td, flags)) { pkg->size_installed = rpmtdGetNumber(td); } rpmtdFreeData(td); // RPMTAG_LONGARCHIVESIZE is allways present (is emulated for small packages because HEADERGET_EXT flag was used) if (headerGet(hdr, RPMTAG_LONGARCHIVESIZE, td, flags)) { pkg->size_archive = rpmtdGetNumber(td); } rpmtdFreeData(td); rpmtdFree(td); // // Fill files // rpmtd full_filenames = rpmtdNew(); // Only for filenames_hashtable rpmtd indexes = rpmtdNew(); rpmtd filenames = rpmtdNew(); rpmtd fileflags = rpmtdNew(); rpmtd filemodes = rpmtdNew(); GHashTable *filenames_hashtable = g_hash_table_new(g_str_hash, g_str_equal); rpmtd dirnames = rpmtdNew(); // Create list of pointer to directory names int dir_count; char **dir_list = NULL; if 
(headerGet(hdr, RPMTAG_DIRNAMES, dirnames, flags) && (dir_count = rpmtdCount(dirnames))) { int x = 0; dir_list = malloc(sizeof(char *) * dir_count); while (rpmtdNext(dirnames) != -1) { dir_list[x] = cr_safe_string_chunk_insert(pkg->chunk, rpmtdGetString(dirnames)); x++; } assert(x == dir_count); } if (headerGet(hdr, RPMTAG_FILENAMES, full_filenames, flags) && headerGet(hdr, RPMTAG_DIRINDEXES, indexes, flags) && headerGet(hdr, RPMTAG_BASENAMES, filenames, flags) && headerGet(hdr, RPMTAG_FILEFLAGS, fileflags, flags) && headerGet(hdr, RPMTAG_FILEMODES, filemodes, flags)) { rpmtdInit(full_filenames); rpmtdInit(indexes); rpmtdInit(filenames); rpmtdInit(fileflags); rpmtdInit(filemodes); while ((rpmtdNext(full_filenames) != -1) && (rpmtdNext(indexes) != -1) && (rpmtdNext(filenames) != -1) && (rpmtdNext(fileflags) != -1) && (rpmtdNext(filemodes) != -1)) { cr_PackageFile *packagefile = cr_package_file_new(); packagefile->name = cr_safe_string_chunk_insert(pkg->chunk, rpmtdGetString(filenames)); packagefile->path = (dir_list) ? dir_list[(int) rpmtdGetNumber(indexes)] : ""; if (S_ISDIR(rpmtdGetNumber(filemodes))) { // Directory packagefile->type = cr_safe_string_chunk_insert(pkg->chunk, "dir"); } else if (rpmtdGetNumber(fileflags) & RPMFILE_GHOST) { // Ghost packagefile->type = cr_safe_string_chunk_insert(pkg->chunk, "ghost"); } else { // Regular file packagefile->type = cr_safe_string_chunk_insert(pkg->chunk, ""); } g_hash_table_replace(filenames_hashtable, (gpointer) rpmtdGetString(full_filenames), (gpointer) rpmtdGetString(full_filenames)); pkg->files = g_slist_prepend(pkg->files, packagefile); } pkg->files = g_slist_reverse (pkg->files); rpmtdFreeData(dirnames); rpmtdFreeData(indexes); rpmtdFreeData(filenames); rpmtdFreeData(fileflags); rpmtdFreeData(filemodes); } rpmtdFree(dirnames); rpmtdFree(indexes); rpmtdFree(filemodes); if (dir_list) { free((void *) dir_list); } // // PCOR (provides, conflicts, obsoletes, requires) // rpmtd fileversions = rpmtdNew(); // Struct used as value in ap_hashtable struct ap_value_struct { const char *flags; const char *version; int pre; }; // Hastable with filenames from provided GHashTable *provided_hashtable = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); // Hashtable with already processed files from requires GHashTable *ap_hashtable = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, free); for (int deptype=0; dep_items[deptype].type != DEP_SENTINEL; deptype++) { if (headerGet(hdr, dep_items[deptype].nametag, filenames, flags) && headerGet(hdr, dep_items[deptype].flagstag, fileflags, flags) && headerGet(hdr, dep_items[deptype].versiontag, fileversions, flags)) { // Because we have to select only libc.so with highest version // e.g. libc.so.6(GLIBC_2.4) cr_Dependency *libc_require_highest = NULL; rpmtdInit(filenames); rpmtdInit(fileflags); rpmtdInit(fileversions); while ((rpmtdNext(filenames) != -1) && (rpmtdNext(fileflags) != -1) && (rpmtdNext(fileversions) != -1)) { int pre = 0; const char *filename = rpmtdGetString(filenames); guint64 num_flags = rpmtdGetNumber(fileflags); const char *flags = cr_flag_to_str(num_flags); const char *full_version = rpmtdGetString(fileversions); _cleanup_free_ char *depnfv = NULL; // Dep NameFlagsVersion depnfv = g_strconcat(filename, flags ? flags : "", full_version ? 
full_version : "", NULL); // Requires specific stuff if (deptype == DEP_REQUIRES) { // Skip requires which start with "rpmlib(" if (!strncmp("rpmlib(", filename, 7)) { continue; } // Skip package primary files if (*filename == '/' && g_hash_table_lookup_extended(filenames_hashtable, filename, NULL, NULL)) { if (cr_is_primary(filename)) { continue; } } // Skip files which are provided if (g_hash_table_lookup_extended(provided_hashtable, depnfv, NULL, NULL)) { continue; } // Calculate pre value if (num_flags & (RPMSENSE_PREREQ | RPMSENSE_SCRIPT_PRE | RPMSENSE_SCRIPT_POST)) { pre = 1; } // Skip duplicate files gpointer value; if (g_hash_table_lookup_extended(ap_hashtable, filename, NULL, &value)) { struct ap_value_struct *ap_value = value; if (!g_strcmp0(ap_value->flags, flags) && !strcmp(ap_value->version, (full_version ? full_version : "")) && (ap_value->pre == pre)) { continue; } } } // Parse dep string cr_EVR *evr = cr_str_to_evr(full_version, pkg->chunk); if ((full_version && *full_version) && !evr->epoch) { // NULL in epoch mean that the epoch was bad (non-numerical) _cleanup_free_ gchar *pkg_nevra = cr_package_nevra(pkg); g_warning("Bad epoch in version string \"%s\" for dependency \"%s\" in package \"%s\"", full_version, filename, pkg_nevra); g_warning("Skipping this dependency"); g_free(evr); continue; } // Create dynamic dependency object cr_Dependency *dependency = cr_dependency_new(); dependency->name = cr_safe_string_chunk_insert(pkg->chunk, filename); dependency->flags = cr_safe_string_chunk_insert(pkg->chunk, flags); dependency->epoch = evr->epoch; dependency->version = evr->version; dependency->release = evr->release; g_free(evr); switch (deptype) { case DEP_PROVIDES: { char *depnfv_dup = g_strdup(depnfv); g_hash_table_replace(provided_hashtable, depnfv_dup, NULL); pkg->provides = g_slist_prepend(pkg->provides, dependency); break; } case DEP_CONFLICTS: pkg->conflicts = g_slist_prepend(pkg->conflicts, dependency); break; case DEP_OBSOLETES: pkg->obsoletes = g_slist_prepend(pkg->obsoletes, dependency); break; case DEP_REQUIRES: #ifdef ENABLE_LEGACY_WEAKDEPS if ( num_flags & RPMSENSE_MISSINGOK ) { pkg->recommends = g_slist_prepend(pkg->recommends, dependency); break; } #endif dependency->pre = pre; // XXX: libc.so filtering //////////////////////////// if (g_str_has_prefix(dependency->name, "libc.so.6")) { if (!libc_require_highest) libc_require_highest = dependency; else { if (cr_compare_dependency(libc_require_highest->name, dependency->name) == 2) { g_free(libc_require_highest); libc_require_highest = dependency; } else g_free(dependency); } break; } // XXX: libc.so filtering - END /////////////////////// pkg->requires = g_slist_prepend(pkg->requires, dependency); // Add file into ap_hashtable struct ap_value_struct *value = malloc(sizeof(struct ap_value_struct)); value->flags = flags; value->version = full_version; value->pre = dependency->pre; g_hash_table_replace(ap_hashtable, dependency->name, value); break; //case REQUIRES end case DEP_SUGGESTS: pkg->suggests = g_slist_prepend(pkg->suggests, dependency); break; case DEP_ENHANCES: pkg->enhances = g_slist_prepend(pkg->enhances, dependency); break; case DEP_RECOMMENDS: pkg->recommends = g_slist_prepend(pkg->recommends, dependency); break; case DEP_SUPPLEMENTS: pkg->supplements = g_slist_prepend(pkg->supplements, dependency); break; #ifdef ENABLE_LEGACY_WEAKDEPS case DEP_OLDSUGGESTS: if ( num_flags & RPMSENSE_STRONG ) { pkg->recommends = g_slist_prepend(pkg->recommends, dependency); } else { pkg->suggests = 
g_slist_prepend(pkg->suggests, dependency); } break; case DEP_OLDENHANCES: if ( num_flags & RPMSENSE_STRONG ) { pkg->supplements = g_slist_prepend(pkg->supplements, dependency); } else { pkg->enhances = g_slist_prepend(pkg->enhances, dependency); } break; #endif } // Switch end } // While end // XXX: libc.so filtering //////////////////////////////// if (deptype == DEP_REQUIRES && libc_require_highest) pkg->requires = g_slist_prepend(pkg->requires, libc_require_highest); // XXX: libc.so filtering - END //////////////////////////////// } rpmtdFreeData(filenames); rpmtdFreeData(fileflags); rpmtdFreeData(fileversions); } pkg->provides = g_slist_reverse (pkg->provides); pkg->conflicts = g_slist_reverse (pkg->conflicts); pkg->obsoletes = g_slist_reverse (pkg->obsoletes); pkg->requires = g_slist_reverse (pkg->requires); pkg->suggests = g_slist_reverse (pkg->suggests); pkg->enhances = g_slist_reverse (pkg->enhances); pkg->recommends = g_slist_reverse (pkg->recommends); pkg->supplements = g_slist_reverse (pkg->supplements); g_hash_table_remove_all(filenames_hashtable); g_hash_table_remove_all(provided_hashtable); g_hash_table_remove_all(ap_hashtable); g_hash_table_unref(filenames_hashtable); g_hash_table_unref(provided_hashtable); g_hash_table_unref(ap_hashtable); rpmtdFree(filenames); rpmtdFree(fileflags); rpmtdFree(fileversions); rpmtdFreeData(full_filenames); rpmtdFree(full_filenames); // // Changelogs // rpmtd changelogtimes = rpmtdNew(); rpmtd changelognames = rpmtdNew(); rpmtd changelogtexts = rpmtdNew(); if (headerGet(hdr, RPMTAG_CHANGELOGTIME, changelogtimes, flags) && headerGet(hdr, RPMTAG_CHANGELOGNAME, changelognames, flags) && headerGet(hdr, RPMTAG_CHANGELOGTEXT, changelogtexts, flags)) { gint64 last_time = G_GINT64_CONSTANT(0); rpmtdInit(changelogtimes); rpmtdInit(changelognames); rpmtdInit(changelogtexts); while ((rpmtdNext(changelogtimes) != -1) && (rpmtdNext(changelognames) != -1) && (rpmtdNext(changelogtexts) != -1) && (changelog_limit > 0 || changelog_limit == -1)) { gint64 time = rpmtdGetNumber(changelogtimes); cr_ChangelogEntry *changelog = cr_changelog_entry_new(); changelog->author = cr_safe_string_chunk_insert(pkg->chunk, rpmtdGetString(changelognames)); changelog->date = time; changelog->changelog = cr_safe_string_chunk_insert(pkg->chunk, rpmtdGetString(changelogtexts)); // Remove space from end of author name if (changelog->author) { size_t len, x; len = strlen(changelog->author); for (x=(len-1); x > 0; x--) { if (changelog->author[x] == ' ') { changelog->author[x] = '\0'; } else { break; } } } pkg->changelogs = g_slist_prepend(pkg->changelogs, changelog); if (changelog_limit != -1) changelog_limit--; // If a previous entry has the same time, increment time of the previous // entry by one. Ugly but works! 
if (last_time == time) { int tmp_time = time; GSList *previous = pkg->changelogs; while ((previous = g_slist_next(previous)) != NULL && ((cr_ChangelogEntry *) (previous->data))->date == tmp_time) { ((cr_ChangelogEntry *) (previous->data))->date++; tmp_time++; } } else { last_time = time; } } //pkg->changelogs = g_slist_reverse (pkg->changelogs); } rpmtdFreeData(changelogtimes); rpmtdFreeData(changelognames); rpmtdFreeData(changelogtexts); rpmtdFree(changelogtimes); rpmtdFree(changelognames); rpmtdFree(changelogtexts); // // Keys and hdrid (data used for caching when the --cachedir is specified) // if (hdrrflags & CR_HDRR_LOADHDRID) pkg->hdrid = cr_safe_string_chunk_insert(pkg->chunk, headerGetString(hdr, RPMTAG_HDRID)); if (hdrrflags & CR_HDRR_LOADSIGNATURES) { rpmtd gpgtd = rpmtdNew(); rpmtd pgptd = rpmtdNew(); if (headerGet(hdr, RPMTAG_SIGGPG, gpgtd, hdrrflags) && gpgtd->count > 0) { pkg->siggpg = cr_binary_data_new(); pkg->siggpg->size = gpgtd->count; pkg->siggpg->data = g_string_chunk_insert_len(pkg->chunk, gpgtd->data, gpgtd->count); } if (headerGet(hdr, RPMTAG_SIGPGP, pgptd, hdrrflags) && pgptd->count > 0) { pkg->sigpgp = cr_binary_data_new(); pkg->sigpgp->size = pgptd->count; pkg->sigpgp->data = g_string_chunk_insert_len(pkg->chunk, pgptd->data, pgptd->count); } rpmtdFree(gpgtd); rpmtdFree(pgptd); } return pkg; } createrepo_c-0.17.0/src/parsehdr.h000066400000000000000000000037441400672373200170160ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_PARSEHDR_H__ #define __C_CREATEREPOLIB_PARSEHDR_H__ #ifdef __cplusplus extern "C" { #endif #include #include #include "package.h" /** \defgroup parsehdr Header parser API. * \addtogroup parsehdr * @{ */ /** Flags */ typedef enum { CR_HDRR_NONE = (1 << 0), CR_HDRR_LOADHDRID = (1 << 1), /*!< Load hdrid */ CR_HDRR_LOADSIGNATURES = (1 << 2), /*!< Load siggpg and siggpg */ } cr_HeaderReadingFlags; /** Read data from header and return filled cr_Package structure. * All const char * params could be NULL. 
* @param hdr Header * @param changelog_limit number of changelog entries * @param flags Flags for header reading * @param err GError ** * @return Newly allocated cr_Package or NULL on error */ cr_Package *cr_package_from_header(Header hdr, int changelog_limit, cr_HeaderReadingFlags flags, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_PARSEHDR_H__ */ createrepo_c-0.17.0/src/parsepkg.c000066400000000000000000000162541400672373200170150ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "parsehdr.h" #include "parsepkg.h" #include "misc.h" #include "checksum.h" #define ERR_DOMAIN CREATEREPO_C_ERROR rpmts cr_ts = NULL; static gpointer cr_package_parser_init_once_cb(gpointer user_data G_GNUC_UNUSED) { rpmReadConfigFiles(NULL, NULL); cr_ts = rpmtsCreate(); if (!cr_ts) g_critical("%s: rpmtsCreate() failed", __func__); rpmVSFlags vsflags = 0; vsflags |= _RPMVSF_NODIGESTS; vsflags |= _RPMVSF_NOSIGNATURES; vsflags |= RPMVSF_NOHDRCHK; rpmtsSetVSFlags(cr_ts, vsflags); return NULL; } void cr_package_parser_init() { static GOnce package_parser_init_once = G_ONCE_INIT; g_once(&package_parser_init_once, cr_package_parser_init_once_cb, NULL); } static gpointer cr_package_parser_cleanup_once_cb(gpointer user_data G_GNUC_UNUSED) { if (cr_ts) { rpmtsFree(cr_ts); cr_ts = NULL; } rpmFreeMacros(NULL); rpmFreeRpmrc(); return NULL; } void cr_package_parser_cleanup() { static GOnce package_parser_cleanup_once = G_ONCE_INIT; g_once(&package_parser_cleanup_once, cr_package_parser_cleanup_once_cb, NULL); } static gboolean read_header(const char *filename, Header *hdr, GError **err) { assert(filename); assert(!err || *err == NULL); FD_t fd = Fopen(filename, "r.ufdio"); if (!fd) { g_warning("%s: Fopen of %s failed %s", __func__, filename, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "Fopen failed: %s", g_strerror(errno)); return FALSE; } int rc = rpmReadPackageFile(cr_ts, fd, NULL, hdr); if (rc != RPMRC_OK) { switch (rc) { case RPMRC_NOKEY: g_debug("%s: %s: Public key is unavailable.", __func__, filename); break; case RPMRC_NOTTRUSTED: g_debug("%s: %s: Signature is OK, but key is not trusted.", __func__, filename); break; default: g_warning("%s: rpmReadPackageFile() error", __func__); g_set_error(err, ERR_DOMAIN, CRE_IO, "rpmReadPackageFile() error"); Fclose(fd); return FALSE; } } Fclose(fd); return TRUE; } cr_Package * cr_package_from_rpm_base(const char *filename, int changelog_limit, cr_HeaderReadingFlags flags, GError **err) { Header hdr; cr_Package *pkg; assert(filename); assert(!err || *err == NULL); if (!read_header(filename, &hdr, err)) return NULL; pkg = 
cr_package_from_header(hdr, changelog_limit, flags, err); headerFree(hdr); return pkg; } cr_Package * cr_package_from_rpm(const char *filename, cr_ChecksumType checksum_type, const char *location_href, const char *location_base, int changelog_limit, struct stat *stat_buf, cr_HeaderReadingFlags flags, GError **err) { cr_Package *pkg = NULL; GError *tmp_err = NULL; assert(filename); assert(!err || *err == NULL); // Get a package object pkg = cr_package_from_rpm_base(filename, changelog_limit, flags, err); if (!pkg) goto errexit; pkg->location_href = cr_safe_string_chunk_insert(pkg->chunk, location_href); pkg->location_base = cr_safe_string_chunk_insert(pkg->chunk, location_base); // Get checksum type string pkg->checksum_type = cr_safe_string_chunk_insert(pkg->chunk, cr_checksum_name_str(checksum_type)); // Get file stat if (!stat_buf) { struct stat stat_buf_own; if (stat(filename, &stat_buf_own) == -1) { g_warning("%s: stat(%s) error (%s)", __func__, filename, g_strerror(errno)); g_set_error(err, ERR_DOMAIN, CRE_IO, "stat(%s) failed: %s", filename, g_strerror(errno)); goto errexit; } pkg->time_file = stat_buf_own.st_mtime; pkg->size_package = stat_buf_own.st_size; } else { pkg->time_file = stat_buf->st_mtime; pkg->size_package = stat_buf->st_size; } // Compute checksum char *checksum = cr_checksum_file(filename, checksum_type, &tmp_err); if (!checksum) { g_propagate_prefixed_error(err, tmp_err, "Error while checksum calculation: "); goto errexit; } pkg->pkgId = cr_safe_string_chunk_insert(pkg->chunk, checksum); free(checksum); // Get header range struct cr_HeaderRangeStruct hdr_r = cr_get_header_byte_range(filename, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error while determinig header range: "); goto errexit; } pkg->rpm_header_start = hdr_r.start; pkg->rpm_header_end = hdr_r.end; return pkg; errexit: cr_package_free(pkg); return NULL; } struct cr_XmlStruct cr_xml_from_rpm(const char *filename, cr_ChecksumType checksum_type, const char *location_href, const char *location_base, int changelog_limit, struct stat *stat_buf, GError **err) { cr_Package *pkg; struct cr_XmlStruct result; assert(filename); assert(!err || *err == NULL); result.primary = NULL; result.filelists = NULL; result.other = NULL; pkg = cr_package_from_rpm(filename, checksum_type, location_href, location_base, changelog_limit, stat_buf, CR_HDRR_NONE, err); if (!pkg) return result; result = cr_xml_dump(pkg, err); cr_package_free(pkg); return result; } createrepo_c-0.17.0/src/parsepkg.h000066400000000000000000000104131400672373200170110ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef __C_CREATEREPOLIB_PARSEPKG_H__ #define __C_CREATEREPOLIB_PARSEPKG_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" #include "parsehdr.h" #include "package.h" #include "xml_dump.h" /** \defgroup parsepkg Package parser API. * \addtogroup parsepkg * @{ */ /** Initialize global structures for package parsing. * This function call rpmReadConfigFiles() and create global transaction set. * This function should be called only once! This function is not thread safe! */ void cr_package_parser_init(); /** Free global structures for package parsing. */ void cr_package_parser_cleanup(); /** Generate a package object from a package file. * Some attributes like pkgId (checksum), checksum_type, time_file, * location_href, location_base, rpm_header_start, rpm_header_end * are not filled. * @param filename filename * @param changelog_limit number of changelogs that will be loaded * @param flags Flags for header reading * @param err GError ** * @return cr_Package or NULL on error */ cr_Package * cr_package_from_rpm_base(const char *filename, int changelog_limit, cr_HeaderReadingFlags flags, GError **err); /** Generate a package object from a package file. * @param filename filename * @param checksum_type type of checksum to be used * @param location_href package location inside repository * @param location_base location (url) of repository * @param changelog_limit number of changelog entries * @param stat_buf struct stat of the filename * (optional - could be NULL) * @param flags Flags for header reading * @param err GError ** * @return cr_Package or NULL on error */ cr_Package *cr_package_from_rpm(const char *filename, cr_ChecksumType checksum_type, const char *location_href, const char *location_base, int changelog_limit, struct stat *stat_buf, cr_HeaderReadingFlags flags, GError **err); /** Generate XML for the specified package. 
* @param filename rpm filename * @param checksum_type type of checksum to be used * @param location_href package location inside repository * @param location_base location (url) of repository * @param changelog_limit number of changelog entries * @param stat_buf struct stat of the filename * (optional - could be NULL) * @param err GError ** * @return struct cr_XmlStruct with primary, filelists and * other xmls */ struct cr_XmlStruct cr_xml_from_rpm(const char *filename, cr_ChecksumType checksum_type, const char *location_href, const char *location_base, int changelog_limit, struct stat *stat_buf, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_PARSEPKG_H__ */ createrepo_c-0.17.0/src/python/000077500000000000000000000000001400672373200163465ustar00rootroot00000000000000createrepo_c-0.17.0/src/python/CMakeLists.txt000066400000000000000000000052711400672373200211130ustar00rootroot00000000000000MACRO(PYTHON_UNSET) unset(PYTHON_LIBRARY) unset(PYTHON_INCLUDE_DIR) unset(PYTHON_EXECUTABLE) unset(PYTHON_LIBRARY CACHE) unset(PYTHON_INCLUDE_DIR CACHE) unset(PYTHON_EXECUTABLE CACHE) ENDMACRO(PYTHON_UNSET) if (NOT SKBUILD) PYTHON_UNSET() SET(Python_ADDITIONAL_VERSIONS 3.0 CACHE INTERNAL "") FIND_PACKAGE(PythonLibs 3 EXACT) FIND_PACKAGE(PythonInterp 3 EXACT REQUIRED) endif (NOT SKBUILD) EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} -c "from sys import stdout; from distutils import sysconfig; stdout.write(sysconfig.get_python_lib(True))" OUTPUT_VARIABLE PYTHON_INSTALL_DIR) INCLUDE_DIRECTORIES (${PYTHON_INCLUDE_PATH}) MESSAGE(STATUS "Python install dir is ${PYTHON_INSTALL_DIR}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-strict-aliasing") set (CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -fno-strict-aliasing") SET (createrepo_cmodule_SRCS checksum-py.c compression_wrapper-py.c contentstat-py.c createrepo_cmodule.c exception-py.c load_metadata-py.c locate_metadata-py.c misc-py.c package-py.c parsepkg-py.c repomd-py.c repomdrecord-py.c sqlite-py.c typeconversion.c updatecollection-py.c updatecollectionmodule-py.c updatecollectionpackage-py.c updateinfo-py.c updaterecord-py.c updatereference-py.c xml_dump-py.c xml_file-py.c xml_parser-py.c ) ADD_LIBRARY(_createrepo_c SHARED ${createrepo_cmodule_SRCS}) SET_TARGET_PROPERTIES(_createrepo_c PROPERTIES PREFIX "") SET_TARGET_PROPERTIES(_createrepo_c PROPERTIES LIBRARY_OUTPUT_DIRECTORY "./createrepo_c") TARGET_LINK_LIBRARIES(_createrepo_c libcreaterepo_c) TARGET_LINK_LIBRARIES(_createrepo_c ${CURL_LIBRARY} ) IF (SKBUILD) find_package(PythonExtensions REQUIRED) python_extension_module(_createrepo_c) ELSE () TARGET_LINK_LIBRARIES(_createrepo_c ${PYTHON_LIBRARY}) ENDIF () IF (NOT SKBUILD) FILE(COPY createrepo_c/__init__.py DESTINATION createrepo_c) ENDIF() IF (SKBUILD) INSTALL(FILES createrepo_c/__init__.py DESTINATION src/python/createrepo_c) INSTALL(TARGETS _createrepo_c LIBRARY DESTINATION src/python/createrepo_c) ELSE () INSTALL(FILES createrepo_c/__init__.py DESTINATION ${PYTHON_INSTALL_DIR}/createrepo_c) INSTALL(TARGETS _createrepo_c LIBRARY DESTINATION ${PYTHON_INSTALL_DIR}/createrepo_c) # Version has to be passed as last argument. 
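# Editorial note (assumption based on the command visible below): the INSTALL(CODE ...)
# step shells out to utils/setup_for_python_metadata.py with the "install_egg_info"
# command so that egg-info metadata matching ${VERSION} is generated under
# ${PYTHON_INSTALL_DIR} during a regular (non scikit-build) install.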
INSTALL(CODE "EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/setup_for_python_metadata.py install_egg_info --install-dir \$ENV{DESTDIR}/${PYTHON_INSTALL_DIR} ${VERSION})") ENDIF () createrepo_c-0.17.0/src/python/checksum-py.c000066400000000000000000000027011400672373200207420ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "src/createrepo_c.h" #include "typeconversion.h" #include "exception-py.h" PyObject * py_checksum_name_str(G_GNUC_UNUSED PyObject *self, PyObject *args) { int type; if (!PyArg_ParseTuple(args, "i:py_checksum_name_Str", &type)) return NULL; return PyUnicodeOrNone_FromString(cr_checksum_name_str(type)); } PyObject * py_checksum_type(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *type; if (!PyArg_ParseTuple(args, "s:py_checksum_type", &type)) return NULL; return PyLong_FromLong((long) cr_checksum_type(type)); } createrepo_c-0.17.0/src/python/checksum-py.h000066400000000000000000000024221400672373200207470ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef CR_CHECKSUM_PY_H #define CR_CHECKSUM_PY_H #include "src/createrepo_c.h" PyDoc_STRVAR(checksum_name_str__doc__, "checksum_name_str(checksum_type) -> str\n\n" "Checksum name from checksum type constant"); PyObject *py_checksum_name_str(PyObject *self, PyObject *args); PyDoc_STRVAR(checksum_type__doc__, "checksum_type(checksum_name) -> long\n\n" "Checksum type from checksum name"); PyObject *py_checksum_type(PyObject *self, PyObject *args); #endif createrepo_c-0.17.0/src/python/compression_wrapper-py.c000066400000000000000000000144621400672373200232500ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #define PY_SSIZE_T_CLEAN #include #include #include #include "compression_wrapper-py.h" #include "exception-py.h" #include "contentstat-py.h" #include "typeconversion.h" /* * Module functions */ PyObject * py_compression_suffix(G_GNUC_UNUSED PyObject *self, PyObject *args) { int type; if (!PyArg_ParseTuple(args, "i:py_compression_suffix", &type)) return NULL; return PyUnicodeOrNone_FromString(cr_compression_suffix(type)); } PyObject * py_detect_compression(G_GNUC_UNUSED PyObject *self, PyObject *args) { long type; char *filename; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "s:py_detect_compression", &filename)) return NULL; type = cr_detect_compression(filename, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } return PyLong_FromLong(type); } PyObject * py_compression_type(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *name; if (!PyArg_ParseTuple(args, "z:py_compression_type", &name)) return NULL; return PyLong_FromLong((long) cr_compression_type(name)); } /* * CrFile object */ typedef struct { PyObject_HEAD CR_FILE *f; PyObject *py_stat; } _CrFileObject; static PyObject * py_close(_CrFileObject *self, void *nothing); static int check_CrFileStatus(const _CrFileObject *self) { assert(self != NULL); assert(CrFileObject_Check(self)); if (self->f == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c CrFile object (Already closed file?)."); return -1; } return 0; } /* Function on the type */ static PyObject * crfile_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _CrFileObject *self = (_CrFileObject *)type->tp_alloc(type, 0); if (self) { self->f = NULL; self->py_stat = NULL; } return (PyObject *)self; } static int crfile_init(_CrFileObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds) { char *path; int mode, comtype; GError *err = NULL; PyObject *py_stat, *ret; cr_ContentStat *stat; if (!PyArg_ParseTuple(args, "siiO|:crfile_init", &path, &mode, &comtype, &py_stat)) return -1; /* Check arguments */ if (mode != CR_CW_MODE_READ && mode != CR_CW_MODE_WRITE) { PyErr_SetString(PyExc_ValueError, "Bad open mode"); return -1; } 
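    /* Note: the remaining argument checks below validate the compression type and
     * accept either a ContentStat object or None for the optional statistics
     * argument before the file is (re)opened with cr_sopen(). */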
if (comtype < 0 || comtype >= CR_CW_COMPRESSION_SENTINEL) { PyErr_SetString(PyExc_ValueError, "Unknown compression type"); return -1; } if (py_stat == Py_None) { stat = NULL; } else if (ContentStatObject_Check(py_stat)) { stat = ContentStat_FromPyObject(py_stat); } else { PyErr_SetString(PyExc_TypeError, "Use ContentStat or None"); return -1; } /* Free all previous resources when reinitialization */ ret = py_close(self, NULL); Py_XDECREF(ret); Py_XDECREF(self->py_stat); self->py_stat = NULL; if (ret == NULL) { // Error encountered! return -1; } /* Init */ self->f = cr_sopen(path, mode, comtype, stat, &err); if (err) { nice_exception(&err, "CrFile %s init failed: ", path); return -1; } self->py_stat = py_stat; Py_XINCREF(py_stat); return 0; } static void crfile_dealloc(_CrFileObject *self) { cr_close(self->f, NULL); Py_XDECREF(self->py_stat); Py_TYPE(self)->tp_free(self); } static PyObject * crfile_repr(_CrFileObject *self) { char *mode; switch (self->f->mode) { case CR_CW_MODE_READ: mode = "Read mode"; break; case CR_CW_MODE_WRITE: mode = "Write mode"; break; default: mode = "Unknown mode"; } return PyUnicode_FromFormat("", mode); } /* CrFile methods */ PyDoc_STRVAR(write__doc__, "write() -> None\n\n" "Write a data to the file"); static PyObject * py_write(_CrFileObject *self, PyObject *args) { char *str; Py_ssize_t len; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "s#:set_num_of_pkgs", &str, &len)) return NULL; if (check_CrFileStatus(self)) return NULL; cr_write(self->f, str, len, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(close__doc__, "close() -> None\n\n" "Close the file"); static PyObject * py_close(_CrFileObject *self, G_GNUC_UNUSED void *nothing) { GError *tmp_err = NULL; if (self->f) { cr_close(self->f, &tmp_err); self->f = NULL; } Py_XDECREF(self->py_stat); self->py_stat = NULL; if (tmp_err) { nice_exception(&tmp_err, "Close error: "); return NULL; } Py_RETURN_NONE; } static struct PyMethodDef crfile_methods[] = { {"write", (PyCFunction)py_write, METH_VARARGS, write__doc__}, {"close", (PyCFunction)py_close, METH_NOARGS, close__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; PyTypeObject CrFile_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.CrFile", .tp_basicsize = sizeof(_CrFileObject), .tp_dealloc = (destructor) crfile_dealloc, .tp_repr = (reprfunc) crfile_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = "CrFile object", .tp_iter = PyObject_SelfIter, .tp_methods = crfile_methods, .tp_init = (initproc) crfile_init, .tp_new = crfile_new, }; createrepo_c-0.17.0/src/python/compression_wrapper-py.h000066400000000000000000000031261400672373200232500ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef CR_COMPRESSION_WRAPPER_PY_H #define CR_COMPRESSION_WRAPPER_PY_H #include "src/createrepo_c.h" extern PyTypeObject CrFile_Type; #define CrFileObject_Check(o) PyObject_TypeCheck(o, &CrFile_Type) PyDoc_STRVAR(compression_suffix__doc__, "compression_suffix(compression_type) -> str or None\n\n" "Compression suffix for the compression type"); PyObject *py_compression_suffix(PyObject *self, PyObject *args); PyDoc_STRVAR(detect_compression__doc__, "detect_compression(path) -> long\n\n" "Detect compression type used on the file"); PyObject *py_detect_compression(PyObject *self, PyObject *args); PyDoc_STRVAR(compression_type__doc__, "compression_type(string) -> int\n\n" "Compression type value"); PyObject *py_compression_type(PyObject *self, PyObject *args); #endif createrepo_c-0.17.0/src/python/contentstat-py.c000066400000000000000000000150501400672373200215070ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "contentstat-py.h" #include "exception-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD cr_ContentStat *stat; } _ContentStatObject; cr_ContentStat * ContentStat_FromPyObject(PyObject *o) { if (!ContentStatObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a ContentStat object."); return NULL; } return ((_ContentStatObject *)o)->stat; } static int check_ContentStatStatus(const _ContentStatObject *self) { assert(self != NULL); assert(ContentStatObject_Check(self)); if (self->stat == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c ContentStat object."); return -1; } return 0; } /* Function on the type */ static PyObject * contentstat_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _ContentStatObject *self = (_ContentStatObject *)type->tp_alloc(type, 0); if (self) self->stat = NULL; return (PyObject *)self; } PyDoc_STRVAR(contentstat_init__doc__, "ContentStat object representing statistical information about content\n\n" ".. 
method:: __init__(checksum_type)\n\n" " :arg checksum_type: Type of checksum that should be used\n"); static int contentstat_init(_ContentStatObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds) { int type; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "i:contentstat_init", &type)) return -1; /* Free all previous resources when reinitialization */ if (self->stat) cr_contentstat_free(self->stat, NULL); /* Init */ self->stat = cr_contentstat_new(type, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, "ContentStat init failed: "); return -1; } return 0; } static void contentstat_dealloc(_ContentStatObject *self) { if (self->stat) cr_contentstat_free(self->stat, NULL); Py_TYPE(self)->tp_free(self); } static PyObject * contentstat_repr(G_GNUC_UNUSED _ContentStatObject *self) { return PyUnicode_FromFormat(""); } /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_ContentStat, member) static PyObject * get_num(_ContentStatObject *self, void *member_offset) { if (check_ContentStatStatus(self)) return NULL; cr_ContentStat *rec = self->stat; gint64 val = (gint64) *((gint64 *) ((size_t)rec + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_int(_ContentStatObject *self, void *member_offset) { if (check_ContentStatStatus(self)) return NULL; cr_ContentStat *rec = self->stat; gint64 val = (gint64) *((int *) ((size_t)rec + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_str(_ContentStatObject *self, void *member_offset) { if (check_ContentStatStatus(self)) return NULL; cr_ContentStat *rec = self->stat; char *str = *((char **) ((size_t) rec + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static int set_num(_ContentStatObject *self, PyObject *value, void *member_offset) { gint64 val; if (check_ContentStatStatus(self)) return -1; if (PyLong_Check(value)) { val = (gint64) PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (gint64) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_ContentStat *rec = self->stat; *((gint64 *) ((size_t) rec + (size_t) member_offset)) = val; return 0; } static int set_int(_ContentStatObject *self, PyObject *value, void *member_offset) { long val; if (check_ContentStatStatus(self)) return -1; if (PyLong_Check(value)) { val = PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (gint64) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_ContentStat *rec = self->stat; *((int *) ((size_t) rec + (size_t) member_offset)) = (int) val; return 0; } static int set_str(_ContentStatObject *self, PyObject *value, void *member_offset) { if (check_ContentStatStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_ContentStat *rec = self->stat; PyObject *pybytes = PyObject_ToPyBytesOrNull(value); char *str = g_strdup(PyBytes_AsString(pybytes)); Py_XDECREF(pybytes); *((char **) ((size_t) rec + (size_t) member_offset)) = str; return 0; } static PyGetSetDef contentstat_getsetters[] = { {"size", (getter)get_num, (setter)set_num, "Number of uncompressed bytes written", OFFSET(size)}, {"checksum_type", (getter)get_int, (setter)set_int, "Type of used checksum", OFFSET(checksum_type)}, {"checksum", (getter)get_str, (setter)set_str, "Calculated checksum", OFFSET(checksum)}, {NULL, 
NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject ContentStat_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.ContentStat", .tp_basicsize = sizeof(_ContentStatObject), .tp_dealloc = (destructor) contentstat_dealloc, .tp_repr = (reprfunc) contentstat_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = contentstat_init__doc__, .tp_iter = PyObject_SelfIter, .tp_getset = contentstat_getsetters, .tp_init = (initproc) contentstat_init, .tp_new = contentstat_new, }; createrepo_c-0.17.0/src/python/contentstat-py.h000066400000000000000000000021201400672373200215060ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_CONTENTSTAT_PY_H #define CR_CONTENTSTAT_PY_H #include "src/createrepo_c.h" extern PyTypeObject ContentStat_Type; #define ContentStatObject_Check(o) PyObject_TypeCheck(o, &ContentStat_Type) cr_ContentStat *ContentStat_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/createrepo_c/000077500000000000000000000000001400672373200210015ustar00rootroot00000000000000createrepo_c-0.17.0/src/python/createrepo_c/__init__.py000066400000000000000000000327251400672373200231230ustar00rootroot00000000000000""" """ import os import subprocess import sys from . 
import _createrepo_c from ._createrepo_c import * VERSION_MAJOR = _createrepo_c.VERSION_MAJOR #: Major version VERSION_MINOR = _createrepo_c.VERSION_MINOR #: Minor version VERSION_PATCH = _createrepo_c.VERSION_PATCH #: Patch version #: Version string VERSION = u"%d.%d.%d" % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) UNKNOWN_CHECKSUM = _createrepo_c.CHECKSUM_UNKNOWN #: Checksum unknown CHECKSUM_UNKNOWN = _createrepo_c.CHECKSUM_UNKNOWN #: Checksum unknown MD5 = _createrepo_c.MD5 #: MD5 checksum SHA = _createrepo_c.SHA #: SHA1 checksum alias SHA1 = _createrepo_c.SHA1 #: SHA1 checksum SHA224 = _createrepo_c.SHA224 #: SHA224 checksum SHA256 = _createrepo_c.SHA256 #: SHA256 checksum SHA384 = _createrepo_c.SHA384 #: SHA384 checksum SHA512 = _createrepo_c.SHA512 #: SHA512 checksum MODE_READ = _createrepo_c.MODE_READ #: Read open mode MODE_WRITE = _createrepo_c.MODE_WRITE #: Write open mode #: Use compression autodetection AUTO_DETECT_COMPRESSION = _createrepo_c.AUTO_DETECT_COMPRESSION #: Unknown compression UNKNOWN_COMPRESSION = _createrepo_c.UNKNOWN_COMPRESSION #: No compression NO_COMPRESSION = _createrepo_c.NO_COMPRESSION #: Gzip compression GZ_COMPRESSION = _createrepo_c.GZ_COMPRESSION #: Bzip2 compression BZ2_COMPRESSION = _createrepo_c.BZ2_COMPRESSION #: XZ compression XZ_COMPRESSION = _createrepo_c.XZ_COMPRESSION #: Zchunk compression ZCK_COMPRESSION = _createrepo_c.ZCK_COMPRESSION #: Gzip compression alias GZ = _createrepo_c.GZ_COMPRESSION #: Bzip2 compression alias BZ2 = _createrepo_c.BZ2_COMPRESSION #: XZ compression alias XZ = _createrepo_c.XZ_COMPRESSION #: Zchunk compression alias ZCK = _createrepo_c.ZCK_COMPRESSION HT_KEY_DEFAULT = _createrepo_c.HT_KEY_DEFAULT #: Default key (hash) HT_KEY_HASH = _createrepo_c.HT_KEY_HASH #: Package hash as a key HT_KEY_NAME = _createrepo_c.HT_KEY_NAME #: Package name as a key HT_KEY_FILENAME = _createrepo_c.HT_KEY_FILENAME #: Package filename as a key HT_DUPACT_KEEPFIRST = _createrepo_c.HT_DUPACT_KEEPFIRST #: If an key is duplicated, keep only the first occurrence HT_DUPACT_REMOVEALL = _createrepo_c.HT_DUPACT_REMOVEALL #: If an key is duplicated, discard all occurrences DB_PRIMARY = _createrepo_c.DB_PRIMARY #: Primary database DB_FILELISTS = _createrepo_c.DB_FILELISTS #: Filelists database DB_OTHER = _createrepo_c.DB_OTHER #: Other database XMLFILE_PRIMARY = _createrepo_c.XMLFILE_PRIMARY #: Primary xml file XMLFILE_FILELISTS = _createrepo_c.XMLFILE_FILELISTS #: Filelists xml file XMLFILE_OTHER = _createrepo_c.XMLFILE_OTHER #: Other xml file XMLFILE_PRESTODELTA = _createrepo_c.XMLFILE_PRESTODELTA #: Prestodelta xml file XMLFILE_UPDATEINFO = _createrepo_c.XMLFILE_UPDATEINFO #: Updateinfo xml file #: XML warning - Unknown tag XML_WARNING_UNKNOWNTAG = _createrepo_c.XML_WARNING_UNKNOWNTAG #: XML warning - Missing attribute XML_WARNING_MISSINGATTR = _createrepo_c.XML_WARNING_MISSINGATTR #: XML warning - Unknown value XML_WARNING_UNKNOWNVAL = _createrepo_c.XML_WARNING_UNKNOWNVAL #: XML warning - Bad attribute value XML_WARNING_BADATTRVAL = _createrepo_c.XML_WARNING_BADATTRVAL # Helper contants # Tuple indexes for provide, conflict, obsolete or require entry PCOR_ENTRY_NAME = 0 #: PCOR entry tuple index - name PCOR_ENTRY_FLAGS = 1 #: PCOR entry tuple index - flags PCOR_ENTRY_EPOCH = 2 #: PCOR entry tuple index - epoch PCOR_ENTRY_VERSION = 3 #: PCOR entry tuple index - version PCOR_ENTRY_RELEASE = 4 #: PCOR entry tuple index - release PCOR_ENTRY_PRE = 5 #: PCOR entry tuple index - pre # Tuple indexes for file entry FILE_ENTRY_TYPE = 0 #: File entry tuple 
index - file type FILE_ENTRY_PATH = 1 #: File entry tuple index - path FILE_ENTRY_NAME = 2 #: File entry tuple index - file name # Tuple indexes for changelog entry CHANGELOG_ENTRY_AUTHOR = 0 #: Changelog entry tuple index - Author CHANGELOG_ENTRY_DATE = 1 #: Changelog entry tuple index - Date CHANGELOG_ENTRY_CHANGELOG = 2 #: Changelog entry tuple index - Changelog # Exception CreaterepoCError = _createrepo_c.CreaterepoCError # ContentStat class ContentStat = _createrepo_c.ContentStat # CrFile class class CrFile(_createrepo_c.CrFile): def __init__(self, filename, mode=MODE_READ, comtype=NO_COMPRESSION, stat=None): """:arg filename: Filename :arg mode: MODE_READ or MODE_WRITE :arg comtype: Compression type (GZ, BZ, XZ or NO_COMPRESSION) :arg stat: ContentStat object or None""" _createrepo_c.CrFile.__init__(self, filename, mode, comtype, stat) # Metadata class Metadata = _createrepo_c.Metadata # MetadataLocation class MetadataLocation = _createrepo_c.MetadataLocation # Package class Package = _createrepo_c.Package # Repomd class class Repomd(_createrepo_c.Repomd): def __init__(self, path=None): """:arg path: Path to existing repomd.xml or None""" _createrepo_c.Repomd.__init__(self) if path: xml_parse_repomd(path, self) def __iter__(self): for rec in self.records: yield rec return def __getitem__(self, key): for rec in self.records: if rec.type == key: return rec self.__missing__(key) def __missing__(self, key): raise KeyError("Record with type '%s' doesn't exist" % key) def __contains__(self, key): for rec in self.records: if rec.type == key: return True return False # RepomdRecord class class RepomdRecord(_createrepo_c.RepomdRecord): def __init__(self, type=None, path=None): """:arg type: String with type of the file (e.g. other, other_db etc.) :arg path: Path to the file """ _createrepo_c.RepomdRecord.__init__(self, type, path) def compress_and_fill(self, hashtype, compresstype): rec = RepomdRecord(self.type + "_gz", None) _createrepo_c.RepomdRecord.compress_and_fill(self, rec, hashtype, compresstype) return rec # Sqlite class Sqlite = _createrepo_c.Sqlite class PrimarySqlite(Sqlite): def __init__(self, path): """:arg path: path to the primary.sqlite database""" Sqlite.__init__(self, path, DB_PRIMARY) class FilelistsSqlite(Sqlite): def __init__(self, path): """:arg path: Path to the filelists.sqlite database""" Sqlite.__init__(self, path, DB_FILELISTS) class OtherSqlite(Sqlite): def __init__(self, path): """:arg path: Path to the other.sqlite database""" Sqlite.__init__(self, path, DB_OTHER) # UpdateCollection class UpdateCollection = _createrepo_c.UpdateCollection # UpdateCollectionModule class UpdateCollectionModule = _createrepo_c.UpdateCollectionModule # UpdateCollectionPackage class UpdateCollectionPackage = _createrepo_c.UpdateCollectionPackage # UpdateInfo class class UpdateInfo(_createrepo_c.UpdateInfo): def __init__(self, path=None): """:arg path: Path to existing updateinfo.xml or None""" _createrepo_c.UpdateInfo.__init__(self) if path: xml_parse_updateinfo(path, self) # UpdateRecord class UpdateRecord = _createrepo_c.UpdateRecord # UpdateReference class UpdateReference = _createrepo_c.UpdateReference # XmlFile class XmlFile = _createrepo_c.XmlFile class PrimaryXmlFile(XmlFile): def __init__(self, path, compressiontype=GZ_COMPRESSION, contentstat=None): """:arg path: Path to the primary xml file :arg compressiontype: Compression type :arg contentstat: ContentStat object""" XmlFile.__init__(self, path, XMLFILE_PRIMARY, compressiontype, contentstat) class FilelistsXmlFile(XmlFile): 
def __init__(self, path, compressiontype=GZ_COMPRESSION, contentstat=None): """:arg path: Path to the filelists xml file :arg compressiontype: Compression type :arg contentstat: ContentStat object""" XmlFile.__init__(self, path, XMLFILE_FILELISTS, compressiontype, contentstat) class OtherXmlFile(XmlFile): def __init__(self, path, compressiontype=GZ_COMPRESSION, contentstat=None): """:arg path: Path to the other xml file :arg compressiontype: Compression type :arg contentstat: ContentStat object""" XmlFile.__init__(self, path, XMLFILE_OTHER, compressiontype, contentstat) class UpdateInfoXmlFile(XmlFile): def __init__(self, path, compressiontype=GZ_COMPRESSION, contentstat=None): """:arg path: Path to the updateinfo xml file :arg compressiontype: Compression type :arg contentstat: ContentStat object""" XmlFile.__init__(self, path, XMLFILE_UPDATEINFO, compressiontype, contentstat) # Functions def package_from_rpm(filename, checksum_type=SHA256, location_href=None, location_base=None, changelog_limit=10): """:class:`.Package` object from the rpm package""" return _createrepo_c.package_from_rpm(filename, checksum_type, location_href, location_base, changelog_limit) def xml_from_rpm(filename, checksum_type=SHA256, location_href=None, location_base=None, changelog_limit=10): """XML for the rpm package""" return _createrepo_c.xml_from_rpm(filename, checksum_type, location_href, location_base, changelog_limit) xml_dump_primary = _createrepo_c.xml_dump_primary xml_dump_filelists = _createrepo_c.xml_dump_filelists xml_dump_other = _createrepo_c.xml_dump_other xml_dump_updaterecord = _createrepo_c.xml_dump_updaterecord xml_dump = _createrepo_c.xml_dump def xml_parse_primary(path, newpkgcb=None, pkgcb=None, warningcb=None, do_files=1): """Parse primary.xml""" return _createrepo_c.xml_parse_primary(path, newpkgcb, pkgcb, warningcb, do_files) def xml_parse_filelists(path, newpkgcb=None, pkgcb=None, warningcb=None): """Parse filelists.xml""" return _createrepo_c.xml_parse_filelists(path, newpkgcb, pkgcb, warningcb) def xml_parse_other(path, newpkgcb=None, pkgcb=None, warningcb=None): """Parse other.xml""" return _createrepo_c.xml_parse_other(path, newpkgcb, pkgcb, warningcb) def xml_parse_primary_snippet(xml_string, newpkgcb=None, pkgcb=None, warningcb=None, do_files=1): """Parse the contents of primary.xml from a string""" return _createrepo_c.xml_parse_primary_snippet(xml_string, newpkgcb, pkgcb, warningcb, do_files) def xml_parse_filelists_snippet(xml_string, newpkgcb=None, pkgcb=None, warningcb=None): """Parse the contents of filelists.xml from a string""" return _createrepo_c.xml_parse_filelists_snippet(xml_string, newpkgcb, pkgcb, warningcb) def xml_parse_other_snippet(xml_string, newpkgcb=None, pkgcb=None, warningcb=None): """Parse the contents of other.xml from a string""" return _createrepo_c.xml_parse_other_snippet(xml_string, newpkgcb, pkgcb, warningcb) def xml_parse_updateinfo(path, updateinfoobj, warningcb=None): """Parse updateinfo.xml""" return _createrepo_c.xml_parse_updateinfo(path, updateinfoobj, warningcb) def xml_parse_repomd(path, repomdobj, warningcb=None): """Parse repomd.xml""" return _createrepo_c.xml_parse_repomd(path, repomdobj, warningcb) checksum_name_str = _createrepo_c.checksum_name_str checksum_type = _createrepo_c.checksum_type def compress_file(src, dst, comtype, stat=None): return _createrepo_c.compress_file_with_stat(src, dst, comtype, stat) def decompress_file(src, dst, comtype, stat=None): return _createrepo_c.decompress_file_with_stat(src, dst, comtype, stat) 
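# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# module). A minimal, hedged example of how the wrappers defined above might
# be combined; the helper names (_example_*) and any paths passed to them are
# hypothetical placeholders rather than createrepo_c API.
# ---------------------------------------------------------------------------

def _example_primary_xml_for_rpm(rpm_path):
    """Sketch: build a Package from a single RPM file and return its
    primary.xml <package> snippet, using the file's basename as location_href."""
    pkg = package_from_rpm(rpm_path,
                           checksum_type=SHA256,
                           location_href=os.path.basename(rpm_path))
    return xml_dump_primary(pkg)


def _example_list_repo_package_names(repo_path):
    """Sketch: locate repodata/ under repo_path (ignoring sqlite databases)
    and collect package names by stream-parsing primary.xml."""
    ml = MetadataLocation(repo_path, True)
    names = []

    def _pkgcb(pkg):
        # Called once for each fully parsed package.
        names.append(pkg.name)

    xml_parse_primary(ml["primary"], pkgcb=_pkgcb, do_files=0)
    return names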
compression_suffix = _createrepo_c.compression_suffix detect_compression = _createrepo_c.detect_compression compression_type = _createrepo_c.compression_type # If we have been built as a Python package, e.g. "setup.py", this is where the binaries # will be located. _DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') # Where we will look for the binaries. Default to looking on the system PATH. _BIN_DIR = "" # If the following test succeeds, then look for binaries in the Python pkg "data" location. # If not, we probably were not built as a Python package (e.g. RPM, "cmake ..; make"). # In that case, let's just assume that the binary will be on the PATH somewhere. if os.path.exists(_DATA_DIR): _BIN_DIR = os.path.join(_DATA_DIR, 'bin') def _program(name, args): return subprocess.call([os.path.join(_BIN_DIR, name)] + args) def createrepo_c(): raise SystemExit(_program('createrepo_c', sys.argv[1:])) def mergerepo_c(): raise SystemExit(_program('mergerepo_c', sys.argv[1:])) def modifyrepo_c(): raise SystemExit(_program('modifyrepo_c', sys.argv[1:])) def sqliterepo_c(): raise SystemExit(_program('sqliterepo_c', sys.argv[1:])) createrepo_c-0.17.0/src/python/createrepo_cmodule.c000066400000000000000000000263511400672373200223620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012-2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include // from python #include "src/createrepo_c.h" #include "checksum-py.h" #include "compression_wrapper-py.h" #include "contentstat-py.h" #include "exception-py.h" #include "load_metadata-py.h" #include "locate_metadata-py.h" #include "misc-py.h" #include "package-py.h" #include "parsepkg-py.h" #include "repomd-py.h" #include "repomdrecord-py.h" #include "sqlite-py.h" #include "updatecollection-py.h" #include "updatecollectionmodule-py.h" #include "updatecollectionpackage-py.h" #include "updateinfo-py.h" #include "updaterecord-py.h" #include "updatereference-py.h" #include "xml_dump-py.h" #include "xml_file-py.h" #include "xml_parser-py.h" struct module_state { PyObject *error; }; static struct PyMethodDef createrepo_c_methods[] = { {"package_from_rpm", (PyCFunction)py_package_from_rpm, METH_VARARGS | METH_KEYWORDS, package_from_rpm__doc__}, {"xml_from_rpm", (PyCFunction)py_xml_from_rpm, METH_VARARGS | METH_KEYWORDS, xml_from_rpm__doc__}, {"xml_dump_primary", (PyCFunction)py_xml_dump_primary, METH_VARARGS, xml_dump_primary__doc__}, {"xml_dump_filelists", (PyCFunction)py_xml_dump_filelists, METH_VARARGS, xml_dump_filelists__doc__}, {"xml_dump_other", (PyCFunction)py_xml_dump_other, METH_VARARGS, xml_dump_other__doc__}, {"xml_dump_updaterecord", (PyCFunction)py_xml_dump_updaterecord, METH_VARARGS, xml_dump_updaterecord__doc__}, {"xml_dump", (PyCFunction)py_xml_dump, METH_VARARGS, xml_dump__doc__}, {"xml_parse_primary", (PyCFunction)py_xml_parse_primary, METH_VARARGS, xml_parse_primary__doc__}, {"xml_parse_primary_snippet",(PyCFunction)py_xml_parse_primary_snippet, METH_VARARGS, xml_parse_primary_snippet__doc__}, {"xml_parse_filelists", (PyCFunction)py_xml_parse_filelists, METH_VARARGS, xml_parse_filelists__doc__}, {"xml_parse_filelists_snippet",(PyCFunction)py_xml_parse_filelists_snippet, METH_VARARGS, xml_parse_filelists_snippet__doc__}, {"xml_parse_other", (PyCFunction)py_xml_parse_other, METH_VARARGS, xml_parse_other__doc__}, {"xml_parse_other_snippet",(PyCFunction)py_xml_parse_other_snippet, METH_VARARGS, xml_parse_other_snippet__doc__}, {"xml_parse_repomd", (PyCFunction)py_xml_parse_repomd, METH_VARARGS, xml_parse_repomd__doc__}, {"xml_parse_updateinfo", (PyCFunction)py_xml_parse_updateinfo, METH_VARARGS, xml_parse_updateinfo__doc__}, {"checksum_name_str", (PyCFunction)py_checksum_name_str, METH_VARARGS, checksum_name_str__doc__}, {"checksum_type", (PyCFunction)py_checksum_type, METH_VARARGS, checksum_type__doc__}, {"compress_file_with_stat", (PyCFunction)py_compress_file_with_stat, METH_VARARGS, compress_file_with_stat__doc__}, {"decompress_file_with_stat",(PyCFunction)py_decompress_file_with_stat, METH_VARARGS, decompress_file_with_stat__doc__}, {"compression_suffix", (PyCFunction)py_compression_suffix, METH_VARARGS, compression_suffix__doc__}, {"detect_compression", (PyCFunction)py_detect_compression, METH_VARARGS, detect_compression__doc__}, {"compression_type", (PyCFunction)py_compression_type, METH_VARARGS, compression_type__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; static struct PyModuleDef createrepo_c_module_def = { PyModuleDef_HEAD_INIT, "_createrepo_c", NULL, sizeof(struct module_state), createrepo_c_methods, NULL, NULL, NULL, NULL }; PyObject * PyInit__createrepo_c(void) { PyObject *m = PyModule_Create(&createrepo_c_module_def); if (!m) return NULL; /* Exceptions */ if (!init_exceptions()) return NULL; PyModule_AddObject(m, "CreaterepoCError", CrErr_Exception); /* Objects */ /* _createrepo_c.ContentStat */ if (PyType_Ready(&ContentStat_Type) < 0) 
return NULL; Py_INCREF(&ContentStat_Type); PyModule_AddObject(m, "ContentStat", (PyObject *)&ContentStat_Type); /* _createrepo_c.CrFile */ if (PyType_Ready(&CrFile_Type) < 0) return NULL; Py_INCREF(&CrFile_Type); PyModule_AddObject(m, "CrFile", (PyObject *)&CrFile_Type); /* _createrepo_c.Package */ if (PyType_Ready(&Package_Type) < 0) return NULL; Py_INCREF(&Package_Type); PyModule_AddObject(m, "Package", (PyObject *)&Package_Type); /* _createrepo_c.Metadata */ if (PyType_Ready(&Metadata_Type) < 0) return NULL; Py_INCREF(&Metadata_Type); PyModule_AddObject(m, "Metadata", (PyObject *)&Metadata_Type); /* _createrepo_c.MetadataLocation */ if (PyType_Ready(&MetadataLocation_Type) < 0) return NULL; Py_INCREF(&MetadataLocation_Type); PyModule_AddObject(m, "MetadataLocation", (PyObject *)&MetadataLocation_Type); /* _createrepo_c.Repomd */ if (PyType_Ready(&Repomd_Type) < 0) return NULL; Py_INCREF(&Repomd_Type); PyModule_AddObject(m, "Repomd", (PyObject *)&Repomd_Type); /* _createrepo_c.RepomdRecord */ if (PyType_Ready(&RepomdRecord_Type) < 0) return NULL; Py_INCREF(&RepomdRecord_Type); PyModule_AddObject(m, "RepomdRecord", (PyObject *)&RepomdRecord_Type); /* _createrepo_c.Sqlite */ if (PyType_Ready(&Sqlite_Type) < 0) return NULL; Py_INCREF(&Sqlite_Type); PyModule_AddObject(m, "Sqlite", (PyObject *)&Sqlite_Type); /* _createrepo_c.UpdateCollection */ if (PyType_Ready(&UpdateCollection_Type) < 0) return NULL; Py_INCREF(&UpdateCollection_Type); PyModule_AddObject(m, "UpdateCollection", (PyObject *)&UpdateCollection_Type); /* _createrepo_c.UpdateCollectionModule */ if (PyType_Ready(&UpdateCollectionModule_Type) < 0) return NULL; Py_INCREF(&UpdateCollectionModule_Type); PyModule_AddObject(m, "UpdateCollectionModule", (PyObject *)&UpdateCollectionModule_Type); /* _createrepo_c.UpdateCollectionPackage */ if (PyType_Ready(&UpdateCollectionPackage_Type) < 0) return NULL; Py_INCREF(&UpdateCollectionPackage_Type); PyModule_AddObject(m, "UpdateCollectionPackage", (PyObject *)&UpdateCollectionPackage_Type); /* _createrepo_c.UpdateInfo */ if (PyType_Ready(&UpdateInfo_Type) < 0) return NULL; Py_INCREF(&UpdateInfo_Type); PyModule_AddObject(m, "UpdateInfo", (PyObject *)&UpdateInfo_Type); /* _createrepo_c.UpdateRecord */ if (PyType_Ready(&UpdateRecord_Type) < 0) return NULL; Py_INCREF(&UpdateRecord_Type); PyModule_AddObject(m, "UpdateRecord", (PyObject *)&UpdateRecord_Type); /* _createrepo_c.UpdateReference */ if (PyType_Ready(&UpdateReference_Type) < 0) return NULL; Py_INCREF(&UpdateReference_Type); PyModule_AddObject(m, "UpdateReference", (PyObject *)&UpdateReference_Type); /* _createrepo_c.XmlFile */ if (PyType_Ready(&XmlFile_Type) < 0) return NULL; Py_INCREF(&XmlFile_Type); PyModule_AddObject(m, "XmlFile", (PyObject *)&XmlFile_Type); /* Createrepo init */ cr_xml_dump_init(); cr_package_parser_init(); Py_AtExit(cr_xml_dump_cleanup); Py_AtExit(cr_package_parser_cleanup); /* Python macro to use datetime objects */ PyDateTime_IMPORT; /* Module constants */ /* Version */ PyModule_AddIntConstant(m, "VERSION_MAJOR", CR_VERSION_MAJOR); PyModule_AddIntConstant(m, "VERSION_MINOR", CR_VERSION_MINOR); PyModule_AddIntConstant(m, "VERSION_PATCH", CR_VERSION_PATCH); /* Checksum types */ PyModule_AddIntConstant(m, "CHECKSUM_UNKNOWN", CR_CHECKSUM_UNKNOWN); PyModule_AddIntConstant(m, "MD5", CR_CHECKSUM_MD5); PyModule_AddIntConstant(m, "SHA", CR_CHECKSUM_SHA); PyModule_AddIntConstant(m, "SHA1", CR_CHECKSUM_SHA1); PyModule_AddIntConstant(m, "SHA224", CR_CHECKSUM_SHA224); PyModule_AddIntConstant(m, "SHA256", CR_CHECKSUM_SHA256); 
PyModule_AddIntConstant(m, "SHA384", CR_CHECKSUM_SHA384); PyModule_AddIntConstant(m, "SHA512", CR_CHECKSUM_SHA512); /* File open modes */ PyModule_AddIntConstant(m, "MODE_READ", CR_CW_MODE_READ); PyModule_AddIntConstant(m, "MODE_WRITE", CR_CW_MODE_WRITE); /* Compression types */ PyModule_AddIntConstant(m, "AUTO_DETECT_COMPRESSION", CR_CW_AUTO_DETECT_COMPRESSION); PyModule_AddIntConstant(m, "UNKNOWN_COMPRESSION", CR_CW_UNKNOWN_COMPRESSION); PyModule_AddIntConstant(m, "NO_COMPRESSION", CR_CW_NO_COMPRESSION); PyModule_AddIntConstant(m, "GZ_COMPRESSION", CR_CW_GZ_COMPRESSION); PyModule_AddIntConstant(m, "BZ2_COMPRESSION", CR_CW_BZ2_COMPRESSION); PyModule_AddIntConstant(m, "XZ_COMPRESSION", CR_CW_XZ_COMPRESSION); PyModule_AddIntConstant(m, "ZCK_COMPRESSION", CR_CW_ZCK_COMPRESSION); /* Zchunk support */ #ifdef WITH_ZCHUNK PyModule_AddIntConstant(m, "HAS_ZCK", 1); #else PyModule_AddIntConstant(m, "HAS_ZCK", 0); #endif // WITH_ZCHUNK /* Load Metadata key values */ PyModule_AddIntConstant(m, "HT_KEY_DEFAULT", CR_HT_KEY_DEFAULT); PyModule_AddIntConstant(m, "HT_KEY_HASH", CR_HT_KEY_HASH); PyModule_AddIntConstant(m, "HT_KEY_NAME", CR_HT_KEY_NAME); PyModule_AddIntConstant(m, "HT_KEY_FILENAME", CR_HT_KEY_FILENAME); /* Load Metadata key dup action */ PyModule_AddIntConstant(m, "HT_DUPACT_KEEPFIRST", CR_HT_DUPACT_KEEPFIRST); PyModule_AddIntConstant(m, "HT_DUPACT_REMOVEALL", CR_HT_DUPACT_REMOVEALL); /* Sqlite DB types */ PyModule_AddIntConstant(m, "DB_PRIMARY", CR_DB_PRIMARY); PyModule_AddIntConstant(m, "DB_FILELISTS", CR_DB_FILELISTS); PyModule_AddIntConstant(m, "DB_OTHER", CR_DB_OTHER); /* XmlFile types */ PyModule_AddIntConstant(m, "XMLFILE_PRIMARY", CR_XMLFILE_PRIMARY); PyModule_AddIntConstant(m, "XMLFILE_FILELISTS", CR_XMLFILE_FILELISTS); PyModule_AddIntConstant(m, "XMLFILE_OTHER", CR_XMLFILE_OTHER); PyModule_AddIntConstant(m, "XMLFILE_PRESTODELTA", CR_XMLFILE_PRESTODELTA); PyModule_AddIntConstant(m, "XMLFILE_UPDATEINFO", CR_XMLFILE_UPDATEINFO); /* XmlParser types */ PyModule_AddIntConstant(m, "XML_WARNING_UNKNOWNTAG", CR_XML_WARNING_UNKNOWNTAG); PyModule_AddIntConstant(m, "XML_WARNING_MISSINGATTR", CR_XML_WARNING_MISSINGATTR); PyModule_AddIntConstant(m, "XML_WARNING_UNKNOWNVAL", CR_XML_WARNING_UNKNOWNVAL); PyModule_AddIntConstant(m, "XML_WARNING_BADATTRVAL", CR_XML_WARNING_BADATTRVAL); return m; } createrepo_c-0.17.0/src/python/exception-py.c000066400000000000000000000047251400672373200211460ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012-2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include "exception-py.h" PyObject *CrErr_Exception = NULL; int init_exceptions() { CrErr_Exception = PyErr_NewExceptionWithDoc("createrepo_c.CreaterepoCError", "Createrepo_c library exception", NULL, NULL); if (!CrErr_Exception) return 0; Py_INCREF(CrErr_Exception); return 1; } void nice_exception(GError **err, const char *format, ...) { int ret; va_list vl; gchar *message, *usr_message = NULL; PyObject *exception; if (format) { // Prepare user message va_start(vl, format); ret = g_vasprintf(&usr_message, format, vl); va_end(vl); if (ret < 0) { // vasprintf failed - silently ignore this error g_free(usr_message); usr_message = NULL; } } // Prepare whole error message if (usr_message) message = g_strdup_printf("%s%s", usr_message, (*err)->message); else message = g_strdup((*err)->message); g_free(usr_message); // Select appropriate exception switch ((*err)->code) { case CRE_IO: case CRE_STAT: case CRE_NOFILE: case CRE_NODIR: case CRE_EXISTS: exception = PyExc_IOError; break; case CRE_MEMORY: exception = PyExc_MemoryError; break; case CRE_BADARG: exception = PyExc_ValueError; break; default: exception = CrErr_Exception; } g_clear_error(err); // Set exception PyErr_SetString(exception, message); g_free(message); } createrepo_c-0.17.0/src/python/exception-py.h000066400000000000000000000024331400672373200211450ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012-2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_EXCEPTION_PY_H #define CR_EXCEPTION_PY_H #include "src/createrepo_c.h" extern PyObject *CrErr_Exception; int init_exceptions(); /* Set exception by its return code (e.g., for CRE_IO, CRE_NOFILE, etc. will * be used a build-in python IOError exception) and free the GError. * @param err GError **, must be != NULL * @param format Prefix for the error message. */ void nice_exception(GError **err, const char *format, ...); #endif createrepo_c-0.17.0/src/python/load_metadata-py.c000066400000000000000000000235351400672373200217270ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "load_metadata-py.h" #include "locate_metadata-py.h" #include "package-py.h" #include "exception-py.h" #include "typeconversion.h" /* TODO: * keys() and records() method (same method - alias only) **/ typedef struct { PyObject_HEAD cr_Metadata *md; } _MetadataObject; static int check_MetadataStatus(const _MetadataObject *self) { assert(self != NULL); assert(MetadataObject_Check(self)); if (self->md == NULL) { PyErr_SetString(PyExc_TypeError, "Improper createrepo_c Metadata object."); return -1; } return 0; } /* Function on the type */ static PyObject * metadata_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _MetadataObject *self = (_MetadataObject *)type->tp_alloc(type, 0); if (self) self->md = NULL; return (PyObject *)self; } PyDoc_STRVAR(metadata_init__doc__, ".. method:: __init__(key=HT_KEY_DEFAULT, use_single_chunk=False, pkglist=[])\n\n" " :arg key: Which value should be used as a key. One of HT_KEY_* constants.\n" " :arg use_single_chunk: Specify if all package strings should be stored\n" " in metadata object instead of package iself. This save some\n" " space if you need to have a all packages loaded into a memory.\n" " :arg pkglist: Package list that specify which packages shloud be\n" " loaded. Use its base filename (e.g.\n" " \"GConf2-3.2.6-6.fc19.i686.rpm\").\n"); static int metadata_init(_MetadataObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = { "key", "use_single_chunk", "pkglist", NULL }; int key = CR_HT_KEY_DEFAULT; int use_single_chunk = 0; PyObject *py_pkglist = NULL; GSList *pkglist = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiO!:metadata_init", kwlist, &key, &use_single_chunk, &PyList_Type, &py_pkglist)) return -1; /* Free all previous resources when reinitialization */ if (self->md) { cr_metadata_free(self->md); } /* Init */ pkglist = GSList_FromPyList_Str(py_pkglist); self->md = cr_metadata_new(key, use_single_chunk, pkglist); g_slist_free(pkglist); if (self->md == NULL) { PyErr_SetString(CrErr_Exception, "Metadata initialization failed"); return -1; } return 0; } static void metadata_dealloc(_MetadataObject *self) { if (self->md) cr_metadata_free(self->md); Py_TYPE(self)->tp_free(self); } static PyObject * metadata_repr(G_GNUC_UNUSED _MetadataObject *self) { return PyUnicode_FromFormat(""); } /* Getters */ static PyObject * get_key(_MetadataObject *self, G_GNUC_UNUSED void *nothing) { if (check_MetadataStatus(self)) return NULL; cr_HashTableKey val = cr_metadata_key(self->md); return PyLong_FromLong((long) val); } static PyGetSetDef metadata_getsetters[] = { {"key", (getter)get_key, NULL, "Type of used key", NULL}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Metadata methods */ PyDoc_STRVAR(load_xml__doc__, "load_xml(metadata_location_object) -> None\n\n" "Load XML specified by MetadataLocation Object"); static PyObject * load_xml(_MetadataObject *self, PyObject *args) { PyObject *ml; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "O!:load_xml", &MetadataLocation_Type, &ml)) return NULL; if (check_MetadataStatus(self)) return NULL; if (cr_metadata_load_xml(self->md, MetadataLocation_FromPyObject(ml), &tmp_err) != CRE_OK) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(locate_and_load_xml__doc__, 
"locate_and_load_xml(path) -> None" "Load XML specified by path"); static PyObject * locate_and_load_xml(_MetadataObject *self, PyObject *args) { char *path; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "s:locate_and_load_xml", &path)) return NULL; if (check_MetadataStatus(self)) return NULL; cr_metadata_locate_and_load_xml(self->md, path, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } /* Hashtable methods */ PyDoc_STRVAR(len__doc__, "len() -> long\n\n" "Number of packages"); static PyObject * ht_len(_MetadataObject *self, G_GNUC_UNUSED PyObject *noarg) { unsigned long len = 0; if (check_MetadataStatus(self)) return NULL; GHashTable *ht = cr_metadata_hashtable(self->md); if (ht) len = (unsigned long) g_hash_table_size(ht); return PyLong_FromUnsignedLong(len); } /* static PyObject * ht_add(_MetadataObject *self, PyObject *args) { char *key; PyObject *py_pkg; cr_Package *pkg; if (!PyArg_ParseTuple(args, "sO!:add", &key, &Package_Type, &pkg)) return NULL; if (check_MetadataHashtableStatus(self)) return NULL; pkg = Package_FromPyObject(pkg); if (!pkg) Py_RETURN_NONE; Py_XINCREF(py_pkg); // XXX: Store referenced object for Py_XDECREF!!!!! g_hash_table_replace(self->md->ht, key, pkg); Py_RETURN_NONE; } */ PyDoc_STRVAR(has_key__doc__, "has_key(key) -> bool\n\n" "Test if metadata contains the key"); static PyObject * ht_has_key(_MetadataObject *self, PyObject *args) { char *key; if (!PyArg_ParseTuple(args, "s:has_key", &key)) return NULL; if (check_MetadataStatus(self)) return NULL; if (g_hash_table_lookup(cr_metadata_hashtable(self->md), key)) Py_RETURN_TRUE; Py_RETURN_FALSE; } PyDoc_STRVAR(keys__doc__, "keys() -> list\n\n" "List of all keys"); static PyObject * ht_keys(_MetadataObject *self, G_GNUC_UNUSED PyObject *args) { if (check_MetadataStatus(self)) return NULL; GList *keys = g_hash_table_get_keys(cr_metadata_hashtable(self->md)); PyObject *list = PyList_New(0); for (GList *elem = keys; elem; elem = g_list_next(elem)) { PyObject *py_str = PyUnicode_FromString(elem->data); assert(py_str); if (PyList_Append(list, py_str) == -1) { Py_XDECREF(list); g_list_free(keys); return NULL; } Py_DECREF(py_str); } g_list_free(keys); return list; } PyDoc_STRVAR(remove__doc__, "remove(key) -> bool\n\n" "Remove package which has a key key from the metadata"); static PyObject * ht_remove(_MetadataObject *self, PyObject *args) { char *key; if (!PyArg_ParseTuple(args, "s:del", &key)) return NULL; if (check_MetadataStatus(self)) return NULL; if (g_hash_table_remove(cr_metadata_hashtable(self->md), key)) Py_RETURN_TRUE; Py_RETURN_FALSE; } PyDoc_STRVAR(get__doc__, "get(key) -> Package\n\n" "Get Package which has a key key"); static PyObject * ht_get(_MetadataObject *self, PyObject *args) { char *key; if (!PyArg_ParseTuple(args, "s:get", &key)) return NULL; if (check_MetadataStatus(self)) return NULL; cr_Package *pkg = g_hash_table_lookup(cr_metadata_hashtable(self->md), key); if (!pkg) Py_RETURN_NONE; return (Object_FromPackage_WithParent(pkg, 0, (PyObject *) self)); } PyDoc_STRVAR(metadata_dupaction__doc__, ".. method:: dupaction(dupaction)\n\n" " :arg dupation: What to do when we encounter already existing key.\n" " use constants prefixed with HT_DUPACT_. I.e. 
\n" " HT_DUPACT_KEEPFIRST, HT_DUPACT_REMOVEALL.\n"); static PyObject * metadata_dupaction(_MetadataObject *self, PyObject *args) { int dupaction; gboolean res = TRUE; if (!PyArg_ParseTuple(args, "i:dupaction", &dupaction)) return NULL; res = cr_metadata_set_dupaction(self->md, dupaction); if (!res) { PyErr_SetString(CrErr_Exception, "Cannot set specified action"); return NULL; } Py_RETURN_NONE; } static struct PyMethodDef metadata_methods[] = { {"load_xml", (PyCFunction)load_xml, METH_VARARGS, load_xml__doc__}, {"locate_and_load_xml", (PyCFunction)locate_and_load_xml, METH_VARARGS, locate_and_load_xml__doc__}, {"len", (PyCFunction)ht_len, METH_NOARGS, len__doc__}, // {"add", (PyCFunction)ht_add, METH_VARARGS, NULL}, {"has_key", (PyCFunction)ht_has_key, METH_VARARGS, has_key__doc__}, {"keys", (PyCFunction)ht_keys, METH_NOARGS, keys__doc__}, {"remove", (PyCFunction)ht_remove, METH_VARARGS, remove__doc__}, {"get", (PyCFunction)ht_get, METH_VARARGS, get__doc__}, {"dupaction",(PyCFunction)metadata_dupaction, METH_VARARGS, metadata_dupaction__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* Object */ PyTypeObject Metadata_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.Metadata", .tp_basicsize = sizeof(_MetadataObject), .tp_dealloc = (destructor)metadata_dealloc, .tp_repr = (reprfunc)metadata_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = metadata_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = metadata_methods, .tp_getset = metadata_getsetters, .tp_init = (initproc)metadata_init, .tp_new = metadata_new, }; createrepo_c-0.17.0/src/python/load_metadata-py.h000066400000000000000000000020231400672373200217210ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_LOAD_METADATA_PY_H #define CR_LOAD_METADATA_PY_H #include "src/createrepo_c.h" extern PyTypeObject Metadata_Type; #define MetadataObject_Check(o) PyObject_TypeCheck(o, &Metadata_Type) #endif createrepo_c-0.17.0/src/python/locate_metadata-py.c000066400000000000000000000161601400672373200222530ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "locate_metadata-py.h" #include "exception-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD struct cr_MetadataLocation *ml; } _MetadataLocationObject; struct cr_MetadataLocation * MetadataLocation_FromPyObject(PyObject *o) { if (!MetadataLocationObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a createrepo_c.MetadataLocation object."); return NULL; } return ((_MetadataLocationObject *) o)->ml; } static int check_MetadataLocationStatus(const _MetadataLocationObject *self) { assert(self != NULL); assert(MetadataLocationObject_Check(self)); if (self->ml == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c MetadataLocation object."); return -1; } return 0; } /* Function on the type */ static PyObject * metadatalocation_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _MetadataLocationObject *self = (_MetadataLocationObject *)type->tp_alloc(type, 0); if (self) self->ml = NULL; return (PyObject *)self; } PyDoc_STRVAR(metadatalocation_init__doc__, "Class representing location of metadata\n\n" ".. method:: __init__(path, ignore_db)\n\n" " :arg path: String with url/path to the repository\n" " :arg ignore_db: Boolean. If False then in case of remote repository\n" " databases will not be downloaded)\n"); static int metadatalocation_init(_MetadataLocationObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds) { char *repopath; PyObject *py_ignore_db = NULL; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sO|:metadatalocation_init", &repopath, &py_ignore_db)) return -1; /* Free all previous resources when reinitialization */ if (self->ml) { cr_metadatalocation_free(self->ml); } /* Init */ self->ml = cr_locate_metadata(repopath, PyObject_IsTrue(py_ignore_db), &tmp_err); if (tmp_err) { g_clear_pointer(&(self->ml), cr_metadatalocation_free); nice_exception(&tmp_err, NULL); return -1; } return 0; } static void metadatalocation_dealloc(_MetadataLocationObject *self) { if (self->ml) cr_metadatalocation_free(self->ml); Py_TYPE(self)->tp_free(self); } static PyObject * metadatalocation_repr(G_GNUC_UNUSED _MetadataLocationObject *self) { return PyUnicode_FromFormat(""); } /* MetadataLocation methods */ static struct PyMethodDef metadatalocation_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; /* Mapping interface */ Py_ssize_t length(_MetadataLocationObject *self) { if (self->ml) return 9; return 0; } PyObject * getitem(_MetadataLocationObject *self, PyObject *pykey) { char *key, *value; if (check_MetadataLocationStatus(self)) return NULL; if (!PyUnicode_Check(pykey) && !PyBytes_Check(pykey)) { PyErr_SetString(PyExc_TypeError, "Unicode or bytes expected!"); return NULL; } pykey = PyObject_ToPyBytesOrNull(pykey); if (!pykey) { return NULL; } key = PyBytes_AsString(pykey); value = NULL; if (!strcmp(key, "primary")) { value = self->ml->pri_xml_href; } else if (!strcmp(key, "filelists")) { value = self->ml->fil_xml_href; } else if (!strcmp(key, "other")) { value = self->ml->oth_xml_href; } else if (!strcmp(key, "primary_db")) { value = self->ml->pri_sqlite_href; } else if (!strcmp(key, "filelists_db")) { value = self->ml->fil_sqlite_href; } else if (!strcmp(key, "other_db")) { value = self->ml->oth_sqlite_href; } else if (!strcmp(key, "group")) { //NOTE(amatej): Preserve 
old API for these specific files (group, group_gz, updateinfo) if (self->ml->additional_metadata){ GSList *m = g_slist_find_custom(self->ml->additional_metadata, "group", cr_cmp_metadatum_type); if (m) value = ((cr_Metadatum *)(m->data))->name; } } else if (!strcmp(key, "group_gz")) { if (self->ml->additional_metadata){ GSList *m = g_slist_find_custom(self->ml->additional_metadata, "group_gz", cr_cmp_metadatum_type); if (m) value = ((cr_Metadatum *)(m->data))->name; } } else if (!strcmp(key, "updateinfo")) { if (self->ml->additional_metadata){ GSList *m = g_slist_find_custom(self->ml->additional_metadata, "updateinfo", cr_cmp_metadatum_type); if (m) value = ((cr_Metadatum *)(m->data))->name; } } else if (!strcmp(key, "additional_metadata")){ if (self->ml->additional_metadata){ PyObject *list = PyList_New(0); if (!list) { Py_XDECREF(pykey); return NULL; } for (GSList *elem = self->ml->additional_metadata; elem; elem=g_slist_next(elem)){ PyObject *namestr = PyUnicode_FromString(((cr_Metadatum *)(elem->data))->name); if (!namestr || PyList_Append(list, namestr)) { Py_DECREF(list); Py_XDECREF(pykey); return NULL; } else { Py_DECREF(namestr); } } Py_XDECREF(pykey); return list; } } Py_XDECREF(pykey); if (value) { return PyUnicode_FromString(value); } else { Py_RETURN_NONE; } } static PyMappingMethods mapping_methods = { .mp_length = (lenfunc) length, .mp_subscript = (binaryfunc) getitem, .mp_ass_subscript = NULL, }; /* Object */ PyTypeObject MetadataLocation_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.MetadataLocation", .tp_basicsize = sizeof(_MetadataLocationObject), .tp_dealloc = (destructor)metadatalocation_dealloc, .tp_repr = (reprfunc)metadatalocation_repr, .tp_as_mapping = &mapping_methods, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = metadatalocation_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = metadatalocation_methods, .tp_init = (initproc)metadatalocation_init, .tp_new = metadatalocation_new, }; createrepo_c-0.17.0/src/python/locate_metadata-py.h000066400000000000000000000021671400672373200222620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef CR_LOCATE_METADATA_PY_H #define CR_LOCATE_METADATA_PY_H #include "src/createrepo_c.h" extern PyTypeObject MetadataLocation_Type; #define MetadataLocationObject_Check(o) PyObject_TypeCheck(o, &MetadataLocation_Type) struct cr_MetadataLocation *MetadataLocation_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/misc-py.c000066400000000000000000000050151400672373200200740ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "src/createrepo_c.h" #include "typeconversion.h" #include "exception-py.h" #include "misc-py.h" #include "contentstat-py.h" PyObject * py_compress_file_with_stat(G_GNUC_UNUSED PyObject *self, PyObject *args) { int type; char *src, *dst; PyObject *py_contentstat = NULL; cr_ContentStat *contentstat; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sziO:py_compress_file", &src, &dst, &type, &py_contentstat)) return NULL; if (!py_contentstat || py_contentstat == Py_None) { contentstat = NULL; } else { contentstat = ContentStat_FromPyObject(py_contentstat); if (!contentstat) return NULL; } cr_compress_file_with_stat(src, dst, type, contentstat, NULL, FALSE, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_decompress_file_with_stat(G_GNUC_UNUSED PyObject *self, PyObject *args) { int type; char *src, *dst; PyObject *py_contentstat = NULL; cr_ContentStat *contentstat; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sziO:py_decompress_file", &src, &dst, &type, &py_contentstat)) return NULL; if (!py_contentstat || py_contentstat == Py_None) { contentstat = NULL; } else { contentstat = ContentStat_FromPyObject(py_contentstat); if (!contentstat) return NULL; } cr_decompress_file_with_stat(src, dst, type, contentstat, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } createrepo_c-0.17.0/src/python/misc-py.h000066400000000000000000000027271400672373200201100ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_MISC_PY_H #define CR_MISC_PY_H #include "src/createrepo_c.h" PyDoc_STRVAR(compress_file_with_stat__doc__, "compress_file_with_stat(source, destination, compression_type, " "contentstat_object) -> None\n\n" "Compress file. destination and contentstat_object could be None"); PyObject *py_compress_file_with_stat(PyObject *self, PyObject *args); PyDoc_STRVAR(decompress_file_with_stat__doc__, "decompress_file_with_stat(source, destination, compression_type, " "contentstat_object) -> None\n\n" "Decompress file. destination and contentstat_object could be None"); PyObject *py_decompress_file_with_stat(PyObject *self, PyObject *args); #endif createrepo_c-0.17.0/src/python/package-py.c000066400000000000000000000463661400672373200205520ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include "package-py.h" #include "exception-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD cr_Package *package; int free_on_destroy; PyObject *parent; } _PackageObject; cr_Package * Package_FromPyObject(PyObject *o) { if (!PackageObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a createrepo_c.Package object."); return NULL; } return ((_PackageObject *)o)->package; } PyObject * Object_FromPackage(cr_Package *pkg, int free_on_destroy) { PyObject *pypkg; if (!pkg) { PyErr_SetString(PyExc_ValueError, "Expected a cr_Package pointer not NULL."); return NULL; } pypkg = PyObject_CallObject((PyObject*)&Package_Type, NULL); // XXX: Remove empty package in pypkg and replace it with pkg cr_package_free(((_PackageObject *)pypkg)->package); ((_PackageObject *)pypkg)->package = pkg; ((_PackageObject *)pypkg)->free_on_destroy = free_on_destroy; ((_PackageObject *)pypkg)->parent = NULL; return pypkg; } PyObject * Object_FromPackage_WithParent(cr_Package *pkg, int free_on_destroy, PyObject *parent) { PyObject *pypkg; pypkg = Object_FromPackage(pkg, free_on_destroy); if (pypkg) { ((_PackageObject *)pypkg)->parent = parent; Py_XINCREF(parent); } return pypkg; } static int check_PackageStatus(const _PackageObject *self) { assert(self != NULL); assert(PackageObject_Check(self)); if (self->package == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c Package object."); return -1; } return 0; } /* Function on the type */ static PyObject * package_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _PackageObject *self = (_PackageObject *)type->tp_alloc(type, 0); if (self) { self->package = NULL; self->free_on_destroy = 1; self->parent = NULL; } return (PyObject *)self; } PyDoc_STRVAR(package_init__doc__, "Package object\n\n" ".. method:: __init__()\n\n" " Default constructor\n"); static int package_init(_PackageObject *self, PyObject *args, PyObject *kwds) { char *kwlist[] = {NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|:package_init", kwlist)) return -1; if (self->package && self->free_on_destroy) // reinitialization by __init__() cr_package_free(self->package); if (self->parent) { Py_DECREF(self->parent); self->parent = NULL; } self->package = cr_package_new(); if (self->package == NULL) { PyErr_SetString(CrErr_Exception, "Package initialization failed"); return -1; } return 0; } static void package_dealloc(_PackageObject *self) { if (self->package && self->free_on_destroy) cr_package_free(self->package); if (self->parent) { Py_DECREF(self->parent); self->parent = NULL; } Py_TYPE(self)->tp_free(self); } static PyObject * package_repr(_PackageObject *self) { cr_Package *pkg = self->package; PyObject *repr; if (pkg) { repr = PyUnicode_FromFormat("", (pkg->pkgId ? pkg->pkgId : "-"), (pkg->name ? 
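/* Illustrative sketch (added, not part of the original sources): the
 * ownership rule behind free_on_destroy above -- objects returned by copy()
 * own an independent cr_Package, while packages fetched from a Metadata
 * hashtable only borrow memory kept alive by their parent object.  Assumes
 * <glib.h> and "src/createrepo_c.h". */
static char *
example_owned_copy_nevra(cr_Package *borrowed)
{
    cr_Package *own = cr_package_copy(borrowed);   /* independent copy */
    char *nevra = cr_package_nevra(own);           /* caller must free() */
    cr_package_free(own);                          /* borrowed copy stays valid */
    return nevra;
}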
pkg->name : "-")); } else { repr = PyUnicode_FromFormat(""); } return repr; } static PyObject * package_str(_PackageObject *self) { PyObject *ret; if (check_PackageStatus(self)) return NULL; if (self->package) { char *nevra = cr_package_nvra(self->package); ret = PyUnicode_FromString(nevra); free(nevra); } else { ret = PyUnicode_FromString("-"); } return ret; } /* Package methods */ PyDoc_STRVAR(nvra__doc__, "nvra() -> str\n\n" "Package NVRA string (Name-Version-Release-Architecture)"); static PyObject * nvra(_PackageObject *self, G_GNUC_UNUSED void *nothing) { PyObject *pystr; if (check_PackageStatus(self)) return NULL; char *nvra = cr_package_nvra(self->package); pystr = PyUnicodeOrNone_FromString(nvra); free(nvra); return pystr; } PyDoc_STRVAR(nevra__doc__, "nevra() -> str\n\n" "Package NEVRA string (Name-Epoch-Version-Release-Architecture)"); static PyObject * nevra(_PackageObject *self, G_GNUC_UNUSED void *nothing) { PyObject *pystr; if (check_PackageStatus(self)) return NULL; char *nevra = cr_package_nevra(self->package); pystr = PyUnicodeOrNone_FromString(nevra); free(nevra); return pystr; } PyDoc_STRVAR(copy__doc__, "copy() -> Package\n\n" "Copy of the package object"); static PyObject * copy_pkg(_PackageObject *self, G_GNUC_UNUSED void *nothing) { if (check_PackageStatus(self)) return NULL; return Object_FromPackage(cr_package_copy(self->package), 1); } static PyObject * deepcopy_pkg(_PackageObject *self, PyObject *args) { PyObject *obj; if (!PyArg_ParseTuple(args, "O:deepcopy_pkg", &obj)) return NULL; if (check_PackageStatus(self)) return NULL; return Object_FromPackage(cr_package_copy(self->package), 1); } static struct PyMethodDef package_methods[] = { {"nvra", (PyCFunction)nvra, METH_NOARGS, nvra__doc__}, {"nevra", (PyCFunction)nevra, METH_NOARGS, nevra__doc__}, {"copy", (PyCFunction)copy_pkg, METH_NOARGS, copy__doc__}, {"__copy__", (PyCFunction)copy_pkg, METH_NOARGS, copy__doc__}, {"__deepcopy__", (PyCFunction)deepcopy_pkg, METH_VARARGS, copy__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* Getters */ static PyObject * get_num(_PackageObject *self, void *member_offset) { if (check_PackageStatus(self)) return NULL; cr_Package *pkg = self->package; gint64 val = (gint64) *((gint64 *) ((size_t)pkg + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_str(_PackageObject *self, void *member_offset) { if (check_PackageStatus(self)) return NULL; cr_Package *pkg = self->package; char *str = *((char **) ((size_t) pkg + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } /** Return offset of a selected member of cr_Package structure. */ #define OFFSET(member) (void *) offsetof(cr_Package, member) /** Convert C object to PyObject. * @param C object * @return PyObject representation */ typedef PyObject *(*ConversionFromFunc)(void *); /** Check an element from a list if has a valid format. * @param a single list element * @return 0 if ok, 1 otherwise */ typedef int (*ConversionToCheckFunc)(PyObject *); /** Convert PyObject to C representation. 
* @param PyObject * @return C representation */ typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *); /* Pre-Declaration for check functions */ static int CheckPyDependency(PyObject *dep); static int CheckPyPackageFile(PyObject *dep); static int CheckPyChangelogEntry(PyObject *dep); typedef struct { size_t offset; /*!< Ofset of the list in cr_Package */ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */ ConversionToFunc t; /*!< Conversion func to C object from PyObject */ } ListConvertor; /** List of convertors for converting a lists in cr_Package. */ static ListConvertor list_convertors[] = { { offsetof(cr_Package, requires), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, provides), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, conflicts), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, obsoletes), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, suggests), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, enhances), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, recommends), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, supplements), (ConversionFromFunc) PyObject_FromDependency, (ConversionToCheckFunc) CheckPyDependency, (ConversionToFunc) PyObject_ToDependency }, { offsetof(cr_Package, files), (ConversionFromFunc) PyObject_FromPackageFile, (ConversionToCheckFunc) CheckPyPackageFile, (ConversionToFunc) PyObject_ToPackageFile }, { offsetof(cr_Package, changelogs), (ConversionFromFunc) PyObject_FromChangelogEntry, (ConversionToCheckFunc) CheckPyChangelogEntry, (ConversionToFunc) PyObject_ToChangelogEntry }, }; static PyObject * get_list(_PackageObject *self, void *conv) { ListConvertor *convertor = conv; PyObject *list; cr_Package *pkg = self->package; GSList *glist = *((GSList **) ((size_t) pkg + (size_t) convertor->offset)); if (check_PackageStatus(self)) return NULL; if ((list = PyList_New(0)) == NULL) return NULL; for (GSList *elem = glist; elem; elem = g_slist_next(elem)) { PyObject *obj = convertor->f(elem->data); if (!obj) continue; PyList_Append(list, obj); Py_DECREF(obj); } return list; } /* Setters */ static int set_num(_PackageObject *self, PyObject *value, void *member_offset) { gint64 val; if (check_PackageStatus(self)) return -1; if (PyLong_Check(value)) { val = (gint64) PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (gint64) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_Package *pkg = self->package; *((gint64 *) ((size_t) pkg + (size_t) member_offset)) = val; return 0; } static int set_str(_PackageObject *self, PyObject *value, void *member_offset) { if (check_PackageStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && 
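/* Illustrative sketch (added, not part of the original sources): every
 * attribute driven by the convertor table above is a plain GSList member of
 * cr_Package, so the same data can be walked directly from C.  Assumes
 * <glib.h> and "src/createrepo_c.h". */
static guint
example_count_files_and_requires(cr_Package *pkg)
{
    /* pkg->files and pkg->requires back the "files" and "requires"
     * attributes handled by list_convertors. */
    return g_slist_length(pkg->files) + g_slist_length(pkg->requires);
}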
value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_Package *pkg = self->package; if (value == Py_None) { // If value is None exist right now (avoid possibly // creation of a string chunk) *((char **) ((size_t) pkg + (size_t) member_offset)) = NULL; return 0; } // Check if chunk exits // If it doesn't - this is package from loaded metadata and all its // strings are in a metadata common chunk (cr_Metadata->chunk). // In this case, we have to create a chunk for this package before // inserting a new string. if (!pkg->chunk) pkg->chunk = g_string_chunk_new(0); char *str = PyObject_ToChunkedString(value, pkg->chunk); *((char **) ((size_t) pkg + (size_t) member_offset)) = str; return 0; } static int CheckPyDependency(PyObject *dep) { if (!PyTuple_Check(dep) || PyTuple_Size(dep) != 6) { PyErr_SetString(PyExc_TypeError, "Element of list has to be a tuple with 6 items."); return 1; } return 0; } static int CheckPyPackageFile(PyObject *dep) { if (!PyTuple_Check(dep) || PyTuple_Size(dep) != 3) { PyErr_SetString(PyExc_TypeError, "Element of list has to be a tuple with 3 items."); return 1; } return 0; } static int CheckPyChangelogEntry(PyObject *dep) { if (!PyTuple_Check(dep) || PyTuple_Size(dep) != 3) { PyErr_SetString(PyExc_TypeError, "Element of list has to be a tuple with 3 items."); return 1; } return 0; } static int set_list(_PackageObject *self, PyObject *list, void *conv) { ListConvertor *convertor = conv; cr_Package *pkg = self->package; GSList *glist = NULL; if (check_PackageStatus(self)) return -1; if (!PyList_Check(list)) { PyErr_SetString(PyExc_TypeError, "List expected!"); return -1; } // Check if chunk exits // If it doesn't - this is package from loaded metadata and all its // strings are in a metadata common chunk (cr_Metadata->chunk). // In this case, we have to create a chunk for this package before // inserting a new string. 
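/* Illustrative sketch (added, not part of the original sources): the
 * GStringChunk ownership model the setters above depend on -- every string
 * stored into a cr_Package has to live in pkg->chunk so that freeing the
 * package releases all of its strings at once.  Assumes <glib.h> and
 * "src/createrepo_c.h". */
static void
example_set_name(cr_Package *pkg, const char *name)
{
    if (!pkg->chunk)
        pkg->chunk = g_string_chunk_new(0);
    pkg->name = g_string_chunk_insert(pkg->chunk, name);
}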
if (!pkg->chunk) pkg->chunk = g_string_chunk_new(0); Py_ssize_t len = PyList_Size(list); // Check all elements for (Py_ssize_t x = 0; x < len; x++) { PyObject *elem = PyList_GetItem(list, x); if (convertor->t_check && convertor->t_check(elem)) return -1; } for (Py_ssize_t x = 0; x < len; x++) { glist = g_slist_prepend(glist, convertor->t(PyList_GetItem(list, x), pkg->chunk)); } *((GSList **) ((size_t) pkg + (size_t) convertor->offset)) = glist; return 0; } static PyGetSetDef package_getsetters[] = { {"pkgId", (getter)get_str, (setter)set_str, "Checksum of the package file", OFFSET(pkgId)}, {"name", (getter)get_str, (setter)set_str, "Name of the package", OFFSET(name)}, {"arch", (getter)get_str, (setter)set_str, "Architecture for which the package was built", OFFSET(arch)}, {"version", (getter)get_str, (setter)set_str, "Version of the packaged software", OFFSET(version)}, {"epoch", (getter)get_str, (setter)set_str, "Epoch", OFFSET(epoch)}, {"release", (getter)get_str, (setter)set_str, "Release number of the package", OFFSET(release)}, {"summary", (getter)get_str, (setter)set_str, "Short description of the packaged software", OFFSET(summary)}, {"description", (getter)get_str, (setter)set_str, "In-depth description of the packaged software", OFFSET(description)}, {"url", (getter)get_str, (setter)set_str, "URL with more information about packaged software", OFFSET(url)}, {"time_file", (getter)get_num, (setter)set_num, "mtime of the package file", OFFSET(time_file)}, {"time_build", (getter)get_num, (setter)set_num, "Time when package was builded", OFFSET(time_build)}, {"rpm_license", (getter)get_str, (setter)set_str, "License term applicable to the package software (GPLv2, etc.)", OFFSET(rpm_license)}, {"rpm_vendor", (getter)get_str, (setter)set_str, "Name of the organization producing the package", OFFSET(rpm_vendor)}, {"rpm_group", (getter)get_str, (setter)set_str, "RPM group (See: http://fedoraproject.org/wiki/RPMGroups)", OFFSET(rpm_group)}, {"rpm_buildhost", (getter)get_str, (setter)set_str, "Hostname of the system that built the package", OFFSET(rpm_buildhost)}, {"rpm_sourcerpm", (getter)get_str, (setter)set_str, "Name of the source package from which this binary package was built", OFFSET(rpm_sourcerpm)}, {"rpm_header_start", (getter)get_num, (setter)set_num, "First byte of the header", OFFSET(rpm_header_start)}, {"rpm_header_end", (getter)get_num, (setter)set_num, "Last byte of the header", OFFSET(rpm_header_end)}, {"rpm_packager", (getter)get_str, (setter)set_str, "Person or persons responsible for creating the package", OFFSET(rpm_packager)}, {"size_package", (getter)get_num, (setter)set_num, "Size, in bytes, of the package", OFFSET(size_package)}, {"size_installed", (getter)get_num, (setter)set_num, "Total size, in bytes, of every file installed by this package", OFFSET(size_installed)}, {"size_archive", (getter)get_num, (setter)set_num, "Size, in bytes, of the archive portion of the original package file", OFFSET(size_archive)}, {"location_href", (getter)get_str, (setter)set_str, "Relative location of package to the repodata", OFFSET(location_href)}, {"location_base", (getter)get_str, (setter)set_str, "Base location of this package", OFFSET(location_base)}, {"checksum_type", (getter)get_str, (setter)set_str, "Type of checksum", OFFSET(checksum_type)}, {"requires", (getter)get_list, (setter)set_list, "Capabilities the package requires", &(list_convertors[0])}, {"provides", (getter)get_list, (setter)set_list, "Capabilities the package provides", &(list_convertors[1])}, {"conflicts", 
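/* Illustrative sketch (added, not part of the original sources): the
 * OFFSET()/offsetof() trick used by this getter/setter table lets one pair
 * of generic helpers serve every scalar attribute; the same generic read
 * works from plain C.  Assumes <stddef.h>, <glib.h> and
 * "src/createrepo_c.h". */
static gint64
example_read_num_member(cr_Package *pkg, size_t member_offset)
{
    return *((gint64 *) ((size_t) pkg + member_offset));
}
/* e.g. example_read_num_member(pkg, offsetof(cr_Package, size_package)) */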
(getter)get_list, (setter)set_list, "Capabilities the package conflicts with", &(list_convertors[2])}, {"obsoletes", (getter)get_list, (setter)set_list, "Capabilities the package obsoletes", &(list_convertors[3])}, {"suggests", (getter)get_list, (setter)set_list, "Capabilities the package suggests", &(list_convertors[4])}, {"enhances", (getter)get_list, (setter)set_list, "Capabilities the package enhances", &(list_convertors[5])}, {"recommends", (getter)get_list, (setter)set_list, "Capabilities the package recommends", &(list_convertors[6])}, {"supplements", (getter)get_list, (setter)set_list, "Capabilities the package supplements", &(list_convertors[7])}, {"files", (getter)get_list, (setter)set_list, "Files that package contains", &(list_convertors[8])}, {"changelogs", (getter)get_list, (setter)set_list, "Changelogs that package contains", &(list_convertors[9])}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject Package_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.Package", .tp_basicsize = sizeof(_PackageObject), .tp_dealloc = (destructor) package_dealloc, .tp_repr = (reprfunc) package_repr, .tp_str = (reprfunc)package_str, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = package_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = package_methods, .tp_getset = package_getsetters, .tp_init = (initproc) package_init, .tp_new = package_new, }; createrepo_c-0.17.0/src/python/package-py.h000066400000000000000000000023321400672373200205400ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_PACKAGE_PY_H #define CR_PACKAGE_PY_H #include "src/createrepo_c.h" extern PyTypeObject Package_Type; #define PackageObject_Check(o) PyObject_TypeCheck(o, &Package_Type) PyObject *Object_FromPackage(cr_Package *pkg, int free_on_destroy); cr_Package *Package_FromPyObject(PyObject *o); PyObject * Object_FromPackage_WithParent(cr_Package *pkg, int free_on_destroy, PyObject *parent); #endif createrepo_c-0.17.0/src/python/parsepkg-py.c000066400000000000000000000065311400672373200207610ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "src/createrepo_c.h" #include "typeconversion.h" #include "parsepkg-py.h" #include "package-py.h" #include "exception-py.h" PyObject * py_package_from_rpm(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *ret; cr_Package *pkg; int checksum_type, changelog_limit; char *filename, *location_href, *location_base; GError *tmp_err = NULL; cr_HeaderReadingFlags flags = CR_HDRR_NONE; // TODO - support for flags if (!PyArg_ParseTuple(args, "sizzi:py_package_from_rpm", &filename, &checksum_type, &location_href, &location_base, &changelog_limit)) { return NULL; } pkg = cr_package_from_rpm(filename, checksum_type, location_href, location_base, changelog_limit, NULL, flags, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, "Cannot load %s: ", filename); return NULL; } ret = Object_FromPackage(pkg, 1); return ret; } PyObject * py_xml_from_rpm(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *tuple; int checksum_type, changelog_limit; char *filename, *location_href, *location_base; struct cr_XmlStruct xml_res; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sizzi:py_xml_from_rpm", &filename, &checksum_type, &location_href, &location_base, &changelog_limit)) { return NULL; } xml_res = cr_xml_from_rpm(filename, checksum_type, location_href, location_base, changelog_limit, NULL, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, "Cannot load %s: ", filename); return NULL; } if ((tuple = PyTuple_New(3)) == NULL) goto py_xml_from_rpm_end; // Free xml_res and return NULL PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(xml_res.primary)); PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(xml_res.filelists)); PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(xml_res.other)); py_xml_from_rpm_end: free(xml_res.primary); free(xml_res.filelists); free(xml_res.other); return tuple; } createrepo_c-0.17.0/src/python/parsepkg-py.h000066400000000000000000000026141400672373200207640ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef CR_PARSEPKG_PY_H #define CR_PARSEPKG_PY_H #include "src/createrepo_c.h" PyDoc_STRVAR(package_from_rpm__doc__, "package_from_rpm(filename, checksum_type, location_href, " "location_base, changelog_limit) -> Package\n\n" "Package object from the rpm package"); PyObject *py_package_from_rpm(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_from_rpm__doc__, "xml_from_rpm(filename, checksum_type, location_href, " "location_base, changelog_limit) -> (str, str, str)\n\n" "XML for the package rpm package"); PyObject *py_xml_from_rpm(PyObject *self, PyObject *args); #endif createrepo_c-0.17.0/src/python/repomd-py.c000066400000000000000000000332111400672373200204260ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "repomd-py.h" #include "repomdrecord-py.h" #include "exception-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD cr_Repomd *repomd; } _RepomdObject; cr_Repomd * Repomd_FromPyObject(PyObject *o) { if (!RepomdObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a createrepo_c.Repomd object."); return NULL; } return ((_RepomdObject *)o)->repomd; } static int check_RepomdStatus(const _RepomdObject *self) { assert(self != NULL); assert(RepomdObject_Check(self)); if (self->repomd == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c Repomd object."); return -1; } return 0; } /* Function on the type */ static PyObject * repomd_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _RepomdObject *self = (_RepomdObject *)type->tp_alloc(type, 0); if (self) { self->repomd = NULL; } return (PyObject *)self; } PyDoc_STRVAR(repomd_init__doc__, "Repomd object"); static int repomd_init(_RepomdObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->repomd) { cr_repomd_free(self->repomd); } /* Init */ self->repomd = cr_repomd_new(); if (self->repomd == NULL) { PyErr_SetString(CrErr_Exception, "Repomd initialization failed"); return -1; } return 0; } static void repomd_dealloc(_RepomdObject *self) { if (self->repomd) cr_repomd_free(self->repomd); Py_TYPE(self)->tp_free(self); } static PyObject * repomd_repr(G_GNUC_UNUSED _RepomdObject *self) { return PyUnicode_FromString(""); } /* Repomd methods */ PyDoc_STRVAR(set_record__doc__, "set_record(repomdrecord) -> None\n\n" "Add RepomdRecord"); static PyObject * set_record(_RepomdObject *self, PyObject *args) { PyObject *record; cr_RepomdRecord *orig, *new; if (!PyArg_ParseTuple(args, "O!:set_record", &RepomdRecord_Type, &record)) return NULL; if (check_RepomdStatus(self)) return NULL; orig = RepomdRecord_FromPyObject(record); new = cr_repomd_record_copy(orig); 
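/* Illustrative sketch (added, not part of the original sources): the same
 * record-copy-and-attach workflow as set_record(), driven directly against
 * the C API.  The path and checksum type come from the caller; error
 * handling is kept minimal.  Assumes <glib.h> and "src/createrepo_c.h". */
static cr_Repomd *
example_build_repomd(const char *primary_path, int checksum_type)
{
    GError *err = NULL;
    cr_Repomd *repomd = cr_repomd_new();
    cr_RepomdRecord *rec = cr_repomd_record_new("primary", primary_path);

    cr_repomd_record_fill(rec, checksum_type, &err);
    if (err) {
        g_clear_error(&err);
        cr_repomd_record_free(rec);
        cr_repomd_free(repomd);
        return NULL;
    }

    /* The repomd object takes ownership of the attached record, which is
     * why set_record() attaches a copy rather than the caller's record. */
    cr_repomd_set_record(repomd, rec);
    return repomd;
}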
cr_repomd_set_record(self->repomd, new); Py_RETURN_NONE; } PyDoc_STRVAR(set_revision__doc__, "set_revision(revision) -> None\n\n" "Set revision string"); static PyObject * set_revision(_RepomdObject *self, PyObject *args) { char *revision; if (!PyArg_ParseTuple(args, "s:set_revision", &revision)) return NULL; if (check_RepomdStatus(self)) return NULL; cr_repomd_set_revision(self->repomd, revision); Py_RETURN_NONE; } PyDoc_STRVAR(set_repoid__doc__, "set_repoid(repoid, repoid_type) -> None\n\n" "Set repoid value and repoid_type"); static PyObject * set_repoid(_RepomdObject *self, PyObject *args) { char *repoid, *repoid_type; if (!PyArg_ParseTuple(args, "zz:set_repoid", &repoid, &repoid_type)) return NULL; if (check_RepomdStatus(self)) return NULL; cr_repomd_set_repoid(self->repomd, repoid, repoid_type); Py_RETURN_NONE; } PyDoc_STRVAR(set_contenthash__doc__, "set_contenthash(contenthash, contenthash_type) -> None\n\n" "Set contenthash value and contenthash_type"); static PyObject * set_contenthash(_RepomdObject *self, PyObject *args) { char *contenthash, *contenthash_type; if (!PyArg_ParseTuple(args, "zz:set_contenthash", &contenthash, &contenthash_type)) return NULL; if (check_RepomdStatus(self)) return NULL; cr_repomd_set_contenthash(self->repomd, contenthash, contenthash_type); Py_RETURN_NONE; } PyDoc_STRVAR(add_distro_tag__doc__, "add_distro_tag(tag[, cpeid=None]) -> None\n\n" "Add distro tag"); static PyObject * add_distro_tag(_RepomdObject *self, PyObject *args, PyObject *kwargs) { static char *kwlist[] = { "tag", "cpeid", NULL }; char *tag = NULL, *cpeid = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|z:add_distro_tag", kwlist, &tag, &cpeid)) return NULL; if (check_RepomdStatus(self)) return NULL; cr_repomd_add_distro_tag(self->repomd, cpeid, tag); Py_RETURN_NONE; } PyDoc_STRVAR(add_repo_tag__doc__, "add_repo_tag(tag) -> None\n\n" "Add repo tag"); static PyObject * add_repo_tag(_RepomdObject *self, PyObject *args) { char *tag; if (!PyArg_ParseTuple(args, "s:add_repo_tag", &tag)) return NULL; if (check_RepomdStatus(self)) return NULL; cr_repomd_add_repo_tag(self->repomd, tag); Py_RETURN_NONE; } PyDoc_STRVAR(add_content_tag__doc__, "add_content_tag(tag) -> None\n\n" "Add content tag"); static PyObject * add_content_tag(_RepomdObject *self, PyObject *args) { char *tag; if (!PyArg_ParseTuple(args, "s:add_content_tag", &tag)) return NULL; if (check_RepomdStatus(self)) return NULL; cr_repomd_add_content_tag(self->repomd, tag); Py_RETURN_NONE; } PyDoc_STRVAR(sort_records__doc__, "sort_records() -> str\n\n" "Sort repomd records to the createrepo_c preferred order"); static PyObject * sort_records(_RepomdObject *self, G_GNUC_UNUSED void *nothing) { cr_repomd_sort_records(self->repomd); Py_RETURN_NONE; } PyDoc_STRVAR(xml_dump__doc__, "xml_dump() -> str\n\n" "Generate xml representation of the repomd"); static PyObject * xml_dump(_RepomdObject *self, G_GNUC_UNUSED void *nothing) { PyObject *py_str; GError *tmp_err = NULL; char *xml = cr_xml_dump_repomd(self->repomd, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } py_str = PyUnicodeOrNone_FromString(xml); free(xml); return py_str; } static struct PyMethodDef repomd_methods[] = { {"set_record", (PyCFunction)set_record, METH_VARARGS, set_record__doc__}, {"set_revision", (PyCFunction)set_revision, METH_VARARGS, set_revision__doc__}, {"set_repoid", (PyCFunction)set_repoid, METH_VARARGS, set_repoid__doc__}, {"set_contenthash", (PyCFunction)set_contenthash, METH_VARARGS, set_contenthash__doc__}, {"add_distro_tag", 
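/* Illustrative sketch (added, not part of the original sources): the calls
 * wrapped by set_revision(), add_repo_tag(), sort_records() and xml_dump()
 * above, used to finish a repomd and render its XML.  The revision and tag
 * values are placeholders.  Assumes <glib.h> and "src/createrepo_c.h". */
static char *
example_finish_repomd(cr_Repomd *repomd)
{
    GError *err = NULL;

    cr_repomd_set_revision(repomd, "1616161616");
    cr_repomd_add_repo_tag(repomd, "example-tag");
    cr_repomd_sort_records(repomd);

    char *xml = cr_xml_dump_repomd(repomd, &err);   /* caller must free() */
    if (err) {
        g_clear_error(&err);
        return NULL;
    }
    return xml;
}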
(PyCFunction)add_distro_tag, METH_VARARGS|METH_KEYWORDS, add_distro_tag__doc__}, {"add_repo_tag", (PyCFunction)add_repo_tag, METH_VARARGS, add_repo_tag__doc__}, {"add_content_tag", (PyCFunction)add_content_tag, METH_VARARGS, add_content_tag__doc__}, {"sort_records", (PyCFunction)sort_records, METH_NOARGS, sort_records__doc__}, {"xml_dump", (PyCFunction)xml_dump, METH_NOARGS, xml_dump__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* Convertors for getsetters */ /** Convert C object to PyObject. * @param C object * @return PyObject representation */ typedef PyObject *(*ConversionFromFunc)(void *); /** Check an element from a list if has a valid format. * @param a single list element * @return 0 if ok, 1 otherwise */ typedef int (*ConversionToCheckFunc)(PyObject *); /** Convert PyObject to C representation. * @param PyObject * @return C representation */ typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *); static int CheckPyUnicode(PyObject *dep) { if (!PyUnicode_Check(dep)) { PyErr_SetString(PyExc_TypeError, "Element of list has to be a string"); return 1; } return 0; } static int CheckPyDistroTag(PyObject *dep) { if (!PyTuple_Check(dep) || PyTuple_Size(dep) != 2) { PyErr_SetString(PyExc_TypeError, "Element of list has to be a tuple with 2 items."); return 1; } return 0; } PyObject * PyObject_FromRepomdRecord(cr_RepomdRecord *rec) { return Object_FromRepomdRecord(cr_repomd_record_copy(rec)); } typedef struct { size_t offset; /*!< Ofset of the list in cr_Repomd */ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */ ConversionToFunc t; /*!< Conversion func to C object from PyObject */ } ListConvertor; /** List of convertors for converting a lists in cr_Package. 
*/ static ListConvertor list_convertors[] = { { offsetof(cr_Repomd, repo_tags), (ConversionFromFunc) PyUnicodeOrNone_FromString, (ConversionToCheckFunc) CheckPyUnicode, (ConversionToFunc) PyObject_ToChunkedString }, { offsetof(cr_Repomd, distro_tags), (ConversionFromFunc) PyObject_FromDistroTag, (ConversionToCheckFunc) CheckPyDistroTag, (ConversionToFunc) PyObject_ToDistroTag }, { offsetof(cr_Repomd, content_tags), (ConversionFromFunc) PyUnicodeOrNone_FromString, (ConversionToCheckFunc) CheckPyUnicode, (ConversionToFunc) PyObject_ToChunkedString }, { offsetof(cr_Repomd, records), (ConversionFromFunc) PyObject_FromRepomdRecord, (ConversionToCheckFunc) NULL, (ConversionToFunc) NULL }, }; /* Getters */ static PyObject * get_str(_RepomdObject *self, void *member_offset) { if (check_RepomdStatus(self)) return NULL; cr_Repomd *repomd = self->repomd; char *str = *((char **) ((size_t) repomd + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static PyObject * get_list(_RepomdObject *self, void *conv) { ListConvertor *convertor = conv; PyObject *list; cr_Repomd *repomd = self->repomd; GSList *glist = *((GSList **) ((size_t) repomd + (size_t) convertor->offset)); if (check_RepomdStatus(self)) return NULL; if ((list = PyList_New(0)) == NULL) return NULL; for (GSList *elem = glist; elem; elem = g_slist_next(elem)) { PyObject *obj = convertor->f(elem->data); if (!obj) continue; PyList_Append(list, obj); Py_DECREF(obj); } return list; } /* Setters */ static int set_str(_RepomdObject *self, PyObject *value, void *member_offset) { if (check_RepomdStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_Repomd *repomd = self->repomd; char *str = PyObject_ToChunkedString(value, repomd->chunk); *((char **) ((size_t) repomd + (size_t) member_offset)) = str; return 0; } static int set_list(_RepomdObject *self, PyObject *list, void *conv) { ListConvertor *convertor = conv; cr_Repomd *repomd = self->repomd; GSList *glist = NULL; if (check_RepomdStatus(self)) return -1; if (!PyList_Check(list)) { PyErr_SetString(PyExc_TypeError, "List expected!"); return -1; } Py_ssize_t len = PyList_Size(list); // Check all elements for (Py_ssize_t x = 0; x < len; x++) { PyObject *elem = PyList_GetItem(list, x); if (convertor->t_check && convertor->t_check(elem)) return -1; } for (Py_ssize_t x = 0; x < len; x++) { glist = g_slist_prepend(glist, convertor->t(PyList_GetItem(list, x), repomd->chunk)); } *((GSList **) ((size_t) repomd + (size_t) convertor->offset)) = glist; return 0; } /** Return offset of a selected member of cr_Repomd structure. 
*/ #define OFFSET(member) (void *) offsetof(cr_Repomd, member) static PyGetSetDef repomd_getsetters[] = { {"revision", (getter)get_str, (setter)set_str, "Revision value", OFFSET(revision)}, {"repoid", (getter)get_str, (setter)set_str, "Repoid value", OFFSET(repoid)}, {"repoid_type", (getter)get_str, (setter)set_str, "Repoid type value", OFFSET(repoid_type)}, {"contenthash", (getter)get_str, (setter)set_str, "Contenthash value", OFFSET(contenthash)}, {"contenthash_type", (getter)get_str, (setter)set_str, "contenthash type value", OFFSET(contenthash_type)}, {"repo_tags", (getter)get_list, (setter)set_list, "List of repo tags", &(list_convertors[0])}, {"distro_tags", (getter)get_list, (setter)set_list, "List of distro tags", &(list_convertors[1])}, {"content_tags", (getter)get_list, (setter)set_list, "List of content tags", &(list_convertors[2])}, {"records", (getter)get_list, (setter)NULL, "List of RepomdRecords", &(list_convertors[3])}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject Repomd_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.Repomd", .tp_basicsize = sizeof(_RepomdObject), .tp_dealloc = (destructor) repomd_dealloc, .tp_repr = (reprfunc) repomd_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = repomd_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = repomd_methods, .tp_getset = repomd_getsetters, .tp_init = (initproc) repomd_init, .tp_new = repomd_new, }; createrepo_c-0.17.0/src/python/repomd-py.h000066400000000000000000000020551400672373200204350ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_REPOMD_PY_H #define CR_REPOMD_PY_H #include "src/createrepo_c.h" extern PyTypeObject Repomd_Type; #define RepomdObject_Check(o) PyObject_TypeCheck(o, &Repomd_Type) cr_Repomd *Repomd_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/repomdrecord-py.c000066400000000000000000000325351400672373200216350ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "repomdrecord-py.h" #include "exception-py.h" #include "typeconversion.h" #include "contentstat-py.h" typedef struct { PyObject_HEAD cr_RepomdRecord *record; } _RepomdRecordObject; PyObject * Object_FromRepomdRecord(cr_RepomdRecord *rec) { PyObject *py_rec; if (!rec) { PyErr_SetString(PyExc_ValueError, "Expected a cr_RepomdRecord pointer not NULL."); return NULL; } py_rec = PyObject_CallObject((PyObject *) &RepomdRecord_Type, NULL); cr_repomd_record_free(((_RepomdRecordObject *)py_rec)->record); ((_RepomdRecordObject *)py_rec)->record = rec; return py_rec; } cr_RepomdRecord * RepomdRecord_FromPyObject(PyObject *o) { if (!RepomdRecordObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a RepomdRecord object."); return NULL; } return ((_RepomdRecordObject *)o)->record; } static int check_RepomdRecordStatus(const _RepomdRecordObject *self) { assert(self != NULL); assert(RepomdRecordObject_Check(self)); if (self->record == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c RepomdRecord object."); return -1; } return 0; } /* Function on the type */ static PyObject * repomdrecord_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _RepomdRecordObject *self = (_RepomdRecordObject *)type->tp_alloc(type, 0); if (self) { self->record = NULL; } return (PyObject *)self; } PyDoc_STRVAR(repomdrecord_init__doc__, ".. method:: __init__([type[, path]])\n\n" " :arg type: String with type of the file (e.g. primary, primary_db, etc.)\n" " :arg path: Path to the file\n"); static int repomdrecord_init(_RepomdRecordObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds) { char *type = NULL, *path = NULL; if (!PyArg_ParseTuple(args, "|zz:repomdrecord_init", &type, &path)) return -1; /* Free all previous resources when reinitialization */ if (self->record) cr_repomd_record_free(self->record); /* Init */ self->record = cr_repomd_record_new(type, path); if (self->record == NULL) { PyErr_SetString(CrErr_Exception, "RepomdRecord initialization failed"); return -1; } return 0; } static void repomdrecord_dealloc(_RepomdRecordObject *self) { if (self->record) cr_repomd_record_free(self->record); Py_TYPE(self)->tp_free(self); } static PyObject * repomdrecord_repr(G_GNUC_UNUSED _RepomdRecordObject *self) { if (self->record->type) return PyUnicode_FromFormat("", self->record->type); else return PyUnicode_FromFormat(""); } /* RepomdRecord methods */ PyDoc_STRVAR(copy__doc__, "copy() -> RepomdRecord\n\n" "Return copy of the RepomdRecord object"); static PyObject * copy_repomdrecord(_RepomdRecordObject *self, G_GNUC_UNUSED void *nothing) { if (check_RepomdRecordStatus(self)) return NULL; return Object_FromRepomdRecord(cr_repomd_record_copy(self->record)); } PyDoc_STRVAR(fill__doc__, "fill() -> None\n\n" "Fill unfilled items in the RepomdRecord (sizes and checksums)"); static PyObject * fill(_RepomdRecordObject *self, PyObject *args) { int checksum_type; GError *err = NULL; if (!PyArg_ParseTuple(args, "i:fill", &checksum_type)) return NULL; if (check_RepomdRecordStatus(self)) return NULL; cr_repomd_record_fill(self->record, checksum_type, &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(compress_and_fill__doc__, "compress_and_fill(empty_repomdrecord[, checksum_type, 
compression_type]) " "-> None\n\n" "Almost analogous to fill() but suitable for groupfile. " "Record must be set with the path to existing non compressed groupfile. " "Compressed file will be created and compressed_record updated."); static PyObject * compress_and_fill(_RepomdRecordObject *self, PyObject *args) { int checksum_type, compression_type; PyObject *compressed_repomdrecord; gchar *zck_dict_dir = NULL; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!ii|s:compress_and_fill", &RepomdRecord_Type, &compressed_repomdrecord, &checksum_type, &compression_type, &zck_dict_dir)) return NULL; if (check_RepomdRecordStatus(self)) return NULL; cr_repomd_record_compress_and_fill(self->record, RepomdRecord_FromPyObject(compressed_repomdrecord), checksum_type, compression_type, zck_dict_dir, &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(rename_file__doc__, "rename_file() -> None\n\n" "Add (prepend) file checksum to the filename"); static PyObject * rename_file(_RepomdRecordObject *self, G_GNUC_UNUSED void *nothing) { GError *err = NULL; cr_repomd_record_rename_file(self->record, &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(set_timestamp__doc__, "set_timestamp(timestamp) -> None\n\n" "Set timestamp to specific value and adjust file modification time." "This is needed to reproduce exact metadata as one produced in the " "past from the same package(s)."); static PyObject * set_timestamp(_RepomdRecordObject *self, PyObject *args) { int timestamp; if (!PyArg_ParseTuple(args, "i:timestamp", ×tamp)) return NULL; if (check_RepomdRecordStatus(self)) return NULL; if (check_RepomdRecordStatus(self)) return NULL; cr_repomd_record_set_timestamp(self->record, timestamp); Py_RETURN_NONE; } PyDoc_STRVAR(load_contentstat__doc__, "load_contentstat(contentstat) -> None\n\n" "Load some content statistics from the ContentStat object. 
" "The statistics loaded from ContentStat doesn't have to be " "calculated during a fill() method call and thus speed up the method."); static PyObject * load_contentstat(_RepomdRecordObject *self, PyObject *args) { PyObject *contentstat; if (!PyArg_ParseTuple(args, "O!:load_contentstat", &ContentStat_Type, &contentstat)) return NULL; if (check_RepomdRecordStatus(self)) return NULL; cr_repomd_record_load_contentstat(self->record, ContentStat_FromPyObject(contentstat)); Py_RETURN_NONE; } static struct PyMethodDef repomdrecord_methods[] = { {"copy", (PyCFunction)copy_repomdrecord, METH_NOARGS, copy__doc__}, {"fill", (PyCFunction)fill, METH_VARARGS, fill__doc__}, {"compress_and_fill", (PyCFunction)compress_and_fill, METH_VARARGS, compress_and_fill__doc__}, {"rename_file", (PyCFunction)rename_file, METH_NOARGS, rename_file__doc__}, {"set_timestamp", (PyCFunction)set_timestamp, METH_VARARGS, set_timestamp__doc__}, {"load_contentstat", (PyCFunction)load_contentstat, METH_VARARGS, load_contentstat__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_RepomdRecord, member) static PyObject * get_num(_RepomdRecordObject *self, void *member_offset) { if (check_RepomdRecordStatus(self)) return NULL; cr_RepomdRecord *rec = self->record; gint64 val = (gint64) *((gint64 *) ((size_t)rec + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_int(_RepomdRecordObject *self, void *member_offset) { if (check_RepomdRecordStatus(self)) return NULL; cr_RepomdRecord *rec = self->record; gint64 val = (gint64) *((int *) ((size_t)rec + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_str(_RepomdRecordObject *self, void *member_offset) { if (check_RepomdRecordStatus(self)) return NULL; cr_RepomdRecord *rec = self->record; char *str = *((char **) ((size_t) rec + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static int set_num(_RepomdRecordObject *self, PyObject *value, void *member_offset) { gint64 val; if (check_RepomdRecordStatus(self)) return -1; if (PyLong_Check(value)) { val = (gint64) PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (gint64) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_RepomdRecord *rec = self->record; *((gint64 *) ((size_t) rec + (size_t) member_offset)) = val; return 0; } static int set_int(_RepomdRecordObject *self, PyObject *value, void *member_offset) { long val; if (check_RepomdRecordStatus(self)) return -1; if (PyLong_Check(value)) { val = PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (long long) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_RepomdRecord *rec = self->record; *((int *) ((size_t) rec + (size_t) member_offset)) = (int) val; return 0; } static int set_str(_RepomdRecordObject *self, PyObject *value, void *member_offset) { if (check_RepomdRecordStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_RepomdRecord *rec = self->record; char *str = PyObject_ToChunkedString(value, rec->chunk); *((char **) ((size_t) rec + (size_t) member_offset)) = str; return 0; } static PyGetSetDef repomdrecord_getsetters[] = { {"type", (getter)get_str, (setter)set_str, "Record type", OFFSET(type)}, {"location_real", (getter)get_str, 
(setter)set_str, "Currentlocation of the file in the system", OFFSET(location_real)}, {"location_href", (getter)get_str, (setter)set_str, "Relative location of the file in a repository", OFFSET(location_href)}, {"location_base", (getter)get_str, (setter)set_str, "Base location of the file", OFFSET(location_base)}, {"checksum", (getter)get_str, (setter)set_str, "Checksum of the file", OFFSET(checksum)}, {"checksum_type", (getter)get_str, (setter)set_str, "Type of the file checksum", OFFSET(checksum_type)}, {"checksum_open", (getter)get_str, (setter)set_str, "Checksum of the archive content", OFFSET(checksum_open)}, {"checksum_open_type", (getter)get_str, (setter)set_str, "Type of the archive content checksum", OFFSET(checksum_open_type)}, {"checksum_header", (getter)get_str, (setter)set_str, "Checksum of the zchunk header", OFFSET(checksum_header)}, {"checksum_header_type", (getter)get_str, (setter)set_str, "Type of the zchunk header checksum", OFFSET(checksum_header_type)}, {"timestamp", (getter)get_num, (setter)set_num, "Mtime of the file", OFFSET(timestamp)}, {"size", (getter)get_num, (setter)set_num, "Size of the file", OFFSET(size)}, {"size_open", (getter)get_num, (setter)set_num, "Size of the archive content", OFFSET(size_open)}, {"size_header", (getter)get_num, (setter)set_num, "Size of the zchunk header", OFFSET(size_header)}, {"db_ver", (getter)get_int, (setter)set_int, "Database version (used only for sqlite databases like " "primary.sqlite etc.)", OFFSET(db_ver)}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject RepomdRecord_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.RepomdRecord", .tp_basicsize = sizeof(_RepomdRecordObject), .tp_dealloc = (destructor) repomdrecord_dealloc, .tp_repr = (reprfunc) repomdrecord_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = repomdrecord_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = repomdrecord_methods, .tp_getset = repomdrecord_getsetters, .tp_init = (initproc) repomdrecord_init, .tp_new = repomdrecord_new, }; createrepo_c-0.17.0/src/python/repomdrecord-py.h000066400000000000000000000022201400672373200216260ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef CR_REPOMDRECORD_PY_H #define CR_REPOMDRECORD_PY_H #include "src/createrepo_c.h" extern PyTypeObject RepomdRecord_Type; #define RepomdRecordObject_Check(o) PyObject_TypeCheck(o, &RepomdRecord_Type) PyObject *Object_FromRepomdRecord(cr_RepomdRecord *rec); cr_RepomdRecord *RepomdRecord_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/sqlite-py.c000066400000000000000000000127311400672373200204450ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "sqlite-py.h" #include "package-py.h" #include "exception-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD cr_SqliteDb *db; } _SqliteObject; // Forward declaration static PyObject *close_db(_SqliteObject *self, void *nothing); static int check_SqliteStatus(const _SqliteObject *self) { assert(self != NULL); assert(SqliteObject_Check(self)); if (self->db == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c Sqlite object (Already closed db?)"); return -1; } return 0; } /* Function on the type */ static PyObject * sqlite_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _SqliteObject *self = (_SqliteObject *)type->tp_alloc(type, 0); if (self) self->db = NULL; return (PyObject *)self; } PyDoc_STRVAR(sqlite_init__doc__, "Sqlite object\n\n" ".. method:: __init__(path, db_type)\n\n" " :arg path: Path to the database\n" " :arg db_type: One from DB_PRIMARY, DB_FILELISTS, DB_OTHER constans\n"); static int sqlite_init(_SqliteObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds) { char *path; int db_type; GError *err = NULL; PyObject *ret; if (!PyArg_ParseTuple(args, "si|:sqlite_init", &path, &db_type)) return -1; /* Check arguments */ if (db_type < CR_DB_PRIMARY || db_type >= CR_DB_SENTINEL) { PyErr_SetString(PyExc_ValueError, "Unknown db type"); return -1; } /* Free all previous resources when reinitialization */ ret = close_db(self, NULL); Py_XDECREF(ret); if (ret == NULL) { // Error encountered! 
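/* Reinitialization failed: close_db() already set a Python exception via
 * nice_exception() while closing the previously opened database, so just
 * propagate the error to the caller. */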
return -1; } /* Init */ self->db = cr_db_open(path, db_type, &err); if (err) { nice_exception(&err, NULL); return -1; } return 0; } static void sqlite_dealloc(_SqliteObject *self) { if (self->db) cr_db_close(self->db, NULL); Py_TYPE(self)->tp_free(self); } static PyObject * sqlite_repr(_SqliteObject *self) { char *type; if (self->db->type == CR_DB_PRIMARY) type = "PrimaryDb"; else if (self->db->type == CR_DB_FILELISTS) type = "FilelistsDb"; else if (self->db->type == CR_DB_OTHER) type = "OtherDb"; else type = "UnknownDb"; return PyUnicode_FromFormat("", type); } /* Sqlite methods */ PyDoc_STRVAR(add_pkg__doc__, "add_pkg(Package) -> None\n\n" "Add Package to the database"); static PyObject * add_pkg(_SqliteObject *self, PyObject *args) { PyObject *py_pkg; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:add_pkg", &Package_Type, &py_pkg)) return NULL; if (check_SqliteStatus(self)) return NULL; cr_db_add_pkg(self->db, Package_FromPyObject(py_pkg), &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(dbinfo_update__doc__, "dbinfo_update(checksum) -> None\n\n" "Set checksum of the xml file representing same data"); static PyObject * dbinfo_update(_SqliteObject *self, PyObject *args) { char *checksum; GError *err = NULL; if (!PyArg_ParseTuple(args, "s:dbinfo_update", &checksum)) return NULL; if (check_SqliteStatus(self)) return NULL; cr_db_dbinfo_update(self->db, checksum, &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(close__doc__, "close() -> None\n\n" "Close the sqlite database"); static PyObject * close_db(_SqliteObject *self, G_GNUC_UNUSED void *nothing) { GError *err = NULL; if (self->db) { cr_db_close(self->db, &err); self->db = NULL; if (err) { nice_exception(&err, NULL); return NULL; } } Py_RETURN_NONE; } static struct PyMethodDef sqlite_methods[] = { {"add_pkg", (PyCFunction)add_pkg, METH_VARARGS, add_pkg__doc__}, {"dbinfo_update", (PyCFunction)dbinfo_update, METH_VARARGS, dbinfo_update__doc__}, {"close", (PyCFunction)close_db, METH_NOARGS, close__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; PyTypeObject Sqlite_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.Sqlite", .tp_basicsize = sizeof(_SqliteObject), .tp_dealloc = (destructor) sqlite_dealloc, .tp_repr = (reprfunc) sqlite_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = sqlite_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = sqlite_methods, .tp_init = (initproc) sqlite_init, .tp_new = sqlite_new, }; createrepo_c-0.17.0/src/python/sqlite-py.h000066400000000000000000000017771400672373200204620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
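 *
 * A rough Python usage sketch for the Sqlite binding declared here and
 * implemented in sqlite-py.c (illustrative only; `pkgs` and the checksum
 * value are placeholders, and the DB_* constants are the ones named in the
 * __init__ docstring):
 *
 *   import createrepo_c as cr
 *
 *   db = cr.Sqlite("repodata/primary.sqlite", cr.DB_PRIMARY)
 *   for pkg in pkgs:                  # cr.Package objects obtained elsewhere
 *       db.add_pkg(pkg)
 *   db.dbinfo_update(primary_xml_checksum)
 *   db.close()
 *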
*/ #ifndef CR_SQLITE_PY_H #define CR_SQLITE_PY_H #include "src/createrepo_c.h" extern PyTypeObject Sqlite_Type; #define SqliteObject_Check(o) PyObject_TypeCheck(o, &Sqlite_Type) #endif createrepo_c-0.17.0/src/python/typeconversion.c000066400000000000000000000163471400672373200216140ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012-2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include "typeconversion.h" #include "src/createrepo_c.h" #include "src/repomd_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR void PyErr_ToGError(GError **err) { PyObject *type, *val, *traceback, *pystr; if (!err) return; assert(*err == NULL); PyErr_Fetch(&type, &val, &traceback); pystr = PyObject_Str(val); Py_XDECREF(type); Py_XDECREF(val); Py_XDECREF(traceback); if (!pystr) { PyErr_Clear(); g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER, "Error while error handling"); } else { if (PyUnicode_Check(pystr)) { PyObject *pybytes = PyUnicode_AsUTF8String(pystr); Py_DECREF(pystr); if (!pybytes) { PyErr_Clear(); g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER, "Error while error handling"); return; } pystr = pybytes; } g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER, "%s", PyBytes_AsString(pystr)); } Py_XDECREF(pystr); } PyObject * PyUnicodeOrNone_FromString(const char *str) { if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } PyObject * PyObject_ToPyBytesOrNull(PyObject *pyobj) { if (PyUnicode_Check(pyobj)) { pyobj = PyUnicode_AsUTF8String(pyobj); if (!pyobj) { return NULL; } } else { Py_XINCREF(pyobj); } if (PyBytes_Check(pyobj)) { return pyobj; } return NULL; } char * PyObject_ToChunkedString(PyObject *pyobj, GStringChunk *chunk) { char *ret = NULL; PyObject *pybytes = PyObject_ToPyBytesOrNull(pyobj); if (pybytes) { ret = cr_safe_string_chunk_insert(chunk, PyBytes_AsString(pybytes)); Py_DECREF(pybytes); } return ret; } long long PyObject_ToLongLongOrZero(PyObject *pyobj) { long long num = 0; if (PyLong_Check(pyobj)) { num = (long long) PyLong_AsLongLong(pyobj); } else if (PyFloat_Check(pyobj)) { num = (long long) PyFloat_AS_DOUBLE(pyobj); } return num; } PyObject * PyObject_FromDependency(cr_Dependency *dep) { PyObject *tuple; if ((tuple = PyTuple_New(6)) == NULL) return NULL; PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(dep->name)); PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(dep->flags)); PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(dep->epoch)); PyTuple_SetItem(tuple, 3, PyUnicodeOrNone_FromString(dep->version)); PyTuple_SetItem(tuple, 4, PyUnicodeOrNone_FromString(dep->release)); PyTuple_SetItem(tuple, 5, PyBool_FromLong((long) dep->pre)); return tuple; } cr_Dependency * PyObject_ToDependency(PyObject *tuple, GStringChunk *chunk) { PyObject *pyobj; cr_Dependency *dep = cr_dependency_new(); pyobj = PyTuple_GetItem(tuple, 0); 
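/* The tuple is expected to mirror PyObject_FromDependency() above:
 * (name, flags, epoch, version, release, pre). */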
dep->name = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 1); dep->flags = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 2); dep->epoch = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 3); dep->version = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 4); dep->release = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 5); dep->pre = (PyObject_IsTrue(pyobj)) ? TRUE : FALSE; return dep; } PyObject * PyObject_FromPackageFile(cr_PackageFile *file) { PyObject *tuple; if ((tuple = PyTuple_New(3)) == NULL) return NULL; PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(file->type)); PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(file->path)); PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(file->name)); return tuple; } cr_PackageFile * PyObject_ToPackageFile(PyObject *tuple, GStringChunk *chunk) { PyObject *pyobj; cr_PackageFile *file = cr_package_file_new(); pyobj = PyTuple_GetItem(tuple, 0); file->type = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 1); file->path = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 2); file->name = PyObject_ToChunkedString(pyobj, chunk); return file; } PyObject * PyObject_FromChangelogEntry(cr_ChangelogEntry *log) { PyObject *tuple; if ((tuple = PyTuple_New(3)) == NULL) return NULL; PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(log->author)); PyTuple_SetItem(tuple, 1, PyLong_FromLong((long) log->date)); PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(log->changelog)); return tuple; } cr_ChangelogEntry * PyObject_ToChangelogEntry(PyObject *tuple, GStringChunk *chunk) { PyObject *pyobj; cr_ChangelogEntry *log = cr_changelog_entry_new(); pyobj = PyTuple_GetItem(tuple, 0); log->author = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 1); log->date = PyObject_ToLongLongOrZero(pyobj); pyobj = PyTuple_GetItem(tuple, 2); log->changelog = PyObject_ToChunkedString(pyobj, chunk); return log; } PyObject * PyObject_FromDistroTag(cr_DistroTag *tag) { PyObject *tuple; if ((tuple = PyTuple_New(2)) == NULL) return NULL; PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(tag->cpeid)); PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(tag->val)); return tuple; } cr_DistroTag * PyObject_ToDistroTag(PyObject *tuple, GStringChunk *chunk) { PyObject *pyobj; cr_DistroTag *tag = cr_distrotag_new(); pyobj = PyTuple_GetItem(tuple, 0); tag->cpeid = PyObject_ToChunkedString(pyobj, chunk); pyobj = PyTuple_GetItem(tuple, 2); tag->val = PyObject_ToChunkedString(pyobj, chunk); return tag; } GSList * GSList_FromPyList_Str(PyObject *py_list) { GSList *list = NULL; if (!py_list) return NULL; if (!PyList_Check(py_list)) return NULL; Py_ssize_t size = PyList_Size(py_list); for (Py_ssize_t x=0; x < size; x++) { PyObject *py_str = PyList_GetItem(py_list, x); assert(py_str != NULL); if (!PyUnicode_Check(py_str) && !PyBytes_Check(py_str)) // Hmm, element is not a string, just skip it continue; if (PyUnicode_Check(py_str)) py_str = PyUnicode_AsUTF8String(py_str); list = g_slist_prepend(list, PyBytes_AsString(py_str)); } return list; } createrepo_c-0.17.0/src/python/typeconversion.h000066400000000000000000000034711400672373200216130ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012-2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public 
License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_TYPECONVERSION_PY_H #define CR_TYPECONVERSION_PY_H #include #include "src/createrepo_c.h" // Clears the current Python Exception and return its representation in GError void PyErr_ToGError(GError **err); PyObject *PyUnicodeOrNone_FromString(const char *str); PyObject *PyObject_ToPyBytesOrNull(PyObject *pyobj); char *PyObject_ToChunkedString(PyObject *pyobj, GStringChunk *chunk); PyObject *PyObject_FromDependency(cr_Dependency *dep); cr_Dependency *PyObject_ToDependency(PyObject *tuple, GStringChunk *chunk); PyObject *PyObject_FromPackageFile(cr_PackageFile *file); cr_PackageFile *PyObject_ToPackageFile(PyObject *tuple, GStringChunk *chunk); PyObject *PyObject_FromChangelogEntry(cr_ChangelogEntry *log); cr_ChangelogEntry *PyObject_ToChangelogEntry(PyObject *tuple, GStringChunk *chunk); PyObject *PyObject_FromDistroTag(cr_DistroTag *tag); cr_DistroTag *PyObject_ToDistroTag(PyObject *tuple, GStringChunk *chunk); GSList *GSList_FromPyList_Str(PyObject *py_list); #endif createrepo_c-0.17.0/src/python/updatecollection-py.c000066400000000000000000000241041400672373200224770ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
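 *
 * A rough Python usage sketch for the UpdateCollection type defined below
 * (illustrative only; the attribute values are made up):
 *
 *   import createrepo_c as cr
 *
 *   col = cr.UpdateCollection()
 *   col.shortname = "FEDORA-2021-xyz"
 *   col.name = "Example update collection"
 *   pkg = cr.UpdateCollectionPackage()
 *   pkg.name = "foo"
 *   pkg.filename = "foo-1.0-1.noarch.rpm"
 *   col.append(pkg)       # append() stores a copy of the package
 *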
*/ #include #include #include #include "updatecollection-py.h" #include "updatecollectionmodule-py.h" #include "updatecollectionpackage-py.h" #include "exception-py.h" #include "typeconversion.h" #include "contentstat-py.h" typedef struct { PyObject_HEAD cr_UpdateCollection *collection; } _UpdateCollectionObject; PyObject * Object_FromUpdateCollection(cr_UpdateCollection *rec) { PyObject *py_rec; if (!rec) { PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateCollection pointer not NULL."); return NULL; } py_rec = PyObject_CallObject((PyObject *) &UpdateCollection_Type, NULL); cr_updatecollection_free(((_UpdateCollectionObject *)py_rec)->collection); ((_UpdateCollectionObject *)py_rec)->collection = rec; return py_rec; } cr_UpdateCollection * UpdateCollection_FromPyObject(PyObject *o) { if (!UpdateCollectionObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a UpdateCollection object."); return NULL; } return ((_UpdateCollectionObject *)o)->collection; } static int check_UpdateCollectionStatus(const _UpdateCollectionObject *self) { assert(self != NULL); assert(UpdateCollectionObject_Check(self)); if (self->collection == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateCollection object."); return -1; } return 0; } /* Function on the type */ static PyObject * updatecollection_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _UpdateCollectionObject *self = (_UpdateCollectionObject *)type->tp_alloc(type, 0); if (self) { self->collection = NULL; } return (PyObject *)self; } PyDoc_STRVAR(updatecollection_init__doc__, ".. method:: __init__()\n\n"); static int updatecollection_init(_UpdateCollectionObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->collection) cr_updatecollection_free(self->collection); /* Init */ self->collection = cr_updatecollection_new(); if (self->collection == NULL) { PyErr_SetString(CrErr_Exception, "UpdateCollection initialization failed"); return -1; } return 0; } static void updatecollection_dealloc(_UpdateCollectionObject *self) { if (self->collection) cr_updatecollection_free(self->collection); Py_TYPE(self)->tp_free(self); } static PyObject * updatecollection_repr(G_GNUC_UNUSED _UpdateCollectionObject *self) { return PyUnicode_FromFormat(""); } /* UpdateCollection methods */ PyDoc_STRVAR(append__doc__, "append(updatecollectionpackage) -> None\n\n" "Add UpdateCollectionPackage"); static PyObject * append(_UpdateCollectionObject *self, PyObject *args) { PyObject *pkg; cr_UpdateCollectionPackage *orig, *new; if (!PyArg_ParseTuple(args, "O!:append", &UpdateCollectionPackage_Type, &pkg)) return NULL; if (check_UpdateCollectionStatus(self)) return NULL; orig = UpdateCollectionPackage_FromPyObject(pkg); new = cr_updatecollectionpackage_copy(orig); cr_updatecollection_append_package(self->collection, new); Py_RETURN_NONE; } PyDoc_STRVAR(copy__doc__, "copy() -> UpdateCollection\n\n" "Return copy of the UpdateCollection object"); static PyObject * copy_updatecollection(_UpdateCollectionObject *self, G_GNUC_UNUSED void *nothing) { if (check_UpdateCollectionStatus(self)) return NULL; return Object_FromUpdateCollection(cr_updatecollection_copy(self->collection)); } static struct PyMethodDef updatecollection_methods[] = { {"append", (PyCFunction)append, METH_VARARGS, append__doc__}, {"copy", (PyCFunction)copy_updatecollection, METH_NOARGS, copy__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* Convertors for getsetters 
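 *
 * Each ListConvertor entry below pairs the offset of a GSList member of the
 * wrapped C struct with conversion callbacks; get_list() walks that GSList
 * and turns every element into a Python object with the `f` callback.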
*/ /** Convert C object to PyObject. * @param C object * @return PyObject representation */ typedef PyObject *(*ConversionFromFunc)(void *); /** Check an element from a list if has a valid format. * @param a single list element * @return 0 if ok, 1 otherwise */ typedef int (*ConversionToCheckFunc)(PyObject *); /** Convert PyObject to C representation. * @param PyObject * @return C representation */ typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *); PyObject * PyObject_FromUpdateCollectionModule(cr_UpdateCollectionModule *module) { return Object_FromUpdateCollectionModule( cr_updatecollectionmodule_copy(module)); } PyObject * PyObject_FromUpdateCollectionPackage(cr_UpdateCollectionPackage *pkg) { return Object_FromUpdateCollectionPackage( cr_updatecollectionpackage_copy(pkg)); } typedef struct { size_t offset; /*!< Ofset of the list in cr_UpdateInfo */ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */ ConversionToFunc t; /*!< Conversion func to C object from PyObject */ } ListConvertor; /** List of convertors for converting a lists in cr_Package. */ static ListConvertor list_convertors[] = { { offsetof(cr_UpdateCollection, packages), (ConversionFromFunc) PyObject_FromUpdateCollectionPackage, (ConversionToCheckFunc) NULL, (ConversionToFunc) NULL }, }; /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_UpdateCollection, member) static PyObject * get_str(_UpdateCollectionObject *self, void *member_offset) { if (check_UpdateCollectionStatus(self)) return NULL; cr_UpdateCollection *rec = self->collection; char *str = *((char **) ((size_t) rec + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static PyObject * get_list(_UpdateCollectionObject *self, void *conv) { ListConvertor *convertor = conv; PyObject *list; cr_UpdateCollection *collection = self->collection; GSList *glist = *((GSList **) ((size_t) collection + (size_t) convertor->offset)); if (check_UpdateCollectionStatus(self)) return NULL; if ((list = PyList_New(0)) == NULL) return NULL; for (GSList *elem = glist; elem; elem = g_slist_next(elem)) { PyObject *obj = convertor->f(elem->data); if (!obj) continue; PyList_Append(list, obj); Py_DECREF(obj); } return list; } static PyObject * get_module(_UpdateCollectionObject *self, void *member_offset) { if (check_UpdateCollectionStatus(self)) return NULL; cr_UpdateCollection *collection = self->collection; cr_UpdateCollectionModule *module = *((cr_UpdateCollectionModule **) ((size_t) collection + (size_t) member_offset)); if (module == NULL) Py_RETURN_NONE; PyObject *py_module = PyObject_FromUpdateCollectionModule(module); return py_module; } static int set_str(_UpdateCollectionObject *self, PyObject *value, void *member_offset) { if (check_UpdateCollectionStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_UpdateCollection *rec = self->collection; char *str = PyObject_ToChunkedString(value, rec->chunk); *((char **) ((size_t) rec + (size_t) member_offset)) = str; return 0; } static int set_module(_UpdateCollectionObject *self, PyObject *value, void *member_offset) { cr_UpdateCollectionModule *orig, *new; if (check_UpdateCollectionStatus(self)) return -1; if (!UpdateCollectionModuleObject_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Module or None expected!"); return 
-1; } orig = UpdateCollectionModule_FromPyObject(value); new = cr_updatecollectionmodule_copy(orig); cr_UpdateCollection *collection = self->collection; *((cr_UpdateCollectionModule **) ((size_t) collection + (size_t) member_offset)) = new; return 0; } static PyGetSetDef updatecollection_getsetters[] = { {"shortname", (getter)get_str, (setter)set_str, "Short name", OFFSET(shortname)}, {"name", (getter)get_str, (setter)set_str, "Name of the collection", OFFSET(name)}, {"module", (getter)get_module, (setter)set_module, "Module information", OFFSET(module)}, {"packages", (getter)get_list, (setter)NULL, "List of packages", &(list_convertors[0])}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject UpdateCollection_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.UpdateCollection", .tp_basicsize = sizeof(_UpdateCollectionObject), .tp_dealloc = (destructor) updatecollection_dealloc, .tp_repr = (reprfunc) updatecollection_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = updatecollection_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = updatecollection_methods, .tp_getset = updatecollection_getsetters, .tp_init = (initproc) updatecollection_init, .tp_new = updatecollection_new, }; createrepo_c-0.17.0/src/python/updatecollection-py.h000066400000000000000000000023001400672373200224760ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_UPDATECOLLECTION_PY_H #define CR_UPDATECOLLECTION_PY_H #include "src/createrepo_c.h" extern PyTypeObject UpdateCollection_Type; #define UpdateCollectionObject_Check(o) \ PyObject_TypeCheck(o, &UpdateCollection_Type) PyObject *Object_FromUpdateCollection(cr_UpdateCollection *rec); cr_UpdateCollection *UpdateCollection_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/updatecollectionmodule-py.c000066400000000000000000000166531400672373200237170ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
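 *
 * A rough Python usage sketch for the UpdateCollectionModule type defined
 * below (illustrative only; the values are made up):
 *
 *   import createrepo_c as cr
 *
 *   mod = cr.UpdateCollectionModule()
 *   mod.name = "nodejs"
 *   mod.stream = "12"
 *   mod.version = 20210101000000     # stored as an unsigned 64-bit integer
 *   mod.context = "6c81f848"
 *   mod.arch = "x86_64"
 *   collection.module = mod          # attach to an existing UpdateCollection
 *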
*/ #include #include #include #include "updatecollectionmodule-py.h" #include "exception-py.h" #include "typeconversion.h" #include "contentstat-py.h" typedef struct { PyObject_HEAD cr_UpdateCollectionModule *module; } _UpdateCollectionModuleObject; PyObject * Object_FromUpdateCollectionModule(cr_UpdateCollectionModule *mod) { PyObject *py_rec; if (!mod) { PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateCollectionModule pointer not NULL."); return NULL; } py_rec = PyObject_CallObject((PyObject *) &UpdateCollectionModule_Type, NULL); cr_updatecollectionmodule_free(((_UpdateCollectionModuleObject *)py_rec)->module); ((_UpdateCollectionModuleObject *)py_rec)->module = mod; return py_rec; } cr_UpdateCollectionModule * UpdateCollectionModule_FromPyObject(PyObject *o) { if (!UpdateCollectionModuleObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a UpdateCollectionModule object."); return NULL; } return ((_UpdateCollectionModuleObject *)o)->module; } static int check_UpdateCollectionModuleStatus(const _UpdateCollectionModuleObject *self) { assert(self != NULL); assert(UpdateCollectionModuleObject_Check(self)); if (self->module == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateCollectionModule object."); return -1; } return 0; } /* Function on the type */ static PyObject * updatecollectionmodule_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _UpdateCollectionModuleObject *self = (_UpdateCollectionModuleObject *)type->tp_alloc(type, 0); if (self) { self->module = NULL; } return (PyObject *)self; } PyDoc_STRVAR(updatecollectionmodule_init__doc__, ".. method:: __init__()\n\n"); static int updatecollectionmodule_init(_UpdateCollectionModuleObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->module) cr_updatecollectionmodule_free(self->module); /* Init */ self->module = cr_updatecollectionmodule_new(); if (self->module == NULL) { PyErr_SetString(CrErr_Exception, "UpdateCollectionModule initialization failed"); return -1; } return 0; } static void updatecollectionmodule_dealloc(_UpdateCollectionModuleObject *self) { if (self->module) cr_updatecollectionmodule_free(self->module); Py_TYPE(self)->tp_free(self); } static PyObject * updatecollectionmodule_repr(G_GNUC_UNUSED _UpdateCollectionModuleObject *self) { return PyUnicode_FromFormat(""); } /* UpdateCollectionModule methods */ PyDoc_STRVAR(copy__doc__, "copy() -> UpdateCollectionModule\n\n" "Return copy of the UpdateCollectionModule object"); static PyObject * copy_updatecollectionmodule(_UpdateCollectionModuleObject *self, G_GNUC_UNUSED void *nothing) { if (check_UpdateCollectionModuleStatus(self)) return NULL; return Object_FromUpdateCollectionModule(cr_updatecollectionmodule_copy(self->module)); } static struct PyMethodDef updatecollectionmodule_methods[] = { {"copy", (PyCFunction)copy_updatecollectionmodule, METH_NOARGS, copy__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_UpdateCollectionModule, member) static PyObject * get_str(_UpdateCollectionModuleObject *self, void *member_offset) { if (check_UpdateCollectionModuleStatus(self)) return NULL; cr_UpdateCollectionModule *module = self->module; char *str = *((char **) ((size_t) module + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static PyObject * get_uint(_UpdateCollectionModuleObject *self, void *member_offset) { if 
(check_UpdateCollectionModuleStatus(self)) return NULL; cr_UpdateCollectionModule *module = self->module; guint64 val = *((guint64 *) ((size_t) module + (size_t) member_offset)); return PyLong_FromUnsignedLongLong((guint64) val); } static int set_str(_UpdateCollectionModuleObject *self, PyObject *value, void *member_offset) { if (check_UpdateCollectionModuleStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_UpdateCollectionModule *module = self->module; char *str = PyObject_ToChunkedString(value, module->chunk); *((char **) ((size_t) module + (size_t) member_offset)) = str; return 0; } static int set_uint(_UpdateCollectionModuleObject *self, PyObject *value, void *member_offset) { if (check_UpdateCollectionModuleStatus(self)) return -1; guint64 val; if (PyLong_Check(value)) { val = PyLong_AsUnsignedLongLong(value); } else if (PyFloat_Check(value)) { val = (guint64) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_UpdateCollectionModule *module = self->module; *((guint64 *) ((size_t) module + (size_t) member_offset)) = (guint64) val; return 0; } static PyGetSetDef updatecollectionmodule_getsetters[] = { {"name", (getter)get_str, (setter)set_str, "Name", OFFSET(name)}, {"stream", (getter)get_str, (setter)set_str, "Stream", OFFSET(stream)}, {"version", (getter)get_uint, (setter)set_uint, "Version", OFFSET(version)}, {"context", (getter)get_str, (setter)set_str, "Context", OFFSET(context)}, {"arch", (getter)get_str, (setter)set_str, "Arch", OFFSET(arch)}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject UpdateCollectionModule_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.UpdateCollectionModule", .tp_basicsize = sizeof(_UpdateCollectionModuleObject), .tp_dealloc = (destructor) updatecollectionmodule_dealloc, .tp_repr = (reprfunc) updatecollectionmodule_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = updatecollectionmodule_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = updatecollectionmodule_methods, .tp_getset = updatecollectionmodule_getsetters, .tp_init = (initproc) updatecollectionmodule_init, .tp_new = updatecollectionmodule_new, }; createrepo_c-0.17.0/src/python/updatecollectionmodule-py.h000066400000000000000000000023661400672373200237200ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef CR_UPDATECOLLECTIONMODULE_PY_H #define CR_UPDATECOLLECTIONMODULE_PY_H #include "src/createrepo_c.h" extern PyTypeObject UpdateCollectionModule_Type; #define UpdateCollectionModuleObject_Check(o) \ PyObject_TypeCheck(o, &UpdateCollectionModule_Type) PyObject *Object_FromUpdateCollectionModule(cr_UpdateCollectionModule *rec); cr_UpdateCollectionModule *UpdateCollectionModule_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/updatecollectionpackage-py.c000066400000000000000000000202551400672373200240160ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "updatecollectionpackage-py.h" #include "exception-py.h" #include "typeconversion.h" #include "contentstat-py.h" typedef struct { PyObject_HEAD cr_UpdateCollectionPackage *pkg; } _UpdateCollectionPackageObject; PyObject * Object_FromUpdateCollectionPackage(cr_UpdateCollectionPackage *pkg) { PyObject *py_rec; if (!pkg) { PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateCollectionPackage pointer not NULL."); return NULL; } py_rec = PyObject_CallObject((PyObject *) &UpdateCollectionPackage_Type, NULL); cr_updatecollectionpackage_free(((_UpdateCollectionPackageObject *)py_rec)->pkg); ((_UpdateCollectionPackageObject *)py_rec)->pkg = pkg; return py_rec; } cr_UpdateCollectionPackage * UpdateCollectionPackage_FromPyObject(PyObject *o) { if (!UpdateCollectionPackageObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a UpdateCollectionPackage object."); return NULL; } return ((_UpdateCollectionPackageObject *)o)->pkg; } static int check_UpdateCollectionPackageStatus(const _UpdateCollectionPackageObject *self) { assert(self != NULL); assert(UpdateCollectionPackageObject_Check(self)); if (self->pkg == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateCollectionPackage object."); return -1; } return 0; } /* Function on the type */ static PyObject * updatecollectionpackage_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _UpdateCollectionPackageObject *self = (_UpdateCollectionPackageObject *)type->tp_alloc(type, 0); if (self) { self->pkg = NULL; } return (PyObject *)self; } PyDoc_STRVAR(updatecollectionpackage_init__doc__, ".. 
method:: __init__()\n\n"); static int updatecollectionpackage_init(_UpdateCollectionPackageObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->pkg) cr_updatecollectionpackage_free(self->pkg); /* Init */ self->pkg = cr_updatecollectionpackage_new(); if (self->pkg == NULL) { PyErr_SetString(CrErr_Exception, "UpdateCollectionPackage initialization failed"); return -1; } return 0; } static void updatecollectionpackage_dealloc(_UpdateCollectionPackageObject *self) { if (self->pkg) cr_updatecollectionpackage_free(self->pkg); Py_TYPE(self)->tp_free(self); } static PyObject * updatecollectionpackage_repr(G_GNUC_UNUSED _UpdateCollectionPackageObject *self) { return PyUnicode_FromFormat(""); } /* UpdateCollectionPackage methods */ PyDoc_STRVAR(copy__doc__, "copy() -> UpdateCollectionPackage\n\n" "Return copy of the UpdateCollectionPackage object"); static PyObject * copy_updatecollectionpackage(_UpdateCollectionPackageObject *self, G_GNUC_UNUSED void *nothing) { if (check_UpdateCollectionPackageStatus(self)) return NULL; return Object_FromUpdateCollectionPackage(cr_updatecollectionpackage_copy(self->pkg)); } static struct PyMethodDef updatecollectionpackage_methods[] = { {"copy", (PyCFunction)copy_updatecollectionpackage, METH_NOARGS, copy__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_UpdateCollectionPackage, member) static PyObject * get_int(_UpdateCollectionPackageObject *self, void *member_offset) { if (check_UpdateCollectionPackageStatus(self)) return NULL; cr_UpdateCollectionPackage *pkg = self->pkg; gint64 val = *((int *) ((size_t)pkg + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_str(_UpdateCollectionPackageObject *self, void *member_offset) { if (check_UpdateCollectionPackageStatus(self)) return NULL; cr_UpdateCollectionPackage *pkg = self->pkg; char *str = *((char **) ((size_t) pkg + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static int set_int(_UpdateCollectionPackageObject *self, PyObject *value, void *member_offset) { long val; if (check_UpdateCollectionPackageStatus(self)) return -1; if (PyLong_Check(value)) { val = PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (long long) PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_UpdateCollectionPackage *pkg = self->pkg; *((int *) ((size_t) pkg + (size_t) member_offset)) = (int) val; return 0; } static int set_str(_UpdateCollectionPackageObject *self, PyObject *value, void *member_offset) { if (check_UpdateCollectionPackageStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_UpdateCollectionPackage *pkg = self->pkg; char *str = PyObject_ToChunkedString(value, pkg->chunk); *((char **) ((size_t) pkg + (size_t) member_offset)) = str; return 0; } static PyGetSetDef updatecollectionpackage_getsetters[] = { {"name", (getter)get_str, (setter)set_str, "Name", OFFSET(name)}, {"version", (getter)get_str, (setter)set_str, "Version", OFFSET(version)}, {"release", (getter)get_str, (setter)set_str, "Release", OFFSET(release)}, {"epoch", (getter)get_str, (setter)set_str, "Epoch", OFFSET(epoch)}, {"arch", (getter)get_str, (setter)set_str, "Architecture", OFFSET(arch)}, {"src", (getter)get_str, 
(setter)set_str, "Source filename", OFFSET(src)}, {"filename", (getter)get_str, (setter)set_str, "Filename", OFFSET(filename)}, {"sum", (getter)get_str, (setter)set_str, "Checksum", OFFSET(sum)}, {"sum_type", (getter)get_int, (setter)set_int, "Type of checksum", OFFSET(sum_type)}, {"reboot_suggested", (getter)get_int, (setter)set_int, "Suggested reboot", OFFSET(reboot_suggested)}, {"restart_suggested", (getter)get_int, (setter)set_int, "Suggested restart",OFFSET(restart_suggested)}, {"relogin_suggested", (getter)get_int, (setter)set_int, "Suggested relogin",OFFSET(relogin_suggested)}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject UpdateCollectionPackage_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.UpdateCollectionPackage", .tp_basicsize = sizeof(_UpdateCollectionPackageObject), .tp_dealloc = (destructor) updatecollectionpackage_dealloc, .tp_repr = (reprfunc) updatecollectionpackage_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = updatecollectionpackage_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = updatecollectionpackage_methods, .tp_getset = updatecollectionpackage_getsetters, .tp_init = (initproc) updatecollectionpackage_init, .tp_new = updatecollectionpackage_new, }; createrepo_c-0.17.0/src/python/updatecollectionpackage-py.h000066400000000000000000000023771400672373200240300ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_UPDATECOLLECTIONPACKAGE_PY_H #define CR_UPDATECOLLECTIONPACKAGE_PY_H #include "src/createrepo_c.h" extern PyTypeObject UpdateCollectionPackage_Type; #define UpdateCollectionPackageObject_Check(o) \ PyObject_TypeCheck(o, &UpdateCollectionPackage_Type) PyObject *Object_FromUpdateCollectionPackage(cr_UpdateCollectionPackage *rec); cr_UpdateCollectionPackage *UpdateCollectionPackage_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/updateinfo-py.c000066400000000000000000000172731400672373200213100ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "updateinfo-py.h" #include "updaterecord-py.h" #include "exception-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD cr_UpdateInfo *updateinfo; } _UpdateInfoObject; cr_UpdateInfo * UpdateInfo_FromPyObject(PyObject *o) { if (!UpdateInfoObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a createrepo_c.UpdateInfo object."); return NULL; } return ((_UpdateInfoObject *)o)->updateinfo; } static int check_UpdateInfoStatus(const _UpdateInfoObject *self) { assert(self != NULL); assert(UpdateInfoObject_Check(self)); if (self->updateinfo == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateInfo object."); return -1; } return 0; } /* Function on the type */ static PyObject * updateinfo_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _UpdateInfoObject *self = (_UpdateInfoObject *)type->tp_alloc(type, 0); if (self) { self->updateinfo = NULL; } return (PyObject *)self; } PyDoc_STRVAR(updateinfo_init__doc__, "UpdateInfo object"); static int updateinfo_init(_UpdateInfoObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->updateinfo) { cr_updateinfo_free(self->updateinfo); } /* Init */ self->updateinfo = cr_updateinfo_new(); if (self->updateinfo == NULL) { PyErr_SetString(CrErr_Exception, "UpdateInfo initialization failed"); return -1; } return 0; } static void updateinfo_dealloc(_UpdateInfoObject *self) { if (self->updateinfo) cr_updateinfo_free(self->updateinfo); Py_TYPE(self)->tp_free(self); } static PyObject * updateinfo_repr(G_GNUC_UNUSED _UpdateInfoObject *self) { return PyUnicode_FromFormat(""); } /* UpdateInfo methods */ PyDoc_STRVAR(append__doc__, "append(updaterecord) -> None\n\n" "Append UpdateRecord"); static PyObject * append(_UpdateInfoObject *self, PyObject *args) { PyObject *record; cr_UpdateRecord *orig, *new; if (!PyArg_ParseTuple(args, "O!:append", &UpdateRecord_Type, &record)) return NULL; if (check_UpdateInfoStatus(self)) return NULL; orig = UpdateRecord_FromPyObject(record); new = cr_updaterecord_copy(orig); cr_updateinfo_apped_record(self->updateinfo, new); Py_RETURN_NONE; } PyDoc_STRVAR(xml_dump__doc__, "xml_dump() -> str\n\n" "Generate xml representation of the updateinfo"); static PyObject * xml_dump(_UpdateInfoObject *self, G_GNUC_UNUSED void *nothing) { PyObject *py_str; GError *tmp_err = NULL; char *xml = cr_xml_dump_updateinfo(self->updateinfo, &tmp_err); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } py_str = PyUnicodeOrNone_FromString(xml); free(xml); return py_str; } static struct PyMethodDef updateinfo_methods[] = { {"append", (PyCFunction)append, METH_VARARGS, append__doc__}, {"xml_dump", (PyCFunction)xml_dump, METH_NOARGS, xml_dump__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* Convertors for getsetters */ /** Convert C object to PyObject. * @param C object * @return PyObject representation */ typedef PyObject *(*ConversionFromFunc)(void *); /** Check an element from a list if has a valid format. * @param a single list element * @return 0 if ok, 1 otherwise */ typedef int (*ConversionToCheckFunc)(PyObject *); /** Convert PyObject to C representation. 
* @param PyObject * @return C representation */ typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *); PyObject * PyObject_FromUpdateRecord(cr_UpdateRecord *rec) { return Object_FromUpdateRecord(cr_updaterecord_copy(rec)); } typedef struct { size_t offset; /*!< Ofset of the list in cr_UpdateInfo */ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */ ConversionToFunc t; /*!< Conversion func to C object from PyObject */ } ListConvertor; /** List of convertors for converting a lists in cr_Package. */ static ListConvertor list_convertors[] = { { offsetof(cr_UpdateInfo, updates), (ConversionFromFunc) PyObject_FromUpdateRecord, (ConversionToCheckFunc) NULL, (ConversionToFunc) NULL }, }; /* Getters */ static PyObject * get_list(_UpdateInfoObject *self, void *conv) { ListConvertor *convertor = conv; PyObject *list; cr_UpdateInfo *updateinfo = self->updateinfo; GSList *glist = *((GSList **) ((size_t) updateinfo + (size_t) convertor->offset)); if (check_UpdateInfoStatus(self)) return NULL; if ((list = PyList_New(0)) == NULL) return NULL; for (GSList *elem = glist; elem; elem = g_slist_next(elem)) { PyObject *obj = convertor->f(elem->data); if (!obj) continue; PyList_Append(list, obj); Py_DECREF(obj); } return list; } /* Setters */ /* static int set_list(_UpdateInfoObject *self, PyObject *list, void *conv) { ListConvertor *convertor = conv; cr_UpdateInfo *updateinfo = self->updateinfo; GSList *glist = NULL; if (check_UpdateInfoStatus(self)) return -1; if (!PyList_Check(list)) { PyErr_SetString(PyExc_TypeError, "List expected!"); return -1; } Py_ssize_t len = PyList_Size(list); // Check all elements for (Py_ssize_t x = 0; x < len; x++) { PyObject *elem = PyList_GetItem(list, x); if (convertor->t_check && convertor->t_check(elem)) return -1; } for (Py_ssize_t x = 0; x < len; x++) { glist = g_slist_prepend(glist, convertor->t(PyList_GetItem(list, x), updateinfo->chunk)); } *((GSList **) ((size_t) updateinfo + (size_t) convertor->offset)) = glist; return 0; } */ /** Return offset of a selected member of cr_UpdateInfo structure. */ #define OFFSET(member) (void *) offsetof(cr_UpdateInfo, member) static PyGetSetDef updateinfo_getsetters[] = { {"updates", (getter)get_list, (setter)NULL, "List of UpdateRecords", &(list_convertors[0])}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject UpdateInfo_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.UpdateInfo", .tp_basicsize = sizeof(_UpdateInfoObject), .tp_dealloc = (destructor) updateinfo_dealloc, .tp_repr = (reprfunc) updateinfo_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = updateinfo_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = updateinfo_methods, .tp_getset = updateinfo_getsetters, .tp_init = (initproc) updateinfo_init, .tp_new = updateinfo_new, }; createrepo_c-0.17.0/src/python/updateinfo-py.h000066400000000000000000000021111400672373200212760ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_UPDATEINFO_PY_H #define CR_UPDATEINFO_PY_H #include "src/createrepo_c.h" extern PyTypeObject UpdateInfo_Type; #define UpdateInfoObject_Check(o) PyObject_TypeCheck(o, &UpdateInfo_Type) cr_UpdateInfo *UpdateInfo_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/updaterecord-py.c000066400000000000000000000356611400672373200216340ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include // from python #include #include #include #include "updaterecord-py.h" #include "updatereference-py.h" #include "updatecollection-py.h" #include "exception-py.h" #include "typeconversion.h" #include "contentstat-py.h" typedef struct { PyObject_HEAD cr_UpdateRecord *record; } _UpdateRecordObject; PyObject * Object_FromUpdateRecord(cr_UpdateRecord *rec) { PyObject *py_rec; if (!rec) { PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateRecord pointer not NULL."); return NULL; } py_rec = PyObject_CallObject((PyObject *) &UpdateRecord_Type, NULL); cr_updaterecord_free(((_UpdateRecordObject *)py_rec)->record); ((_UpdateRecordObject *)py_rec)->record = rec; return py_rec; } cr_UpdateRecord * UpdateRecord_FromPyObject(PyObject *o) { if (!UpdateRecordObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a UpdateRecord object."); return NULL; } return ((_UpdateRecordObject *)o)->record; } static int check_UpdateRecordStatus(const _UpdateRecordObject *self) { assert(self != NULL); assert(UpdateRecordObject_Check(self)); if (self->record == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateRecord object."); return -1; } return 0; } /* Function on the type */ static PyObject * updaterecord_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _UpdateRecordObject *self = (_UpdateRecordObject *)type->tp_alloc(type, 0); if (self) { self->record = NULL; } return (PyObject *)self; } PyDoc_STRVAR(updaterecord_init__doc__, ".. 
method:: __init__()\n\n"); static int updaterecord_init(_UpdateRecordObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->record) cr_updaterecord_free(self->record); /* Init */ self->record = cr_updaterecord_new(); if (self->record == NULL) { PyErr_SetString(CrErr_Exception, "UpdateRecord initialization failed"); return -1; } return 0; } static void updaterecord_dealloc(_UpdateRecordObject *self) { if (self->record) cr_updaterecord_free(self->record); Py_TYPE(self)->tp_free(self); } static PyObject * updaterecord_repr(G_GNUC_UNUSED _UpdateRecordObject *self) { return PyUnicode_FromFormat(""); } /* UpdateRecord methods */ PyDoc_STRVAR(append_reference__doc__, "append_reference(reference) -> None\n\n" "Append UpdateReference"); static PyObject * append_reference(_UpdateRecordObject *self, PyObject *args) { PyObject *pkg; cr_UpdateReference *orig, *new; if (!PyArg_ParseTuple(args, "O!:append_reference", &UpdateReference_Type, &pkg)) return NULL; if (check_UpdateRecordStatus(self)) return NULL; orig = UpdateReference_FromPyObject(pkg); new = cr_updatereference_copy(orig); cr_updaterecord_append_reference(self->record, new); Py_RETURN_NONE; } PyDoc_STRVAR(append_collection__doc__, "append_collection(collection) -> None\n\n" "Append UpdateCollection"); static PyObject * append_collection(_UpdateRecordObject *self, PyObject *args) { PyObject *pkg; cr_UpdateCollection *orig, *new; if (!PyArg_ParseTuple(args, "O!:append_collection", &UpdateCollection_Type, &pkg)) return NULL; if (check_UpdateRecordStatus(self)) return NULL; orig = UpdateCollection_FromPyObject(pkg); new = cr_updatecollection_copy(orig); cr_updaterecord_append_collection(self->record, new); Py_RETURN_NONE; } PyDoc_STRVAR(copy__doc__, "copy() -> UpdateRecord\n\n" "Return copy of the UpdateRecord object"); static PyObject * copy_updaterecord(_UpdateRecordObject *self, G_GNUC_UNUSED void *nothing) { if (check_UpdateRecordStatus(self)) return NULL; return Object_FromUpdateRecord(cr_updaterecord_copy(self->record)); } static struct PyMethodDef updaterecord_methods[] = { {"append_reference", (PyCFunction)append_reference, METH_VARARGS, append_reference__doc__}, {"append_collection", (PyCFunction)append_collection, METH_VARARGS, append_collection__doc__}, {"copy", (PyCFunction)copy_updaterecord, METH_NOARGS, copy__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* Convertors for getsetters */ /** Convert C object to PyObject. * @param C object * @return PyObject representation */ typedef PyObject *(*ConversionFromFunc)(void *); /** Check an element from a list if has a valid format. * @param a single list element * @return 0 if ok, 1 otherwise */ typedef int (*ConversionToCheckFunc)(PyObject *); /** Convert PyObject to C representation. 
* @param PyObject * @return C representation */ typedef void *(*ConversionToFunc)(PyObject *, GStringChunk *); PyObject * PyObject_FromUpdateReference(cr_UpdateReference *ref) { return Object_FromUpdateReference(cr_updatereference_copy(ref)); } PyObject * PyObject_FromUpdateCollection(cr_UpdateCollection *col) { return Object_FromUpdateCollection(cr_updatecollection_copy(col)); } typedef struct { size_t offset; /*!< Ofset of the list in cr_UpdateInfo */ ConversionFromFunc f; /*!< Conversion func to PyObject from a C object */ ConversionToCheckFunc t_check; /*!< Check func for a single element of list */ ConversionToFunc t; /*!< Conversion func to C object from PyObject */ } ListConvertor; /** List of convertors for converting a lists in cr_Package. */ static ListConvertor list_convertors[] = { { offsetof(cr_UpdateRecord, references), (ConversionFromFunc) PyObject_FromUpdateReference, (ConversionToCheckFunc) NULL, (ConversionToFunc) NULL }, { offsetof(cr_UpdateRecord, collections), (ConversionFromFunc) PyObject_FromUpdateCollection, (ConversionToCheckFunc) NULL, (ConversionToFunc) NULL }, }; /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_UpdateRecord, member) static PyObject * get_int(_UpdateRecordObject *self, void *member_offset) { if (check_UpdateRecordStatus(self)) return NULL; cr_UpdateRecord *rec = self->record; gint64 val = *((int *) ((size_t) rec + (size_t) member_offset)); return PyLong_FromLongLong((long long) val); } static PyObject * get_str(_UpdateRecordObject *self, void *member_offset) { if (check_UpdateRecordStatus(self)) return NULL; cr_UpdateRecord *rec = self->record; char *str = *((char **) ((size_t) rec + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static PyObject * get_datetime(_UpdateRecordObject *self, void *member_offset) { PyDateTime_IMPORT; if (check_UpdateRecordStatus(self)) return NULL; cr_UpdateRecord *rec = self->record; char *str = *((char **) ((size_t) rec + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; struct tm *dt = g_malloc0(sizeof(struct tm)); char *res = strptime(str, "%Y-%m-%d %H:%M:%S", dt); if (res == NULL) { memset(dt, 0, sizeof(struct tm)); res = strptime(str, "%Y-%m-%d", dt); if (res == NULL) { g_free(dt); // Try to convert the whole string to a number if it passes it's likely in epoch format char *t; long long int epoch = strtoll(str, &t, 10); if(*t == '\0') { return PyLong_FromLongLong(epoch); } else { char err[55]; snprintf(err, 55, "Unable to parse updateinfo record date: %s", str); PyErr_SetString(CrErr_Exception, err); return NULL; } } } PyObject *py_dt = PyDateTime_FromDateAndTime(dt->tm_year + 1900, dt->tm_mon + 1, dt->tm_mday, dt->tm_hour, dt->tm_min, dt->tm_sec, 0); g_free(dt); return py_dt; } static PyObject * get_list(_UpdateRecordObject *self, void *conv) { ListConvertor *convertor = conv; PyObject *list; cr_UpdateRecord *rec = self->record; GSList *glist = *((GSList **) ((size_t) rec + (size_t) convertor->offset)); if (check_UpdateRecordStatus(self)) return NULL; if ((list = PyList_New(0)) == NULL) return NULL; for (GSList *elem = glist; elem; elem = g_slist_next(elem)) { PyObject *obj = convertor->f(elem->data); if (!obj) continue; PyList_Append(list, obj); Py_DECREF(obj); } return list; } static int set_int(_UpdateRecordObject *self, PyObject *value, void *member_offset) { long val; if (check_UpdateRecordStatus(self)) return -1; if (PyLong_Check(value)) { val = PyLong_AsLong(value); } else if (PyFloat_Check(value)) { val = (long long) 
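The date handling in get_datetime() above cascades through three formats: a full "%Y-%m-%d %H:%M:%S" timestamp, a date-only "%Y-%m-%d" string, and finally a raw epoch number. A minimal standalone sketch of the same cascade using only libc; the helper name parse_update_date and the sample strings are illustrative and not part of the bindings:

#define _XOPEN_SOURCE 700   /* for strptime() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Parse an updateinfo-style date string the way get_datetime() does:
 * full timestamp first, date-only second, raw epoch as a last resort.
 * Returns 0 on success and fills either *tm_out or *epoch_out. */
static int
parse_update_date(const char *str, struct tm *tm_out, long long *epoch_out)
{
    memset(tm_out, 0, sizeof(*tm_out));
    if (strptime(str, "%Y-%m-%d %H:%M:%S", tm_out))
        return 0;

    memset(tm_out, 0, sizeof(*tm_out));
    if (strptime(str, "%Y-%m-%d", tm_out))
        return 0;

    char *end;
    long long epoch = strtoll(str, &end, 10);
    if (*end == '\0') {      /* whole string was numeric -> treat as epoch */
        *epoch_out = epoch;
        return 0;
    }
    return -1;               /* unparsable, the caller raises an error */
}

int main(void)
{
    struct tm tm;
    long long epoch = 0;
    const char *samples[] = {"2021-01-26 09:15:00", "2021-01-26", "1611652500"};
    for (size_t i = 0; i < 3; i++)
        printf("%s -> %s\n", samples[i],
               parse_update_date(samples[i], &tm, &epoch) ? "error" : "ok");
    return 0;
}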
PyFloat_AS_DOUBLE(value); } else { PyErr_SetString(PyExc_TypeError, "Number expected!"); return -1; } cr_UpdateRecord *rec = self->record; *((int *) ((size_t) rec + (size_t) member_offset)) = (int) val; return 0; } static int set_str(_UpdateRecordObject *self, PyObject *value, void *member_offset) { if (check_UpdateRecordStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return -1; } cr_UpdateRecord *rec = self->record; char *str = PyObject_ToChunkedString(value, rec->chunk); *((char **) ((size_t) rec + (size_t) member_offset)) = str; return 0; } static int set_datetime(_UpdateRecordObject *self, PyObject *value, void *member_offset) { PyDateTime_IMPORT; if (check_UpdateRecordStatus(self)) return -1; if (value == Py_None) { return 0; } cr_UpdateRecord *rec = self->record; if (PyLong_Check(value)) { unsigned long long epoch = PyLong_AsUnsignedLongLong(value); /* Length 13 is plenty of space for epoch. */ char *date = malloc(13 * sizeof(char)); int ret = snprintf(date, 13, "%llu", epoch); if (ret < 0 || ret > 12){ PyErr_SetString(PyExc_TypeError, "Invalid epoch value!"); free(date); return -1; } char *str = cr_safe_string_chunk_insert(rec->chunk, date); free(date); *((char **) ((size_t) rec + (size_t) member_offset)) = str; return 0; } if (!PyDateTime_Check(value)) { PyErr_SetString(PyExc_TypeError, "DateTime, integer epoch or None expected!"); return -1; } /* Length is 20: yyyy-mm-dd HH:MM:SS */ char *date = malloc(20 * sizeof(char)); snprintf(date, 20, "%04d-%02d-%02d %02d:%02d:%02d", PyDateTime_GET_YEAR(value) % 9999, PyDateTime_GET_MONTH(value) % 13, PyDateTime_GET_DAY(value) % 32, PyDateTime_DATE_GET_HOUR(value) % 24, (PyDateTime_DATE_GET_MINUTE(value) % 60), PyDateTime_DATE_GET_SECOND(value) % 60); char *str = cr_safe_string_chunk_insert(rec->chunk, date); free(date); *((char **) ((size_t) rec + (size_t) member_offset)) = str; return 0; } static PyGetSetDef updaterecord_getsetters[] = { {"fromstr", (getter)get_str, (setter)set_str, "Who issued this update", OFFSET(from)}, {"status", (getter)get_str, (setter)set_str, "Status of the update", OFFSET(status)}, {"type", (getter)get_str, (setter)set_str, "Update type", OFFSET(type)}, {"version", (getter)get_str, (setter)set_str, "Version of update", OFFSET(version)}, {"id", (getter)get_str, (setter)set_str, "Update id", OFFSET(id)}, {"title", (getter)get_str, (setter)set_str, "Update title", OFFSET(title)}, {"issued_date", (getter)get_datetime, (setter)set_datetime, "Date when the update was issued", OFFSET(issued_date)}, {"updated_date", (getter)get_datetime, (setter)set_datetime, "Date when the update was updated", OFFSET(updated_date)}, {"rights", (getter)get_str, (setter)set_str, "Copyrights", OFFSET(rights)}, {"release", (getter)get_str, (setter)set_str, "Update release", OFFSET(release)}, {"pushcount", (getter)get_str, (setter)set_str, "Pushcount", OFFSET(pushcount)}, {"severity", (getter)get_str, (setter)set_str, "Severity", OFFSET(severity)}, {"summary", (getter)get_str, (setter)set_str, "Short summary", OFFSET(summary)}, {"description", (getter)get_str, (setter)set_str, "Description", OFFSET(description)}, {"solution", (getter)get_str, (setter)set_str, "Solution", OFFSET(solution)}, {"references", (getter)get_list, (setter)NULL, "List of UpdateReferences", &(list_convertors[0])}, {"collections", (getter)get_list, (setter)NULL, "List of UpdateCollections", &(list_convertors[1])}, {"reboot_suggested", 
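The OFFSET macro and the generic get_*/set_* pairs above avoid writing one getter per field: each PyGetSetDef entry smuggles the member's byte offset (or a ListConvertor pointer) through the closure argument, and a single function dereferences the field through that offset. A toy reimplementation of the idea outside the Python C API; ToyRecord and its fields are invented purely for illustration:

#include <stddef.h>
#include <stdio.h>

/* Toy record standing in for cr_UpdateRecord. */
typedef struct {
    char *id;
    char *title;
    int   reboot_suggested;
} ToyRecord;

#define TOY_OFFSET(member) ((void *) offsetof(ToyRecord, member))

/* One generic string getter serves every char* member; the per-field
 * information travels in the closure pointer, exactly like the
 * get_str()/PyGetSetDef pairs above. */
static const char *
toy_get_str(ToyRecord *rec, void *member_offset)
{
    char *str = *((char **) ((size_t) rec + (size_t) member_offset));
    return str ? str : "(none)";
}

int main(void)
{
    ToyRecord rec = { "FEDORA-2021-1", "Example advisory", 0 };
    printf("id:    %s\n", toy_get_str(&rec, TOY_OFFSET(id)));
    printf("title: %s\n", toy_get_str(&rec, TOY_OFFSET(title)));
    return 0;
}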
(getter)get_int, (setter)set_int, "Suggested reboot", OFFSET(reboot_suggested)}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject UpdateRecord_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.UpdateRecord", .tp_basicsize = sizeof(_UpdateRecordObject), .tp_dealloc = (destructor) updaterecord_dealloc, .tp_repr = (reprfunc) updaterecord_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = updaterecord_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = updaterecord_methods, .tp_getset = updaterecord_getsetters, .tp_init = (initproc) updaterecord_init, .tp_new = updaterecord_new, }; createrepo_c-0.17.0/src/python/updaterecord-py.h000066400000000000000000000022201400672373200216220ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_UPDATERECORD_PY_H #define CR_UPDATERECORD_PY_H #include "src/createrepo_c.h" extern PyTypeObject UpdateRecord_Type; #define UpdateRecordObject_Check(o) PyObject_TypeCheck(o, &UpdateRecord_Type) PyObject *Object_FromUpdateRecord(cr_UpdateRecord *rec); cr_UpdateRecord *UpdateRecord_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/updatereference-py.c000066400000000000000000000141611400672373200223040ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include "updatereference-py.h" #include "exception-py.h" #include "typeconversion.h" #include "contentstat-py.h" typedef struct { PyObject_HEAD cr_UpdateReference *reference; } _UpdateReferenceObject; PyObject * Object_FromUpdateReference(cr_UpdateReference *ref) { PyObject *py_rec; if (!ref) { PyErr_SetString(PyExc_ValueError, "Expected a cr_UpdateReference pointer not NULL."); return NULL; } py_rec = PyObject_CallObject((PyObject *) &UpdateReference_Type, NULL); cr_updatereference_free(((_UpdateReferenceObject *)py_rec)->reference); ((_UpdateReferenceObject *)py_rec)->reference = ref; return py_rec; } cr_UpdateReference * UpdateReference_FromPyObject(PyObject *o) { if (!UpdateReferenceObject_Check(o)) { PyErr_SetString(PyExc_TypeError, "Expected a UpdateReference object."); return NULL; } return ((_UpdateReferenceObject *)o)->reference; } static int check_UpdateReferenceStatus(const _UpdateReferenceObject *self) { assert(self != NULL); assert(UpdateReferenceObject_Check(self)); if (self->reference == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c UpdateReference object."); return -1; } return 0; } /* Function on the type */ static PyObject * updatereference_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _UpdateReferenceObject *self = (_UpdateReferenceObject *)type->tp_alloc(type, 0); if (self) { self->reference = NULL; } return (PyObject *)self; } PyDoc_STRVAR(updatereference_init__doc__, ".. method:: __init__()\n\n"); static int updatereference_init(_UpdateReferenceObject *self, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { /* Free all previous resources when reinitialization */ if (self->reference) cr_updatereference_free(self->reference); /* Init */ self->reference = cr_updatereference_new(); if (self->reference == NULL) { PyErr_SetString(CrErr_Exception, "UpdateReference initialization failed"); return -1; } return 0; } static void updatereference_dealloc(_UpdateReferenceObject *self) { if (self->reference) cr_updatereference_free(self->reference); Py_TYPE(self)->tp_free(self); } static PyObject * updatereference_repr(G_GNUC_UNUSED _UpdateReferenceObject *self) { if (self->reference->type) return PyUnicode_FromFormat("", self->reference->type); else return PyUnicode_FromFormat(""); } /* UpdateReference methods */ PyDoc_STRVAR(copy__doc__, "copy() -> UpdateReference\n\n" "Return copy of the UpdateReference object"); static PyObject * copy_updatereference(_UpdateReferenceObject *self, G_GNUC_UNUSED void *nothing) { if (check_UpdateReferenceStatus(self)) return NULL; return Object_FromUpdateReference(cr_updatereference_copy(self->reference)); } static struct PyMethodDef updatereference_methods[] = { {"copy", (PyCFunction)copy_updatereference, METH_NOARGS, copy__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; /* getsetters */ #define OFFSET(member) (void *) offsetof(cr_UpdateReference, member) static PyObject * get_str(_UpdateReferenceObject *self, void *member_offset) { if (check_UpdateReferenceStatus(self)) return NULL; cr_UpdateReference *ref = self->reference; char *str = *((char **) ((size_t) ref + (size_t) member_offset)); if (str == NULL) Py_RETURN_NONE; return PyUnicode_FromString(str); } static int set_str(_UpdateReferenceObject *self, PyObject *value, void *member_offset) { if (check_UpdateReferenceStatus(self)) return -1; if (!PyUnicode_Check(value) && !PyBytes_Check(value) && value != Py_None) { PyErr_SetString(PyExc_TypeError, "Unicode, bytes, or None expected!"); return 
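On the C side, the objects this file wraps are plain structs whose strings live in a per-object GStringChunk. A rough sketch of building a reference and attaching it to a record, mirroring append_reference() above (which always appends a copy so the caller keeps its own object); the in-tree include path, the example URL, and the exact ownership rules are assumptions rather than guarantees:

#include <glib.h>
#include "src/createrepo_c.h"

int main(void)
{
    cr_UpdateRecord *rec = cr_updaterecord_new();
    cr_UpdateReference *ref = cr_updatereference_new();

    /* Strings are interned into the object's chunk, as set_str() does. */
    ref->href  = cr_safe_string_chunk_insert(ref->chunk,
                                             "https://bugzilla.example.com/123");
    ref->id    = cr_safe_string_chunk_insert(ref->chunk, "123");
    ref->type  = cr_safe_string_chunk_insert(ref->chunk, "bugzilla");
    ref->title = cr_safe_string_chunk_insert(ref->chunk, "Example bug");

    /* Append a copy; the original stays owned by this caller. */
    cr_updaterecord_append_reference(rec, cr_updatereference_copy(ref));

    cr_updatereference_free(ref);
    cr_updaterecord_free(rec);
    return 0;
}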
-1; } cr_UpdateReference *ref = self->reference; char *str = PyObject_ToChunkedString(value, ref->chunk); *((char **) ((size_t) ref + (size_t) member_offset)) = str; return 0; } static PyGetSetDef updatereference_getsetters[] = { {"href", (getter)get_str, (setter)set_str, "Reference URL",OFFSET(href)}, {"id", (getter)get_str, (setter)set_str, "ID", OFFSET(id)}, {"type", (getter)get_str, (setter)set_str, "Type", OFFSET(type)}, {"title", (getter)get_str, (setter)set_str, "Title", OFFSET(title)}, {NULL, NULL, NULL, NULL, NULL} /* sentinel */ }; /* Object */ PyTypeObject UpdateReference_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.UpdateReference", .tp_basicsize = sizeof(_UpdateReferenceObject), .tp_dealloc = (destructor) updatereference_dealloc, .tp_repr = (reprfunc) updatereference_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = updatereference_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = updatereference_methods, .tp_getset = updatereference_getsetters, .tp_init = (initproc) updatereference_init, .tp_new = updatereference_new, }; createrepo_c-0.17.0/src/python/updatereference-py.h000066400000000000000000000022531400672373200223100ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_UPDATEREFERENCE_PY_H #define CR_UPDATEREFERENCE_PY_H #include "src/createrepo_c.h" extern PyTypeObject UpdateReference_Type; #define UpdateReferenceObject_Check(o) PyObject_TypeCheck(o, &UpdateReference_Type) PyObject *Object_FromUpdateReference(cr_UpdateReference *rec); cr_UpdateReference *UpdateReference_FromPyObject(PyObject *o); #endif createrepo_c-0.17.0/src/python/xml_dump-py.c000066400000000000000000000075471400672373200210020ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include "src/createrepo_c.h" #include "typeconversion.h" #include "package-py.h" #include "exception-py.h" #include "updaterecord-py.h" PyObject * py_xml_dump_primary(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *py_pkg, *py_str; char *xml; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:py_xml_dump_primary", &Package_Type, &py_pkg)) return NULL; xml = cr_xml_dump_primary(Package_FromPyObject(py_pkg), &err); if (err) { nice_exception(&err, NULL); return NULL; } py_str = PyUnicodeOrNone_FromString(xml); free(xml); return py_str; } PyObject * py_xml_dump_filelists(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *py_pkg, *py_str; char *xml; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:py_xml_dump_filelists", &Package_Type, &py_pkg)) return NULL; xml = cr_xml_dump_filelists(Package_FromPyObject(py_pkg), &err); if (err) { nice_exception(&err, NULL); return NULL; } py_str = PyUnicodeOrNone_FromString(xml); free(xml); return py_str; } PyObject * py_xml_dump_other(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *py_pkg, *py_str; char *xml; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:py_xml_dump_other", &Package_Type, &py_pkg)) return NULL; xml = cr_xml_dump_other(Package_FromPyObject(py_pkg), &err); if (err) { nice_exception(&err, NULL); return NULL; } py_str = PyUnicodeOrNone_FromString(xml); free(xml); return py_str; } PyObject * py_xml_dump(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *py_pkg, *tuple; struct cr_XmlStruct xml_res; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:py_xml_dump", &Package_Type, &py_pkg)) return NULL; xml_res = cr_xml_dump(Package_FromPyObject(py_pkg), &err); if (err) { nice_exception(&err, NULL); return NULL; } if ((tuple = PyTuple_New(3)) == NULL) { free(xml_res.primary); free(xml_res.filelists); free(xml_res.other); return NULL; } PyTuple_SetItem(tuple, 0, PyUnicodeOrNone_FromString(xml_res.primary)); PyTuple_SetItem(tuple, 1, PyUnicodeOrNone_FromString(xml_res.filelists)); PyTuple_SetItem(tuple, 2, PyUnicodeOrNone_FromString(xml_res.other)); free(xml_res.primary); free(xml_res.filelists); free(xml_res.other); return tuple; } PyObject * py_xml_dump_updaterecord(G_GNUC_UNUSED PyObject *self, PyObject *args) { PyObject *py_rec, *py_str; char *xml = NULL; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:py_xml_dump_updaterecord", &UpdateRecord_Type, &py_rec)) return NULL; xml = cr_xml_dump_updaterecord(UpdateRecord_FromPyObject(py_rec), &err); if (err) { nice_exception(&err, NULL); free(xml); return NULL; } py_str = PyUnicodeOrNone_FromString(xml); free(xml); return py_str; } createrepo_c-0.17.0/src/python/xml_dump-py.h000066400000000000000000000035201400672373200207720ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_XML_DUMP_PY_H #define CR_XML_DUMP_PY_H #include "src/createrepo_c.h" PyDoc_STRVAR(xml_dump_primary__doc__, "xml_dump_primary(pkg) -> str\n\n" "Generate primary xml chunk from the package"); PyObject *py_xml_dump_primary(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_dump_filelists__doc__, "xml_dump_filelists(pkg) -> str\n\n" "Generate filelists xml chunk from the package"); PyObject *py_xml_dump_filelists(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_dump_other__doc__, "xml_dump_other(pkg) -> str\n\n" "Generate other xml chunk from the package"); PyObject *py_xml_dump_other(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_dump__doc__, "xml_dump(pkg) -> (str, str, str)\n\n" "Generate primary, filelists and other xml chunks from the package"); PyObject *py_xml_dump(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_dump_updaterecord__doc__, "xml_dump_updaterecord(pkg) -> str\n\n" "Generate xml chunk from UpdateRecord"); PyObject *py_xml_dump_updaterecord(PyObject *self, PyObject *args); #endif createrepo_c-0.17.0/src/python/xml_file-py.c000066400000000000000000000163261400672373200207470ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "xml_file-py.h" #include "package-py.h" #include "exception-py.h" #include "contentstat-py.h" #include "typeconversion.h" typedef struct { PyObject_HEAD cr_XmlFile *xmlfile; PyObject *py_stat; } _XmlFileObject; static PyObject * xmlfile_close(_XmlFileObject *self, void *nothing); static int check_XmlFileStatus(const _XmlFileObject *self) { assert(self != NULL); assert(XmlFileObject_Check(self)); if (self->xmlfile == NULL) { PyErr_SetString(CrErr_Exception, "Improper createrepo_c XmlFile object (Already closed file?)."); return -1; } return 0; } /* Function on the type */ static PyObject * xmlfile_new(PyTypeObject *type, G_GNUC_UNUSED PyObject *args, G_GNUC_UNUSED PyObject *kwds) { _XmlFileObject *self = (_XmlFileObject *)type->tp_alloc(type, 0); if (self) { self->xmlfile = NULL; self->py_stat = NULL; } return (PyObject *)self; } PyDoc_STRVAR(xmlfile_init__doc__, "XmlFile object represents a single XML file (primary, filelists or other).\n\n" ".. method:: __init__(path, type, compression_type, contentstat)\n\n" " :arg path: Path to the xml file\n" " :arg type: Type of the XML file. 
One from XMLFILE_PRIMARY,\n" " XMLFILE_FILELISTS, XMLFILE_OTHER constants\n" " :arg compression_type: Compression type specified by constant\n" " :arg contentstat: ContentStat object to gather content statistics or None"); static int xmlfile_init(_XmlFileObject *self, PyObject *args, G_GNUC_UNUSED PyObject *kwds) { char *path; int type, comtype; GError *err = NULL; PyObject *py_stat, *ret; cr_ContentStat *stat; if (!PyArg_ParseTuple(args, "siiO|:xmlfile_init", &path, &type, &comtype, &py_stat)) return -1; /* Check arguments */ if (type < 0 || type >= CR_XMLFILE_SENTINEL) { PyErr_SetString(PyExc_ValueError, "Unknown XML file type"); return -1; } if (comtype < 0 || comtype >= CR_CW_COMPRESSION_SENTINEL) { PyErr_SetString(PyExc_ValueError, "Unknown compression type"); return -1; } if (py_stat == Py_None) { stat = NULL; } else if (ContentStatObject_Check(py_stat)) { stat = ContentStat_FromPyObject(py_stat); } else { PyErr_SetString(PyExc_TypeError, "Use ContentStat or None"); return -1; } /* Free all previous resources when reinitialization */ ret = xmlfile_close(self, NULL); Py_XDECREF(ret); Py_XDECREF(self->py_stat); self->py_stat = NULL; if (ret == NULL) { // Error encountered! return -1; } /* Init */ self->xmlfile = cr_xmlfile_sopen(path, type, comtype, stat, &err); if (err) { nice_exception(&err, NULL); return -1; } self->py_stat = py_stat; Py_XINCREF(py_stat); return 0; } static void xmlfile_dealloc(_XmlFileObject *self) { cr_xmlfile_close(self->xmlfile, NULL); Py_XDECREF(self->py_stat); Py_TYPE(self)->tp_free(self); } static PyObject * xmlfile_repr(_XmlFileObject *self) { char *type; switch (self->xmlfile->type) { case CR_XMLFILE_PRIMARY: type = "Primary"; break; case CR_XMLFILE_FILELISTS: type = "Filelists"; break; case CR_XMLFILE_OTHER: type = "Other"; break; default: type = "Unknown"; } return PyUnicode_FromFormat("", type); } /* XmlFile methods */ PyDoc_STRVAR(set_num_of_pkgs__doc__, "set_num_of_pkgs(number_of_packages) -> None\n\n" "Set number of all packages"); static PyObject * set_num_of_pkgs(_XmlFileObject *self, PyObject *args) { long num; GError *err = NULL; if (!PyArg_ParseTuple(args, "l:set_num_of_pkgs", &num)) return NULL; if (check_XmlFileStatus(self)) return NULL; cr_xmlfile_set_num_of_pkgs(self->xmlfile, num, &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(add_pkg__doc__, "add_pkg(Package) -> None\n\n" "Add Package to the xml"); static PyObject * add_pkg(_XmlFileObject *self, PyObject *args) { PyObject *py_pkg; GError *err = NULL; if (!PyArg_ParseTuple(args, "O!:add_pkg", &Package_Type, &py_pkg)) return NULL; if (check_XmlFileStatus(self)) return NULL; cr_xmlfile_add_pkg(self->xmlfile, Package_FromPyObject(py_pkg), &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(add_chunk__doc__, "add_chunk(chunk) -> None\n\n" "Add a string chunk to the xml"); static PyObject * add_chunk(_XmlFileObject *self, PyObject *args) { char *chunk; GError *err = NULL; if (!PyArg_ParseTuple(args, "s:add_chunk", &chunk)) return NULL; if (check_XmlFileStatus(self)) return NULL; cr_xmlfile_add_chunk(self->xmlfile, chunk, &err); if (err) { nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(close__doc__, "close() -> None\n\n" "Close the XML file"); static PyObject * xmlfile_close(_XmlFileObject *self, G_GNUC_UNUSED void *nothing) { GError *err = NULL; if (self->xmlfile) { cr_xmlfile_close(self->xmlfile, &err); self->xmlfile = NULL; } Py_XDECREF(self->py_stat); self->py_stat = NULL; if (err) { 
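Seen from C, the XmlFile wrapper above is a thin veneer over cr_xmlfile_sopen(), cr_xmlfile_set_num_of_pkgs(), cr_xmlfile_add_pkg() and cr_xmlfile_close(). A minimal lifecycle sketch that writes an (empty) uncompressed primary.xml; the output filename is illustrative and error handling is condensed:

#include <glib.h>
#include "src/createrepo_c.h"

int main(void)
{
    GError *err = NULL;

    cr_XmlFile *f = cr_xmlfile_sopen("primary.xml", CR_XMLFILE_PRIMARY,
                                     CR_CW_NO_COMPRESSION, NULL, &err);
    if (err) {
        g_printerr("open failed: %s\n", err->message);
        g_clear_error(&err);
        return 1;
    }

    /* Declare the total package count for the metadata header. */
    cr_xmlfile_set_num_of_pkgs(f, 0, &err);
    if (err) {
        g_printerr("set_num_of_pkgs failed: %s\n", err->message);
        g_clear_error(&err);
        cr_xmlfile_close(f, NULL);
        return 1;
    }

    /* cr_xmlfile_add_pkg(f, pkg, &err) would be called here, once per
     * cr_Package, exactly as add_pkg() above does. */

    cr_xmlfile_close(f, &err);
    if (err) {
        g_printerr("close failed: %s\n", err->message);
        g_clear_error(&err);
        return 1;
    }
    return 0;
}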
nice_exception(&err, NULL); return NULL; } Py_RETURN_NONE; } static struct PyMethodDef xmlfile_methods[] = { {"set_num_of_pkgs", (PyCFunction)set_num_of_pkgs, METH_VARARGS, set_num_of_pkgs__doc__}, {"add_pkg", (PyCFunction)add_pkg, METH_VARARGS, add_pkg__doc__}, {"add_chunk", (PyCFunction)add_chunk, METH_VARARGS, add_chunk__doc__}, {"close", (PyCFunction)xmlfile_close, METH_NOARGS, close__doc__}, {NULL, NULL, 0, NULL} /* sentinel */ }; PyTypeObject XmlFile_Type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "createrepo_c.XmlFile", .tp_basicsize = sizeof(_XmlFileObject), .tp_dealloc = (destructor) xmlfile_dealloc, .tp_repr = (reprfunc) xmlfile_repr, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = xmlfile_init__doc__, .tp_iter = PyObject_SelfIter, .tp_methods = xmlfile_methods, .tp_init = (initproc) xmlfile_init, .tp_new = xmlfile_new, }; createrepo_c-0.17.0/src/python/xml_file-py.h000066400000000000000000000020061400672373200207420ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_XML_FILE_PY_H #define CR_XML_FILE_PY_H #include "src/createrepo_c.h" extern PyTypeObject XmlFile_Type; #define XmlFileObject_Check(o) PyObject_TypeCheck(o, &XmlFile_Type) #endif createrepo_c-0.17.0/src/python/xml_parser-py.c000066400000000000000000000471701400672373200213250ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include "src/createrepo_c.h" #include "xml_parser-py.h" #include "typeconversion.h" #include "package-py.h" #include "repomd-py.h" #include "updateinfo-py.h" #include "exception-py.h" typedef struct { PyObject *py_newpkgcb; PyObject *py_pkgcb; PyObject *py_warningcb; PyObject *py_pkg; /*!< Current processed package */ } CbData; static int c_newpkgcb(cr_Package **pkg, const char *pkgId, const char *name, const char *arch, void *cbdata, GError **err) { PyObject *arglist, *result; CbData *data = cbdata; if (data->py_pkg) { // Decref ref count on previous processed package Py_DECREF(data->py_pkg); data->py_pkg = NULL; } arglist = Py_BuildValue("(sss)", pkgId, name, arch); result = PyObject_CallObject(data->py_newpkgcb, arglist); Py_DECREF(arglist); if (result == NULL) { // Exception raised PyErr_ToGError(err); return CR_CB_RET_ERR; } if (!PackageObject_Check(result) && result != Py_None) { PyErr_SetString(PyExc_TypeError, "Expected a cr_Package or None as a callback return value"); Py_DECREF(result); return CR_CB_RET_ERR; } if (result == Py_None) { *pkg = NULL; data->py_pkg = NULL; Py_DECREF(result); } else { *pkg = Package_FromPyObject(result); data->py_pkg = result; // Store reference to current package } return CR_CB_RET_OK; } static int c_pkgcb(cr_Package *pkg, void *cbdata, GError **err) { PyObject *arglist, *result, *py_pkg; CbData *data = cbdata; if (data->py_pkg) py_pkg = data->py_pkg; else py_pkg = Object_FromPackage(pkg, 1); arglist = Py_BuildValue("(O)", py_pkg); result = PyObject_CallObject(data->py_pkgcb, arglist); Py_DECREF(arglist); Py_DECREF(py_pkg); data->py_pkg = NULL; if (result == NULL) { // Exception raised PyErr_ToGError(err); return CR_CB_RET_ERR; } Py_DECREF(result); return CR_CB_RET_OK; } static int c_warningcb(cr_XmlParserWarningType type, char *msg, void *cbdata, GError **err) { PyObject *arglist, *result; CbData *data = cbdata; arglist = Py_BuildValue("(is)", type, msg); result = PyObject_CallObject(data->py_warningcb, arglist); Py_DECREF(arglist); if (result == NULL) { // Exception raised PyErr_ToGError(err); return CR_CB_RET_ERR; } Py_DECREF(result); return CR_CB_RET_OK; } PyObject * py_xml_parse_primary(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *filename; int do_files; PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb; CbData cbdata; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sOOOi:py_xml_parse_primary", &filename, &py_newpkgcb, &py_pkgcb, &py_warningcb, &do_files)) { return NULL; } if (!PyCallable_Check(py_newpkgcb) && py_newpkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "newpkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_pkgcb) && py_pkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "pkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } if (py_newpkgcb == Py_None && py_pkgcb == Py_None) { PyErr_SetString(PyExc_ValueError, "both pkgcb and newpkgcb cannot be None"); return NULL; } Py_XINCREF(py_newpkgcb); Py_XINCREF(py_pkgcb); Py_XINCREF(py_warningcb); cr_XmlParserNewPkgCb ptr_c_newpkgcb = NULL; cr_XmlParserPkgCb ptr_c_pkgcb = NULL; cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_newpkgcb != Py_None) ptr_c_newpkgcb = c_newpkgcb; if (py_pkgcb != Py_None) ptr_c_pkgcb = c_pkgcb; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = py_newpkgcb; cbdata.py_pkgcb = py_pkgcb; cbdata.py_warningcb = 
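The c_newpkgcb/c_pkgcb/c_warningcb adapters above exist only to bridge Python callables into the C callback contract. A bare-bones sketch of that contract used directly from C: passing NULL for newpkgcb (the same path the binding takes when the Python newpkgcb is None) lets the parser allocate packages itself, and the pkgcb just counts them. The repodata path is illustrative and package ownership/cleanup is glossed over:

#include <glib.h>
#include "src/createrepo_c.h"

static int
count_pkgcb(cr_Package *pkg, void *cbdata, G_GNUC_UNUSED GError **err)
{
    int *counter = cbdata;
    (*counter)++;
    g_debug("parsed package: %s", pkg->name);
    return CR_CB_RET_OK;           /* CR_CB_RET_ERR would abort the parse */
}

static int
print_warningcb(cr_XmlParserWarningType type, char *msg,
                G_GNUC_UNUSED void *cbdata, G_GNUC_UNUSED GError **err)
{
    g_warning("parser warning (type %d): %s", type, msg);
    return CR_CB_RET_OK;
}

int main(void)
{
    int counter = 0;
    GError *err = NULL;

    cr_xml_parse_primary("repodata/primary.xml.gz",
                         NULL, NULL,               /* newpkgcb + its data  */
                         count_pkgcb, &counter,    /* pkgcb + its data     */
                         print_warningcb, NULL,    /* warningcb + its data */
                         TRUE,                     /* do_files             */
                         &err);
    if (err) {
        g_printerr("parse failed: %s\n", err->message);
        g_clear_error(&err);
        return 1;
    }
    g_print("%d packages parsed\n", counter);
    return 0;
}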
py_warningcb; cbdata.py_pkg = NULL; cr_xml_parse_primary(filename, ptr_c_newpkgcb, &cbdata, ptr_c_pkgcb, &cbdata, ptr_c_warningcb, &cbdata, do_files, &tmp_err); Py_XDECREF(py_newpkgcb); Py_XDECREF(py_pkgcb); Py_XDECREF(py_warningcb); Py_XDECREF(cbdata.py_pkg); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_primary_snippet(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *target; int do_files; PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb; CbData cbdata; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sOOOi:py_xml_parse_primary_snippet", &target, &py_newpkgcb, &py_pkgcb, &py_warningcb, &do_files)) { return NULL; } if (!PyCallable_Check(py_newpkgcb) && py_newpkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "newpkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_pkgcb) && py_pkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "pkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } if (py_newpkgcb == Py_None && py_pkgcb == Py_None) { PyErr_SetString(PyExc_ValueError, "both pkgcb and newpkgcb cannot be None"); return NULL; } Py_XINCREF(py_newpkgcb); Py_XINCREF(py_pkgcb); Py_XINCREF(py_warningcb); cr_XmlParserNewPkgCb ptr_c_newpkgcb = NULL; cr_XmlParserPkgCb ptr_c_pkgcb = NULL; cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_newpkgcb != Py_None) ptr_c_newpkgcb = c_newpkgcb; if (py_pkgcb != Py_None) ptr_c_pkgcb = c_pkgcb; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = py_newpkgcb; cbdata.py_pkgcb = py_pkgcb; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; cr_xml_parse_primary_snippet(target, ptr_c_newpkgcb, &cbdata, ptr_c_pkgcb, &cbdata, ptr_c_warningcb, &cbdata, do_files, &tmp_err); Py_XDECREF(py_newpkgcb); Py_XDECREF(py_pkgcb); Py_XDECREF(py_warningcb); Py_XDECREF(cbdata.py_pkg); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_filelists(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *filename; PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb; CbData cbdata; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sOOO:py_xml_parse_filelists", &filename, &py_newpkgcb, &py_pkgcb, &py_warningcb)) { return NULL; } if (!PyCallable_Check(py_newpkgcb) && py_newpkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "newpkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_pkgcb) && py_pkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "pkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } if (py_newpkgcb == Py_None && py_pkgcb == Py_None) { PyErr_SetString(PyExc_ValueError, "both pkgcb and newpkgcb cannot be None"); return NULL; } Py_XINCREF(py_newpkgcb); Py_XINCREF(py_pkgcb); Py_XINCREF(py_warningcb); cr_XmlParserNewPkgCb ptr_c_newpkgcb = NULL; cr_XmlParserPkgCb ptr_c_pkgcb = NULL; cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_newpkgcb != Py_None) ptr_c_newpkgcb = c_newpkgcb; if (py_pkgcb != Py_None) ptr_c_pkgcb = c_pkgcb; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = py_newpkgcb; cbdata.py_pkgcb = py_pkgcb; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; cr_xml_parse_filelists(filename, ptr_c_newpkgcb, &cbdata, ptr_c_pkgcb, &cbdata, 
ptr_c_warningcb, &cbdata, &tmp_err); Py_XDECREF(py_newpkgcb); Py_XDECREF(py_pkgcb); Py_XDECREF(py_warningcb); Py_XDECREF(cbdata.py_pkg); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_filelists_snippet(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *target; PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb; CbData cbdata; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sOOO:py_xml_parse_filelists_snippet", &target, &py_newpkgcb, &py_pkgcb, &py_warningcb)) { return NULL; } if (!PyCallable_Check(py_newpkgcb) && py_newpkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "newpkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_pkgcb) && py_pkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "pkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } if (py_newpkgcb == Py_None && py_pkgcb == Py_None) { PyErr_SetString(PyExc_ValueError, "both pkgcb and newpkgcb cannot be None"); return NULL; } Py_XINCREF(py_newpkgcb); Py_XINCREF(py_pkgcb); Py_XINCREF(py_warningcb); cr_XmlParserNewPkgCb ptr_c_newpkgcb = NULL; cr_XmlParserPkgCb ptr_c_pkgcb = NULL; cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_newpkgcb != Py_None) ptr_c_newpkgcb = c_newpkgcb; if (py_pkgcb != Py_None) ptr_c_pkgcb = c_pkgcb; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = py_newpkgcb; cbdata.py_pkgcb = py_pkgcb; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; cr_xml_parse_filelists_snippet(target, ptr_c_newpkgcb, &cbdata, ptr_c_pkgcb, &cbdata, ptr_c_warningcb, &cbdata, &tmp_err); Py_XDECREF(py_newpkgcb); Py_XDECREF(py_pkgcb); Py_XDECREF(py_warningcb); Py_XDECREF(cbdata.py_pkg); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_other(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *filename; PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb; CbData cbdata; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sOOO:py_xml_parse_other", &filename, &py_newpkgcb, &py_pkgcb, &py_warningcb)) { return NULL; } if (!PyCallable_Check(py_newpkgcb) && py_newpkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "newpkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_pkgcb) && py_pkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "pkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } if (py_newpkgcb == Py_None && py_pkgcb == Py_None) { PyErr_SetString(PyExc_ValueError, "both pkgcb and newpkgcb cannot be None"); return NULL; } Py_XINCREF(py_newpkgcb); Py_XINCREF(py_pkgcb); Py_XINCREF(py_warningcb); cr_XmlParserNewPkgCb ptr_c_newpkgcb = NULL; cr_XmlParserPkgCb ptr_c_pkgcb = NULL; cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_newpkgcb != Py_None) ptr_c_newpkgcb = c_newpkgcb; if (py_pkgcb != Py_None) ptr_c_pkgcb = c_pkgcb; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = py_newpkgcb; cbdata.py_pkgcb = py_pkgcb; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; cr_xml_parse_other(filename, ptr_c_newpkgcb, &cbdata, ptr_c_pkgcb, &cbdata, ptr_c_warningcb, &cbdata, &tmp_err); Py_XDECREF(py_newpkgcb); Py_XDECREF(py_pkgcb); Py_XDECREF(py_warningcb); Py_XDECREF(cbdata.py_pkg); if (tmp_err) { nice_exception(&tmp_err, 
NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_other_snippet(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *target; PyObject *py_newpkgcb, *py_pkgcb, *py_warningcb; CbData cbdata; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sOOO:py_xml_parse_other_snippet", &target, &py_newpkgcb, &py_pkgcb, &py_warningcb)) { return NULL; } if (!PyCallable_Check(py_newpkgcb) && py_newpkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "newpkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_pkgcb) && py_pkgcb != Py_None) { PyErr_SetString(PyExc_TypeError, "pkgcb must be callable or None"); return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } if (py_newpkgcb == Py_None && py_pkgcb == Py_None) { PyErr_SetString(PyExc_ValueError, "both pkgcb and newpkgcb cannot be None"); return NULL; } Py_XINCREF(py_newpkgcb); Py_XINCREF(py_pkgcb); Py_XINCREF(py_warningcb); cr_XmlParserNewPkgCb ptr_c_newpkgcb = NULL; cr_XmlParserPkgCb ptr_c_pkgcb = NULL; cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_newpkgcb != Py_None) ptr_c_newpkgcb = c_newpkgcb; if (py_pkgcb != Py_None) ptr_c_pkgcb = c_pkgcb; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = py_newpkgcb; cbdata.py_pkgcb = py_pkgcb; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; cr_xml_parse_other_snippet(target, ptr_c_newpkgcb, &cbdata, ptr_c_pkgcb, &cbdata, ptr_c_warningcb, &cbdata, &tmp_err); Py_XDECREF(py_newpkgcb); Py_XDECREF(py_pkgcb); Py_XDECREF(py_warningcb); Py_XDECREF(cbdata.py_pkg); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_repomd(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *filename; PyObject *py_repomd, *py_warningcb; CbData cbdata; cr_Repomd *repomd; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sO!O:py_xml_parse_repomd", &filename, &Repomd_Type, &py_repomd, &py_warningcb)) { return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } Py_XINCREF(py_repomd); Py_XINCREF(py_warningcb); cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = NULL; cbdata.py_pkgcb = NULL; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; repomd = Repomd_FromPyObject(py_repomd); cr_xml_parse_repomd(filename, repomd, ptr_c_warningcb, &cbdata, &tmp_err); Py_XDECREF(py_repomd); Py_XDECREF(py_warningcb); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } PyObject * py_xml_parse_updateinfo(G_GNUC_UNUSED PyObject *self, PyObject *args) { char *filename; PyObject *py_updateinfo, *py_warningcb; CbData cbdata; cr_UpdateInfo *updateinfo; GError *tmp_err = NULL; if (!PyArg_ParseTuple(args, "sO!O:py_xml_parse_updateinfo", &filename, &UpdateInfo_Type, &py_updateinfo, &py_warningcb)) { return NULL; } if (!PyCallable_Check(py_warningcb) && py_warningcb != Py_None) { PyErr_SetString(PyExc_TypeError, "warningcb must be callable or None"); return NULL; } Py_XINCREF(py_updateinfo); Py_XINCREF(py_warningcb); cr_XmlParserWarningCb ptr_c_warningcb = NULL; if (py_warningcb != Py_None) ptr_c_warningcb = c_warningcb; cbdata.py_newpkgcb = NULL; cbdata.py_pkgcb = NULL; cbdata.py_warningcb = py_warningcb; cbdata.py_pkg = NULL; updateinfo = UpdateInfo_FromPyObject(py_updateinfo); 
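xml_parse_repomd() and xml_parse_updateinfo() differ from the package parsers: the caller hands in an empty object and the C parser fills it in place. A sketch of that flow for updateinfo; cr_updateinfo_new()/cr_updateinfo_free() follow the cr_*_new()/cr_*_free() naming convention used throughout these bindings but are not shown in this file, and the file path is illustrative:

#include <glib.h>
#include "src/createrepo_c.h"

int main(void)
{
    GError *err = NULL;
    cr_UpdateInfo *ui = cr_updateinfo_new();

    cr_xml_parse_updateinfo("repodata/updateinfo.xml", ui,
                            NULL, NULL,    /* no warning callback */
                            &err);
    if (err) {
        g_printerr("updateinfo parse failed: %s\n", err->message);
        g_clear_error(&err);
        cr_updateinfo_free(ui);
        return 1;
    }

    /* ui->updates is the same GSList of cr_UpdateRecord* that the
     * UpdateInfo "updates" attribute above exposes to Python. */
    for (GSList *elem = ui->updates; elem; elem = g_slist_next(elem)) {
        cr_UpdateRecord *rec = elem->data;
        g_print("%s: %s\n",
                rec->id ? rec->id : "?",
                rec->title ? rec->title : "");
    }

    cr_updateinfo_free(ui);
    return 0;
}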
cr_xml_parse_updateinfo(filename, updateinfo, ptr_c_warningcb, &cbdata, &tmp_err); Py_XDECREF(py_updateinfo); Py_XDECREF(py_warningcb); if (tmp_err) { nice_exception(&tmp_err, NULL); return NULL; } Py_RETURN_NONE; } createrepo_c-0.17.0/src/python/xml_parser-py.h000066400000000000000000000050731400672373200213260ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef CR_XML_PARSER_PY_H #define CR_XML_PARSER_PY_H #include "src/createrepo_c.h" PyDoc_STRVAR(xml_parse_primary__doc__, "xml_parse_primary(filename, newpkgcb, pkgcb, warningcb, do_files) -> None\n\n" "Parse primary.xml"); PyDoc_STRVAR(xml_parse_primary_snippet__doc__, "xml_parse_primary_snippet(snippet, newpkgcb, pkgcb, warningcb, do_files) -> None\n\n" "Parse primary xml snippet"); PyObject *py_xml_parse_primary(PyObject *self, PyObject *args); PyObject *py_xml_parse_primary_snippet(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_parse_filelists__doc__, "xml_parse_filelists(filename, newpkgcb, pkgcb, warningcb) -> None\n\n" "Parse filelists.xml"); PyDoc_STRVAR(xml_parse_filelists_snippet__doc__, "xml_parse_filelists_snippet(snippet, newpkgcb, pkgcb, warningcb) -> None\n\n" "Parse filelists xml snippet"); PyObject *py_xml_parse_filelists(PyObject *self, PyObject *args); PyObject *py_xml_parse_filelists_snippet(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_parse_other__doc__, "xml_parse_other(filename, newpkgcb, pkgcb, warningcb) -> None\n\n" "Parse other.xml"); PyDoc_STRVAR(xml_parse_other_snippet__doc__, "xml_parse_other_snippet(snippet, newpkgcb, pkgcb, warningcb) -> None\n\n" "Parse other xml snippet"); PyObject *py_xml_parse_other(PyObject *self, PyObject *args); PyObject *py_xml_parse_other_snippet(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_parse_repomd__doc__, "xml_parse_repomd(filename, repomd_object, warningcb) -> None\n\n" "Parse repomd.xml"); PyObject *py_xml_parse_repomd(PyObject *self, PyObject *args); PyDoc_STRVAR(xml_parse_updateinfo__doc__, "xml_parse_updateinfo(filename, updateinfo_object, warningcb) -> None\n\n" "Parse updateinfo.xml"); PyObject *py_xml_parse_updateinfo(PyObject *self, PyObject *args); #endif createrepo_c-0.17.0/src/repomd.c000066400000000000000000000757251400672373200164770ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include #include #include #include #include "cleanup.h" #include "error.h" #include "misc.h" #include "checksum.h" #include "repomd.h" #include "repomd_internal.h" #include "compression_wrapper.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define LOCATION_HREF_PREFIX "repodata/" #define DEFAULT_DATABASE_VERSION 10 #define BUFFER_SIZE 8192 cr_DistroTag * cr_distrotag_new() { return (cr_DistroTag *) g_malloc0(sizeof(cr_DistroTag)); } cr_RepomdRecord * cr_repomd_record_new(const char *type, const char *path) { cr_RepomdRecord *md = g_malloc0(sizeof(*md)); md->chunk = g_string_chunk_new(128); md->type = cr_safe_string_chunk_insert(md->chunk, type); md->size_open = G_GINT64_CONSTANT(-1); md->size_header = G_GINT64_CONSTANT(-1); if (path) { gchar *filename = cr_get_filename(path); gchar *location_href = g_strconcat(LOCATION_HREF_PREFIX, filename, NULL); md->location_real = g_string_chunk_insert(md->chunk, path); md->location_href = g_string_chunk_insert(md->chunk, location_href); g_free(location_href); } return md; } void cr_repomd_record_free(cr_RepomdRecord *md) { if (!md) return; g_string_chunk_free(md->chunk); g_free(md); } cr_RepomdRecord * cr_repomd_record_copy(const cr_RepomdRecord *orig) { cr_RepomdRecord *rec; if (!orig) return NULL; rec = cr_repomd_record_new(orig->type, NULL); rec->location_real = cr_safe_string_chunk_insert(rec->chunk, orig->location_real); rec->location_href = cr_safe_string_chunk_insert(rec->chunk, orig->location_href); rec->location_base = cr_safe_string_chunk_insert(rec->chunk, orig->location_base); rec->checksum = cr_safe_string_chunk_insert(rec->chunk, orig->checksum); rec->checksum_type = cr_safe_string_chunk_insert(rec->chunk, orig->checksum_type); rec->checksum_open = cr_safe_string_chunk_insert(rec->chunk, orig->checksum_open); rec->checksum_open_type = cr_safe_string_chunk_insert(rec->chunk, orig->checksum_open_type); rec->timestamp = orig->timestamp; rec->size = orig->size; rec->size_open = orig->size_open; rec->size_header = orig->size_header; rec->db_ver = orig->db_ver; if (orig->checksum_header) rec->checksum_header = cr_safe_string_chunk_insert(rec->chunk, orig->checksum_header); if (orig->checksum_header_type) rec->checksum_header_type = cr_safe_string_chunk_insert(rec->chunk, orig->checksum_header_type); return rec; } cr_ContentStat * cr_get_compressed_content_stat(const char *filename, cr_ChecksumType checksum_type, GError **err) { GError *tmp_err = NULL; assert(filename); assert(!err || *err == NULL); if (!g_file_test(filename, G_FILE_TEST_IS_REGULAR)) { g_set_error(err, ERR_DOMAIN, CRE_NOFILE, "File %s doesn't exists or not a regular file", filename); return NULL; } // Open compressed file cr_ContentStat *read_stat = g_malloc0(sizeof(cr_ContentStat)); CR_FILE *cwfile = cr_sopen(filename, CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, read_stat, &tmp_err); if (!cwfile) { g_propagate_prefixed_error(err, tmp_err, "Cannot open a file %s: ", filename); return NULL; } // Read compressed file and calculate checksum and size cr_ChecksumCtx 
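A typical caller pairs cr_repomd_record_new() above with cr_repomd_record_fill() (defined further down in this file) to describe one metadata file for repomd.xml. A condensed usage sketch; the record type string, the file path, and the CR_CHECKSUM_SHA256 constant are conventional choices assumed here rather than taken from this file:

#include <glib.h>
#include "src/createrepo_c.h"

int main(void)
{
    GError *err = NULL;

    /* The path is stored as location_real and a "repodata/"-prefixed
     * location_href, as cr_repomd_record_new() does above. */
    cr_RepomdRecord *rec = cr_repomd_record_new("primary",
                                                "repodata/primary.xml.gz");

    /* Compute checksums, sizes and timestamp of the file on disk. */
    cr_repomd_record_fill(rec, CR_CHECKSUM_SHA256, &err);
    if (err) {
        g_printerr("fill failed: %s\n", err->message);
        g_clear_error(&err);
        cr_repomd_record_free(rec);
        return 1;
    }

    g_print("%s: %s (%" G_GINT64_FORMAT " bytes)\n",
            rec->type, rec->checksum, rec->size);

    cr_repomd_record_free(rec);
    return 0;
}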
*checksum = cr_checksum_new(checksum_type, &tmp_err); if (tmp_err) { g_critical("%s: g_checksum_new() failed", __func__); g_propagate_prefixed_error(err, tmp_err, "Error while checksum calculation: "); return NULL; } gint64 size = G_GINT64_CONSTANT(0); long readed; unsigned char buffer[BUFFER_SIZE]; do { readed = cr_read(cwfile, (void *) buffer, BUFFER_SIZE, &tmp_err); if (readed == CR_CW_ERR) { g_debug("%s: Error while read compressed file %s: %s", __func__, filename, tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "Error while read compressed file %s: ", filename); break; } cr_checksum_update(checksum, buffer, readed, NULL); size += readed; } while (readed == BUFFER_SIZE); if (readed == CR_CW_ERR) return NULL; // Create result structure cr_ContentStat* result = g_malloc0(sizeof(cr_ContentStat)); if (result) { if (cwfile->stat) { result->hdr_checksum = cwfile->stat->hdr_checksum; result->hdr_checksum_type = cwfile->stat->hdr_checksum_type; result->hdr_size = cwfile->stat->hdr_size; } else { result->hdr_checksum = NULL; result->hdr_checksum_type = 0; result->hdr_size = G_GINT64_CONSTANT(-1); } result->checksum = cr_checksum_final(checksum, NULL); result->size = size; } else { g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot allocate memory"); } cr_close(cwfile, NULL); cr_contentstat_free(read_stat, NULL); return result; } int cr_repomd_record_fill(cr_RepomdRecord *md, cr_ChecksumType checksum_type, GError **err) { const char *checksum_str; cr_ChecksumType checksum_t; gchar *path; GError *tmp_err = NULL; assert(md); assert(!err || *err == NULL); if (!(md->location_real) || !strlen(md->location_real)) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Empty locations in repomd record object."); return CRE_BADARG; } path = md->location_real; checksum_str = cr_checksum_name_str(checksum_type); checksum_t = checksum_type; if (!g_file_test(path, G_FILE_TEST_IS_REGULAR)) { // File doesn't exists g_warning("%s: File %s doesn't exists", __func__, path); g_set_error(err, ERR_DOMAIN, CRE_NOFILE, "File %s doesn't exists or not a regular file", path); return CRE_NOFILE; } // Compute checksum of compressed file if (!md->checksum_type || !md->checksum) { gchar *chksum; chksum = cr_checksum_file(path, checksum_t, &tmp_err); if (!chksum) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while checksum calculation of %s:", path); return code; } md->checksum_type = g_string_chunk_insert(md->chunk, checksum_str); md->checksum = g_string_chunk_insert(md->chunk, chksum); g_free(chksum); } // Compute checksum of non compressed content and its size if (!md->checksum_open_type || !md->checksum_open || md->size_open == G_GINT64_CONSTANT(-1)) { cr_CompressionType com_type = cr_detect_compression(path, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot detect compression type of %s: ", path); return code; } if (com_type != CR_CW_UNKNOWN_COMPRESSION && com_type != CR_CW_NO_COMPRESSION) { // File compressed by supported algorithm cr_ContentStat *open_stat = NULL; open_stat = cr_get_compressed_content_stat(path, checksum_t, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while computing stat of compressed content of %s:", path); return code; } md->checksum_open_type = g_string_chunk_insert(md->chunk, checksum_str); md->checksum_open = g_string_chunk_insert(md->chunk, open_stat->checksum); if (md->size_open == G_GINT64_CONSTANT(-1)) md->size_open = open_stat->size; if 
(open_stat->hdr_checksum != NULL) { const char *hdr_checksum_str = cr_checksum_name_str(open_stat->hdr_checksum_type); md->checksum_header_type = g_string_chunk_insert(md->chunk, hdr_checksum_str); md->checksum_header = g_string_chunk_insert(md->chunk, open_stat->hdr_checksum); if (md->size_header == G_GINT64_CONSTANT(-1)) md->size_header = open_stat->hdr_size; g_free(open_stat->hdr_checksum); } g_free(open_stat->checksum); g_free(open_stat); } else { if (com_type != CR_CW_NO_COMPRESSION) { // Unknown compression g_warning("%s: File \"%s\" compressed by an unsupported type" " of compression", __func__, path); } md->checksum_open_type = NULL; md->checksum_open = NULL; md->size_open = G_GINT64_CONSTANT(-1); } } // Get timestamp and size of compressed file if (!md->timestamp || !md->size) { struct stat buf; if (!stat(path, &buf)) { if (!md->timestamp) { md->timestamp = buf.st_mtime; } if (!md->size) { md->size = buf.st_size; } } else { g_warning("%s: Stat on file \"%s\" failed", __func__, path); g_set_error(err, ERR_DOMAIN, CRE_STAT, "Stat() on %s failed: %s", path, g_strerror(errno)); return CRE_STAT; } } // Set db version if (!md->db_ver) md->db_ver = DEFAULT_DATABASE_VERSION; return CRE_OK; } int cr_repomd_record_compress_and_fill(cr_RepomdRecord *record, cr_RepomdRecord *crecord, cr_ChecksumType checksum_type, cr_CompressionType record_compression, const char *zck_dict_dir, GError **err) { int ret = CRE_OK; const char *suffix; gchar *path, *cpath; gchar *clocation_real, *clocation_href; gchar *checksum = NULL; gchar *cchecksum = NULL; gchar *hdrchecksum = NULL; int readed; char buf[BUFFER_SIZE]; CR_FILE *cw_plain; CR_FILE *cw_compressed; gint64 gf_size = G_GINT64_CONSTANT(-1), cgf_size = G_GINT64_CONSTANT(-1); gint64 gf_time = G_GINT64_CONSTANT(-1), cgf_time = G_GINT64_CONSTANT(-1); gint64 cgf_hdrsize = G_GINT64_CONSTANT(-1); struct stat gf_stat, cgf_stat; const char *checksum_str = cr_checksum_name_str(checksum_type); const char *hdr_checksum_str = NULL; GError *tmp_err = NULL; assert(record); assert(crecord); assert(!err || *err == NULL); if (!(record->location_real) || !strlen(record->location_real)) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Empty locations in repomd record object"); return CRE_BADARG; } if (!g_file_test(record->location_real, G_FILE_TEST_IS_REGULAR)) { // File doesn't exists g_warning("%s: File %s doesn't exists", __func__, record->location_real); g_set_error(err, ERR_DOMAIN, CRE_NOFILE, "File %s doesn't exists or not a regular file", record->location_real); return CRE_NOFILE;; } // Paths suffix = cr_compression_suffix(record_compression); // Only update locations, if they are not set yet if (!crecord->location_real){ clocation_real = g_strconcat(record->location_real, suffix, NULL); crecord->location_real = g_string_chunk_insert(crecord->chunk, clocation_real); g_free(clocation_real); } if (!crecord->location_href){ clocation_href = g_strconcat(record->location_href, suffix, NULL); crecord->location_href = g_string_chunk_insert(crecord->chunk, clocation_href); g_free(clocation_href); } path = record->location_real; cpath = crecord->location_real; // Compress file + get size of non compressed file int mode = CR_CW_NO_COMPRESSION; if (record_compression == CR_CW_ZCK_COMPRESSION) mode = CR_CW_AUTO_DETECT_COMPRESSION; cw_plain = cr_open(path, CR_CW_MODE_READ, mode, &tmp_err); if (!cw_plain) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", path); return ret; } _cleanup_free_ gchar *dict = NULL; size_t dict_size = 0; if 
(record_compression == CR_CW_ZCK_COMPRESSION && zck_dict_dir) { /* Find zdict */ _cleanup_free_ gchar *file_basename = NULL; _cleanup_free_ gchar *dict_base = NULL; if (g_str_has_suffix(cpath, ".zck")) dict_base = g_strndup(cpath, strlen(cpath)-4); else dict_base = g_strdup(cpath); file_basename = g_path_get_basename(dict_base); _cleanup_free_ gchar *dict_file = cr_get_dict_file(zck_dict_dir, file_basename); /* Read dictionary from file */ if (dict_file && !g_file_get_contents(dict_file, &dict, &dict_size, &tmp_err)) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error reading zchunk dict %s:", dict_file); return ret; } } _cleanup_free_ cr_ContentStat *out_stat = g_malloc0(sizeof(cr_ContentStat)); cw_compressed = cr_sopen(cpath, CR_CW_MODE_WRITE, record_compression, out_stat, &tmp_err); if (!cw_compressed) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", cpath); return ret; } if (record_compression == CR_CW_ZCK_COMPRESSION) { if (dict && cr_set_dict(cw_compressed, dict, dict_size, &tmp_err) != CRE_OK) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Unable to set zdict for %s: ", cpath); return ret; } if (cr_set_autochunk(cw_compressed, TRUE, &tmp_err) != CRE_OK) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Unable to set auto-chunking for %s: ", cpath); return ret; } } while ((readed = cr_read(cw_plain, buf, BUFFER_SIZE, &tmp_err)) > 0) { cr_write(cw_compressed, buf, (unsigned int) readed, &tmp_err); if (tmp_err) break; } cr_close(cw_plain, NULL); if (tmp_err) { ret = tmp_err->code; cr_close(cw_compressed, NULL); g_debug("%s: Error while repomd record compression: %s", __func__, tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "Error while compression %s -> %s:", path, cpath); return ret; } cr_close(cw_compressed, &tmp_err); if (tmp_err) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while closing %s: ", path); return ret; } // Compute checksums checksum = cr_checksum_file(path, checksum_type, &tmp_err); if (!checksum) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while checksum calculation:"); goto end; } cchecksum = cr_checksum_file(cpath, checksum_type, &tmp_err); if (!cchecksum) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while checksum calculation:"); goto end; } // Get stats if (stat(path, &gf_stat)) { g_debug("%s: Error while stat() on %s", __func__, path); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot stat %s", path); ret = CRE_IO; goto end; } gf_size = gf_stat.st_size; gf_time = gf_stat.st_mtime; if (stat(cpath, &cgf_stat)) { g_debug("%s: Error while stat() on %s", __func__, cpath); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot stat %s", cpath); ret = CRE_IO; goto end; } cgf_size = cgf_stat.st_size; cgf_time = cgf_stat.st_mtime; if (out_stat->hdr_checksum) { cgf_hdrsize = out_stat->hdr_size; hdr_checksum_str = cr_checksum_name_str(out_stat->hdr_checksum_type); hdrchecksum = out_stat->hdr_checksum; } // Results record->checksum = g_string_chunk_insert(record->chunk, checksum); record->checksum_type = g_string_chunk_insert(record->chunk, checksum_str); record->checksum_open = NULL; record->checksum_open_type = NULL; record->checksum_header = NULL; record->checksum_header_type = NULL; record->timestamp = gf_time; record->size = gf_size; record->size_open = G_GINT64_CONSTANT(-1); record->size_header = G_GINT64_CONSTANT(-1); crecord->checksum = g_string_chunk_insert(crecord->chunk, cchecksum); 
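// Note (descriptive comment, added): for the compressed record the "open"
// checksum/size filled below refer to the original uncompressed input file
// (path), while checksum/size describe the newly written compressed file.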
crecord->checksum_type = g_string_chunk_insert(crecord->chunk, checksum_str); crecord->checksum_open = g_string_chunk_insert(record->chunk, checksum); crecord->checksum_open_type = g_string_chunk_insert(record->chunk, checksum_str); if (hdr_checksum_str) { crecord->checksum_header = g_string_chunk_insert(crecord->chunk, hdrchecksum); crecord->checksum_header_type = g_string_chunk_insert(crecord->chunk, hdr_checksum_str); } else { crecord->checksum_header = NULL; crecord->checksum_header_type = 0; } crecord->timestamp = cgf_time; crecord->size = cgf_size; crecord->size_open = gf_size; crecord->size_header = cgf_hdrsize; end: g_free(checksum); g_free(cchecksum); g_free(hdrchecksum); return ret; } static int rename_file(gchar **location_real, gchar **location_href, char *checksum, cr_RepomdRecord *md, GError **err) { int x, len; gchar *location_prefix = NULL; const gchar *location_filename = NULL; gchar *new_location_href; gchar *new_location_real; assert(!err || *err == NULL); assert(*location_real && *location_href); location_filename = *location_real; x = strlen(*location_real); for (; x > 0; x--) { if ((*location_real)[x] == '/') { location_prefix = g_strndup(*location_real, x+1); location_filename = cr_get_filename(*location_real+x+1); break; } } if (!location_prefix) // In case that the location_real doesn't contain '/' location_prefix = g_strdup(""); // Check if the rename is necessary // During update with --keep-all-metadata some files (groupfile, // updateinfo, ..) could already have checksum in filenames if (g_str_has_prefix(location_filename, checksum)) { // The filename constains valid checksum g_free(location_prefix); return CRE_OK; } // Skip existing obsolete checksum in the name if there is any len = strlen(location_filename); if (len > 32) { // The filename is long -> it could contains a checksum for (x = 0; x < len; x++) { if (location_filename[x] == '-' && ( x == 32 // Prefix is MD5 checksum || x == 40 // Prefix is SHA1 checksum || x == 64 // Prefix is SHA256 checksum || x == 128 // Prefix is SHA512 checksum )) { location_filename = location_filename + x + 1; break; } } } // Prepare new name new_location_real = g_strconcat(location_prefix, checksum, "-", location_filename, NULL); g_free(location_prefix); // Rename file if (g_file_test (new_location_real, G_FILE_TEST_EXISTS)) { if (remove(new_location_real)) { g_critical("%s: Cannot delete old %s", __func__, new_location_real); g_set_error(err, ERR_DOMAIN, CRE_IO, "File with name %s already exists and cannot be deleted", new_location_real); g_free(new_location_real); return CRE_IO; } } if (rename(*location_real, new_location_real)) { g_critical("%s: Cannot rename %s to %s", __func__, *location_real, new_location_real); g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot rename %s to %s", *location_real, new_location_real); g_free(new_location_real); return CRE_IO; } // Update locations in repomd record *location_real = g_string_chunk_insert(md->chunk, new_location_real); new_location_href = g_strconcat(LOCATION_HREF_PREFIX, checksum, "-", location_filename, NULL); *location_href = g_string_chunk_insert(md->chunk, new_location_href); g_free(new_location_real); g_free(new_location_href); return CRE_OK; } int cr_repomd_record_rename_file(cr_RepomdRecord *md, GError **err) { assert(!err || *err == NULL); if (!md) return CRE_OK; if (!(md->location_real) || !strlen(md->location_real)) { g_debug("Empty locations in repomd record object"); g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Empty locations in repomd record object"); return 
CRE_BADARG; } if (!md->checksum) { g_debug("Record doesn't contain checksum"); g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Record doesn't contain checksum"); return CRE_BADARG; } char *checksum = md->checksum; int retval = rename_file(&(md->location_real), &(md->location_href), checksum, md, err); return retval; } void cr_repomd_record_set_timestamp(cr_RepomdRecord *record, gint64 timestamp) { struct utimbuf times = { timestamp, timestamp }; if (!record) return; record->timestamp = timestamp; // intentionally ignore error utime(record->location_real, ×); } void cr_repomd_record_load_contentstat(cr_RepomdRecord *record, cr_ContentStat *stats) { if (!stats) return; record->checksum_open = cr_safe_string_chunk_insert(record->chunk, stats->checksum); record->checksum_open_type = cr_safe_string_chunk_insert(record->chunk, cr_checksum_name_str(stats->checksum_type)); record->size_open = stats->size; } void cr_repomd_record_load_zck_contentstat(cr_RepomdRecord *record, cr_ContentStat *stats) { if (!stats) return; record->checksum_header = cr_safe_string_chunk_insert(record->chunk, stats->hdr_checksum); record->checksum_header_type = cr_safe_string_chunk_insert(record->chunk, cr_checksum_name_str(stats->hdr_checksum_type)); record->size_header = stats->hdr_size; } cr_Repomd * cr_repomd_new() { cr_Repomd *repomd = g_malloc0(sizeof(cr_Repomd)); repomd->chunk = g_string_chunk_new(0); return repomd; } cr_Repomd * cr_repomd_copy(cr_Repomd *orig) { cr_Repomd *new = cr_repomd_new(); cr_safe_string_chunk_insert(new->chunk, orig->revision); cr_safe_string_chunk_insert(new->chunk, orig->repoid); cr_safe_string_chunk_insert(new->chunk, orig->repoid_type); cr_safe_string_chunk_insert(new->chunk, orig->contenthash); cr_safe_string_chunk_insert(new->chunk, orig->contenthash_type); for (GSList *elem = orig->repo_tags; elem; elem = g_slist_next(elem)) { gchar *str = elem->data; cr_repomd_add_repo_tag(new, str); } new->repo_tags = g_slist_reverse(new->repo_tags); for (GSList *elem = orig->content_tags; elem; elem = g_slist_next(elem)) { gchar *str = elem->data; cr_repomd_add_content_tag(new, str); } new->content_tags = g_slist_reverse(new->content_tags); for (GSList *elem = orig->distro_tags; elem; elem = g_slist_next(elem)) { cr_DistroTag *tag = elem->data; cr_repomd_add_distro_tag(new, tag->cpeid, tag->val); } new->distro_tags = g_slist_reverse(new->distro_tags); for (GSList *elem = orig->records; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *rec = elem->data; rec = cr_repomd_record_copy(rec); cr_repomd_set_record(new, rec); } new->records = g_slist_reverse(new->records); return new; } void cr_repomd_free(cr_Repomd *repomd) { if (!repomd) return; cr_slist_free_full(repomd->records, (GDestroyNotify) cr_repomd_record_free ); g_slist_free(repomd->repo_tags); cr_slist_free_full(repomd->distro_tags, (GDestroyNotify) g_free); g_slist_free(repomd->content_tags); g_string_chunk_free(repomd->chunk); g_free(repomd); } void cr_repomd_set_record(cr_Repomd *repomd, cr_RepomdRecord *record) { if (!repomd || !record) return; cr_RepomdRecord *delrec = NULL; // Remove all existing record of the same type while ((delrec = cr_repomd_get_record(repomd, record->type)) != NULL) { cr_repomd_detach_record(repomd, delrec); cr_repomd_record_free(delrec); } repomd->records = g_slist_append(repomd->records, record); } void cr_repomd_set_revision(cr_Repomd *repomd, const char *revision) { if (!repomd) return; repomd->revision = cr_safe_string_chunk_insert(repomd->chunk, revision); } void cr_repomd_set_repoid(cr_Repomd *repomd, const 
char *repoid, const char *type) { if (!repomd) return; repomd->repoid = cr_safe_string_chunk_insert(repomd->chunk, repoid); repomd->repoid_type = cr_safe_string_chunk_insert(repomd->chunk, type); } void cr_repomd_set_contenthash(cr_Repomd *repomd, const char *hash, const char *type) { if (!repomd) return; repomd->contenthash = cr_safe_string_chunk_insert(repomd->chunk, hash); repomd->contenthash_type = cr_safe_string_chunk_insert(repomd->chunk, type); } void cr_repomd_add_distro_tag(cr_Repomd *repomd, const char *cpeid, const char *tag) { cr_DistroTag *distro; if (!repomd || !tag) return; distro = cr_distrotag_new(); distro->cpeid = cr_safe_string_chunk_insert(repomd->chunk, cpeid); distro->val = cr_safe_string_chunk_insert(repomd->chunk, tag); repomd->distro_tags = g_slist_append(repomd->distro_tags, (gpointer) distro); } void cr_repomd_add_repo_tag(cr_Repomd *repomd, const char *tag) { if (!repomd || !tag) return; repomd->repo_tags = g_slist_append(repomd->repo_tags, cr_safe_string_chunk_insert(repomd->chunk, tag)); } void cr_repomd_add_content_tag(cr_Repomd *repomd, const char *tag) { if (!repomd || !tag) return; repomd->content_tags = g_slist_append(repomd->content_tags, cr_safe_string_chunk_insert(repomd->chunk, tag)); } void cr_repomd_detach_record(cr_Repomd *repomd, cr_RepomdRecord *rec) { if (!repomd || !rec) return; repomd->records = g_slist_remove(repomd->records, rec); } void cr_repomd_remove_record(cr_Repomd *repomd, const char *type) { cr_RepomdRecord *rec = cr_repomd_get_record(repomd, type); if (!rec) return; cr_repomd_detach_record(repomd, rec); cr_repomd_record_free(rec); } cr_RepomdRecord * cr_repomd_get_record(cr_Repomd *repomd, const char *type) { if (!repomd || !type) return NULL; for (GSList *elem = repomd->records; elem; elem = g_slist_next(elem)) { cr_RepomdRecord *rec = elem->data; assert(rec); if (!g_strcmp0(rec->type, type)) return rec; } return NULL; } static gint record_type_value(const char *type) { if (!g_strcmp0(type, "primary")) return 1; if (!g_strcmp0(type, "filelists")) return 2; if (!g_strcmp0(type, "other")) return 3; if (!g_strcmp0(type, "primary_db")) return 4; if (!g_strcmp0(type, "filelists_db")) return 5; if (!g_strcmp0(type, "other_db")) return 6; if (!g_strcmp0(type, "primary_zck")) return 7; if (!g_strcmp0(type, "filelists_zck")) return 8; if (!g_strcmp0(type, "other_zck")) return 9; return 10; } static gint record_cmp(gconstpointer _a, gconstpointer _b) { const cr_RepomdRecord *a = _a; const cr_RepomdRecord *b = _b; gint a_val = record_type_value(a->type); gint b_val = record_type_value(b->type); // Keep base metadata files sorted by logical order (primary, filelists, ..) if (a_val < b_val) return -1; if (a_val > b_val) return 1; // Other metadta sort by the type gint ret = g_strcmp0(a->type, b->type); if (ret) return ret; // If even the type is not sufficient, use location href ret = g_strcmp0(a->location_href, b->location_href); // If even the location href is not sufficient, use the location base return ret ? 
ret : g_strcmp0(a->location_base, b->location_base); } void cr_repomd_sort_records(cr_Repomd *repomd) { if (!repomd) return; repomd->records = g_slist_sort(repomd->records, record_cmp); } createrepo_c-0.17.0/src/repomd.h000066400000000000000000000265621400672373200164770ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_REPOMD_H__ #define __C_CREATEREPOLIB_REPOMD_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" #include "compression_wrapper.h" #include "package.h" /** \defgroup repomd Repomd API. * * Module for generating repomd.xml. * * Example: * * \code * char *xml; * cr_Repomd *md = cr_repomd_new(); * cr_RepomdRecord *rec; * * cr_xml_dump_init(); * * // Set some repomd stuff * cr_repomd_set_revision(md, "007"); * cr_repomd_add_repo_tag(md, "repotag"); * cr_repomd_add_content_tag(md, "contenttag"); * cr_repomd_add_distro_tag(md, "foocpeid", "data"); * * // Create record for new metadata file * rec = cr_repomd_record_new("primary", "/foo/bar/repodata/primary.xml.xz"); * // Calculate all needed parameters (uncompresed size, checksum, ...) 
* cr_repomd_record_fill(rec, CR_CHECKSUM_SHA256); * // Rename source file - insert checksum into the filename * cr_repomd_record_rename_file(rec) * // Append the record into the repomd * cr_repomd_set_record(md, rec); * * // Get repomd.xml content * xml = cr_xml_dump_repomd(md, NULL); * * // Cleanup * cr_repomd_free(md); * cr_xml_dump_cleanup(); * \endcode * * \addtogroup repomd * @{ */ /** Internal representation of cr_RepomdRecord object */ typedef struct { char *type; /*!< type of record */ char *location_real; /*!< real path to the file */ char *location_href; /*!< location of the file (in repomd.xml) */ char *location_base; /*!< base location of the file */ char *checksum; /*!< checksum of file */ char *checksum_type; /*!< checksum type */ char *checksum_open; /*!< checksum of uncompressed file */ char *checksum_open_type; /*!< checksum type of uncompressed file */ char *checksum_header; /*!< checksum of header */ char *checksum_header_type; /*!< checksum type of header */ gint64 timestamp; /*!< mtime of the file */ gint64 size; /*!< size of file in bytes */ gint64 size_open; /*!< size of uncompressed file in bytes */ gint64 size_header; /*!< header size */ int db_ver; /*!< version of database */ GStringChunk *chunk; /*!< String chunk */ } cr_RepomdRecord; /** Distro tag structure */ typedef struct { gchar *cpeid; /*!< cpeid value or NULL */ gchar *val; /*!< Tag value */ } cr_DistroTag; /** Internal representation of cr_Repomd object */ typedef struct { gchar *revision; /*!< Revison */ gchar *repoid; /*!< OBSOLETE, replaced by contenthash */ gchar *repoid_type; /*!< OBSOLETE, replaced by contenthash_type */ gchar *contenthash; /*!< Content hash */ gchar *contenthash_type; /*!< Content hash type ("sha256", ...) */ GSList *repo_tags; /*!< List of strings */ GSList *content_tags; /*!< List of strings */ GSList *distro_tags; /*!< List of cr_DistroTag* */ GSList *records; /*!< List with cr_RepomdRecords */ GStringChunk *chunk; /*!< String chunk for repomd strings (Note: RepomdRecord strings are stored in RepomdRecord->chunk) */ } cr_Repomd; /** Creates (alloc) new cr_RepomdRecord object * @param type Type of record ("primary", "filelists", ..) * @param path path to the compressed file */ cr_RepomdRecord *cr_repomd_record_new(const char *type, const char *path); /** Destroy cr_RepomdRecord object. * NOTE: Do NOT use this function on objects attached to cr_Repomd * (by cr_repomd_set_record). * @param record cr_RepomdRecord object */ void cr_repomd_record_free(cr_RepomdRecord *record); /** Copy cr_RepomdRecord object. * @param orig cr_RepomdRecord object * @return copy of cr_RepomdRecord object */ cr_RepomdRecord *cr_repomd_record_copy(const cr_RepomdRecord *orig); /** Fill unfilled items in the cr_RepomdRecord (calculate checksums, * get file size before/after compression, etc.). * Note: If checksum_open, checksum_open_type and size_open are filed * then their calculation will be skiped. This items could be filled * directly on our own or use function for load them from a cr_ContentStat. * If no open stats are supplied, then this function has to decompress * the file for the open checksum calculation. * @param record cr_RepomdRecord object * @param checksum_type type of checksum to use * @param err GError ** * @return cr_Error code */ int cr_repomd_record_fill(cr_RepomdRecord *record, cr_ChecksumType checksum_type, GError **err); /** Almost analogous to cr_repomd_record_fill but suitable for groupfile. * Record must be set with the path to existing non compressed groupfile. 
* Compressed file will be created and compressed_record updated. * @param record cr_RepomdRecord initialized to an existing * uncompressed file * @param compressed_record empty cr_RepomdRecord object that will by filled * @param checksum_type type of checksums * @param compression type of compression * @param zck_dict_dir Location of zchunk dictionaries (NULL if unused) * @param err GError ** * @return cr_Error code */ int cr_repomd_record_compress_and_fill(cr_RepomdRecord *record, cr_RepomdRecord *compressed_record, cr_ChecksumType checksum_type, cr_CompressionType compression, const char *zck_dict_dir, GError **err); /** Add a hash as prefix to the filename. * @param record cr_RepomdRecord of file to be renamed * @param err GError ** * @return cr_Error code */ int cr_repomd_record_rename_file(cr_RepomdRecord *record, GError **err); /** Set timestamp of the file. Needed to reproduce bit-by-bit identical metadata. * @param record cr_RepomdRecord of file to be renamed * @param timestamp timestamp in number of seconds since 1970-01-01 */ void cr_repomd_record_set_timestamp(cr_RepomdRecord *record, gint64 timestamp); /** Load the open stats (checksum_open, checksum_open_type and size_open) * from the cr_ContentStat object. * @param record cr_RepomdRecord * @param stats cr_ContentStat */ void cr_repomd_record_load_contentstat(cr_RepomdRecord *record, cr_ContentStat *stats); /** Load the zchunk stats (zck_header_checksum, zck_header_checksum_type and zck_header_size) * from the cr_ContentStat object. * @param record cr_RepomdRecord * @param stats cr_ContentStat */ void cr_repomd_record_load_zck_contentstat(cr_RepomdRecord *record, cr_ContentStat *stats); /** Create new empty cr_Repomd object wich represents content of repomd.xml. */ cr_Repomd *cr_repomd_new(); /** Create copy of cr_Repomd * @param repomd cr_Repomd object * @return Copy of the input cr_Repomd object */ cr_Repomd *cr_repomd_copy(cr_Repomd *repomd); /** Set cr_Repomd record into cr_Repomd object. * @param repomd cr_Repomd object * @param record cr_RepomdRecord object */ void cr_repomd_set_record(cr_Repomd *repomd, cr_RepomdRecord *record); /** Set custom revision string of repomd. * @param repomd cr_Repomd object * @param revision revision string */ void cr_repomd_set_revision(cr_Repomd *repomd, const char *revision); /** Set a repoid - OBSOLETE, use cr_repomd_set_contenthash instead * @param repomd cr_Repomd object * @param repoid RepoId * @param type Type of hash function used to calculate repoid */ void cr_repomd_set_repoid(cr_Repomd *repomd, const char *repoid, const char *type); /** Set a contenthash * @param repomd cr_Repomd object * @param hash content hash * @param type Type of hash function */ void cr_repomd_set_contenthash(cr_Repomd *repomd, const char *hash, const char *type); /** Add distro tag. * @param repomd cr_Repomd object * @param cpeid cpeid string (could be NULL) * @param tag distro tag string */ void cr_repomd_add_distro_tag(cr_Repomd *repomd, const char *cpeid, const char *tag); /** Add repo tag. * @param repomd cr_Repomd object * @param tag repo tag */ void cr_repomd_add_repo_tag(cr_Repomd *repomd, const char *tag); /** Add content tag. * @param repomd cr_Repomd object * @param tag content tag */ void cr_repomd_add_content_tag(cr_Repomd *repomd, const char *tag); /** Get repomd record from the repomd object. * @param repomd cr_Repomd object * @param type Type of the record ("primary", "primary_db", ..) 
* @return Pointer to a record of desired type or NULL */ cr_RepomdRecord *cr_repomd_get_record(cr_Repomd *repomd, const char *type); /** Detach this record from the repomd records list. * Note: Responsibility of freeing record memory no longer belongs to * cr_Repomd object. * @param repomd cr_Repomd object * @param rec Record to be removed */ void cr_repomd_detach_record(cr_Repomd *repomd, cr_RepomdRecord *rec); /** Remove first record of the specified type * @param repomd cr_Repomd object * @param type Type of the record ("primary", "primary_db", ..) */ void cr_repomd_remove_record(cr_Repomd *repomd, const char *type); /** Records are stored in order they were added to the repomd. * Because sometimes deterministic output is desirable this function * exists. * @param repomd cr_Repomd object */ void cr_repomd_sort_records(cr_Repomd *repomd); /** Frees cr_Repomd object and all its cr_RepomdRecord objects * @param repomd cr_Repomd object */ void cr_repomd_free(cr_Repomd *repomd); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_REPOMD_H__ */ createrepo_c-0.17.0/src/repomd_internal.h000066400000000000000000000021431400672373200203600ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_REPOMD_INTERNAL_H__ #define __C_CREATEREPOLIB_REPOMD_INTERNAL_H__ #ifdef __cplusplus extern "C" { #endif #include #include "error.h" cr_DistroTag *cr_distrotag_new(); #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_REPOMD_INTERNAL_H__ */ createrepo_c-0.17.0/src/sqlite.c000066400000000000000000001357601400672373200165060ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * Copyright (C) 2008-2011 James Antill * Copyright (C) 2006-2007 James Bowes * Copyright (C) 2006-2007 Paul Nasrat * Copyright (C) 2006 Tambet Ingo * Copyright (C) 2006-2010 Seth Vidal * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include "misc.h" #include "sqlite.h" #include "error.h" #include "xml_dump.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define ENCODED_PACKAGE_FILE_FILES 2048 #define ENCODED_PACKAGE_FILE_TYPES 60 struct _DbPrimaryStatements { sqlite3 *db; sqlite3_stmt *pkg_handle; sqlite3_stmt *provides_handle; sqlite3_stmt *conflicts_handle; sqlite3_stmt *obsoletes_handle; sqlite3_stmt *requires_handle; sqlite3_stmt *suggests_handle; sqlite3_stmt *enhances_handle; sqlite3_stmt *recommends_handle; sqlite3_stmt *supplements_handle; sqlite3_stmt *files_handle; }; struct _DbFilelistsStatements { sqlite3 *db; sqlite3_stmt *package_id_handle; sqlite3_stmt *filelists_handle; }; struct _DbOtherStatements { sqlite3 *db; sqlite3_stmt *package_id_handle; sqlite3_stmt *changelog_handle; }; static inline int cr_sqlite3_bind_text(sqlite3_stmt *stmt, int i, const char *orig_content, int len, void(*desctructor)(void *)) { int ret; int free_content = 0; unsigned char *content; if (!orig_content) { content = (unsigned char *) orig_content; } else if (xmlCheckUTF8((const unsigned char *) orig_content) && !cr_hascontrollchars((const unsigned char *) orig_content)) { content = (unsigned char *) orig_content; } else { desctructor = SQLITE_TRANSIENT; size_t llen = strlen((const char *) orig_content); content = malloc(sizeof(unsigned char)*llen*2 + 1); cr_latin1_to_utf8((const unsigned char *) orig_content, content); free_content = 1; } ret = sqlite3_bind_text(stmt, i, (char *) content, len, desctructor); if (free_content) free(content); return ret; } /* * Base DB operation * - Open db * - Creation of tables * - Tweaking of db settings * - Creation of info table * - Creation of index * - Close db */ static sqlite3 * open_sqlite_db(const char *path, GError **err) { int rc; sqlite3 *db = NULL; assert(!err || *err == NULL); rc = sqlite3_open(path, &db); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not open SQL database: %s", sqlite3_errmsg(db)); sqlite3_close(db); db = NULL; } return db; } static void db_create_dbinfo_table(sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE TABLE db_info (dbversion INTEGER, checksum TEXT)"; rc = sqlite3_exec(db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create db_info table: %s", sqlite3_errmsg (db)); } } static void db_create_primary_tables(sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE TABLE packages (" " pkgKey INTEGER PRIMARY KEY," " pkgId TEXT," " name TEXT," " arch TEXT," " version TEXT," " epoch TEXT," " release TEXT," " summary TEXT," " description TEXT," " url TEXT," " time_file INTEGER," " time_build INTEGER," " rpm_license TEXT," " rpm_vendor TEXT," " rpm_group TEXT," " rpm_buildhost TEXT," " rpm_sourcerpm TEXT," " rpm_header_start INTEGER," " rpm_header_end INTEGER," " rpm_packager TEXT," " size_package INTEGER," " size_installed INTEGER," " size_archive INTEGER," " location_href TEXT," " location_base TEXT," " checksum_type TEXT)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create packages table: %s", sqlite3_errmsg (db)); return; } sql = "CREATE TABLE files (" " name TEXT," " type TEXT," " pkgKey INTEGER)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create files table: %s", sqlite3_errmsg (db)); return; } sql = "CREATE 
TABLE %s (" " name TEXT," " flags TEXT," " epoch TEXT," " version TEXT," " release TEXT," " pkgKey INTEGER %s)"; const char *deps[] = { "requires", "provides", "conflicts", "obsoletes", "suggests", "enhances", "recommends", "supplements", NULL }; int i; for (i = 0; deps[i]; i++) { const char *prereq; char *query; if (!strcmp(deps[i], "requires")) { prereq = ", pre BOOLEAN DEFAULT FALSE"; } else prereq = ""; query = g_strdup_printf (sql, deps[i], prereq); rc = sqlite3_exec (db, query, NULL, NULL, NULL); g_free (query); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create %s table: %s", deps[i], sqlite3_errmsg (db)); return; } } sql = "CREATE TRIGGER removals AFTER DELETE ON packages" " BEGIN" " DELETE FROM files WHERE pkgKey = old.pkgKey;" " DELETE FROM requires WHERE pkgKey = old.pkgKey;" " DELETE FROM provides WHERE pkgKey = old.pkgKey;" " DELETE FROM conflicts WHERE pkgKey = old.pkgKey;" " DELETE FROM obsoletes WHERE pkgKey = old.pkgKey;" " DELETE FROM suggests WHERE pkgKey = old.pkgKey;" " DELETE FROM enhances WHERE pkgKey = old.pkgKey;" " DELETE FROM recommends WHERE pkgKey = old.pkgKey;" " DELETE FROM supplements WHERE pkgKey = old.pkgKey;" " END;"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create removals trigger: %s", sqlite3_errmsg (db)); return; } } static void db_create_filelists_tables(sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE TABLE packages (" " pkgKey INTEGER PRIMARY KEY," " pkgId TEXT)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create packages table: %s", sqlite3_errmsg (db)); return; } sql = "CREATE TABLE filelist (" " pkgKey INTEGER," " dirname TEXT," " filenames TEXT," " filetypes TEXT)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create filelist table: %s", sqlite3_errmsg (db)); return; } sql = "CREATE TRIGGER remove_filelist AFTER DELETE ON packages" " BEGIN" " DELETE FROM filelist WHERE pkgKey = old.pkgKey;" " END;"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create remove_filelist trigger: %s", sqlite3_errmsg (db)); return; } } static void db_create_other_tables (sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE TABLE packages (" " pkgKey INTEGER PRIMARY KEY," " pkgId TEXT)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create packages table: %s", sqlite3_errmsg (db)); return; } sql = "CREATE TABLE changelog (" " pkgKey INTEGER," " author TEXT," " date INTEGER," " changelog TEXT)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create changelog table: %s", sqlite3_errmsg (db)); return; } sql = "CREATE TRIGGER remove_changelogs AFTER DELETE ON packages" " BEGIN" " DELETE FROM changelog WHERE pkgKey = old.pkgKey;" " END;"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create remove_changelogs trigger: %s", sqlite3_errmsg (db)); return; } } static void db_tweak(sqlite3 *db, G_GNUC_UNUSED GError **err) { assert(!err || *err == NULL); // Do not wait for disk writes to be fully // written to disk before continuing sqlite3_exec (db, "PRAGMA synchronous = 
OFF", NULL, NULL, NULL); sqlite3_exec (db, "PRAGMA journal_mode = MEMORY", NULL, NULL, NULL); sqlite3_exec (db, "PRAGMA temp_store = MEMORY", NULL, NULL, NULL); } static void db_index_primary_tables (sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE INDEX IF NOT EXISTS packagename ON packages (name)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create packagename index: %s", sqlite3_errmsg (db)); return; } sql = "CREATE INDEX IF NOT EXISTS packageId ON packages (pkgId)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create packageId index: %s", sqlite3_errmsg (db)); return; } sql = "CREATE INDEX IF NOT EXISTS filenames ON files (name)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create filenames index: %s", sqlite3_errmsg (db)); return; } sql = "CREATE INDEX IF NOT EXISTS pkgfiles ON files (pkgKey)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create index on files table: %s", sqlite3_errmsg (db)); return; } int i; const char *deps[] = { "requires", "provides", "conflicts", "obsoletes", "suggests", "enhances", "recommends", "supplements", NULL }; const char *pkgindexsql = "CREATE INDEX IF NOT EXISTS pkg%s on %s (pkgKey)"; const char *nameindexsql = "CREATE INDEX IF NOT EXISTS %sname ON %s (name)"; for (i = 0; deps[i]; i++) { char *query; query = g_strdup_printf(pkgindexsql, deps[i], deps[i]); rc = sqlite3_exec (db, query, NULL, NULL, NULL); g_free (query); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create index on %s table: %s", deps[i], sqlite3_errmsg (db)); return; } if (i < 2) { query = g_strdup_printf(nameindexsql, deps[i], deps[i]); rc = sqlite3_exec (db, query, NULL, NULL, NULL); g_free(query); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create %sname index: %s", deps[i], sqlite3_errmsg (db)); return; } } } } static void db_index_filelists_tables (sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE INDEX IF NOT EXISTS keyfile ON filelist (pkgKey)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create keyfile index: %s", sqlite3_errmsg (db)); return; } sql = "CREATE INDEX IF NOT EXISTS pkgId ON packages (pkgId)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create pkgId index: %s", sqlite3_errmsg (db)); return; } sql = "CREATE INDEX IF NOT EXISTS dirnames ON filelist (dirname)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create dirnames index: %s", sqlite3_errmsg (db)); return; } } static void db_index_other_tables (sqlite3 *db, GError **err) { int rc; const char *sql; assert(!err || *err == NULL); sql = "CREATE INDEX IF NOT EXISTS keychange ON changelog (pkgKey)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create keychange index: %s", sqlite3_errmsg (db)); return; } sql = "CREATE INDEX IF NOT EXISTS pkgId ON packages (pkgId)"; rc = sqlite3_exec (db, sql, NULL, NULL, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not create pkgId index: %s", 
sqlite3_errmsg (db)); return; } } /* * Package insertion stuff */ int cr_db_dbinfo_update(cr_SqliteDb *sqlitedb, const char *checksum, GError **err) { int rc; sqlite3_stmt *handle; const char *query = "INSERT INTO db_info (dbversion, checksum) VALUES (?, ?)"; assert(sqlitedb); assert(!err || *err == NULL); /* Prepare insert statement */ rc = sqlite3_prepare_v2(sqlitedb->db, query, -1, &handle, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Cannot prepare db_info update: %s", sqlite3_errmsg(sqlitedb->db)); g_critical("%s: Cannot prepare db_info update statement: %s", __func__, sqlite3_errmsg(sqlitedb->db)); sqlite3_finalize(handle); return CRE_DB; } /* Delete all previous content of db_info */ sqlite3_exec(sqlitedb->db, "DELETE FROM db_info", NULL, NULL, NULL); /* Perform insert */ sqlite3_bind_int(handle, 1, CR_DB_CACHE_DBVERSION); cr_sqlite3_bind_text(handle, 2, checksum, -1, SQLITE_STATIC); rc = sqlite3_step(handle); if (rc != SQLITE_DONE) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Cannot update dbinfo table: %s", sqlite3_errmsg (sqlitedb->db)); g_critical("%s: Cannot update dbinfo table: %s", __func__, sqlite3_errmsg(sqlitedb->db)); return CRE_DB; } rc = sqlite3_finalize(handle); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Cannot update dbinfo table: %s", sqlite3_errmsg (sqlitedb->db)); g_critical("%s: Cannot update dbinfo table: %s", __func__, sqlite3_errmsg(sqlitedb->db)); return CRE_DB; } return CRE_OK; } /* * primary.sqlite */ static sqlite3_stmt * db_package_prepare (sqlite3 *db, GError **err) { int rc; sqlite3_stmt *handle = NULL; const char *query; assert(!err || *err == NULL); query = "INSERT INTO packages (" " pkgId, name, arch, version, epoch, release, summary, description," " url, time_file, time_build, rpm_license, rpm_vendor, rpm_group," " rpm_buildhost, rpm_sourcerpm, rpm_header_start, rpm_header_end," " rpm_packager, size_package, size_installed, size_archive," " location_href, location_base, checksum_type) " "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?," " ?, ?, ?, ?, ?, ?, ?)"; rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Cannot prepare packages insertion: %s", sqlite3_errmsg (db)); sqlite3_finalize (handle); handle = NULL; } return handle; } static inline const char * prevent_null(const char *str) { if (!str) return ""; else return str; } static inline const char * force_null(const char *str) { if (!str || str[0] == '\0') return NULL; else return str; } static void db_package_write (sqlite3 *db, sqlite3_stmt *handle, cr_Package *p, GError **err) { int rc; assert(!err || *err == NULL); cr_sqlite3_bind_text (handle, 1, p->pkgId, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 2, p->name, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 3, p->arch, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 4, p->version, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 5, p->epoch, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 6, p->release, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 7, p->summary, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 8, p->description, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 9, force_null(p->url), -1, SQLITE_STATIC); // {null} sqlite3_bind_int (handle, 10, p->time_file); sqlite3_bind_int (handle, 11, p->time_build); cr_sqlite3_bind_text (handle, 12, p->rpm_license, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 13, prevent_null(p->rpm_vendor), -1, SQLITE_STATIC); // "" cr_sqlite3_bind_text (handle, 
14, p->rpm_group, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 15, p->rpm_buildhost, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 16, prevent_null(p->rpm_sourcerpm), -1, SQLITE_STATIC); // "" sqlite3_bind_int (handle, 17, p->rpm_header_start); sqlite3_bind_int (handle, 18, p->rpm_header_end); cr_sqlite3_bind_text (handle, 19, force_null(p->rpm_packager), -1, SQLITE_STATIC); // {null} sqlite3_bind_int64(handle, 20, p->size_package); sqlite3_bind_int64(handle, 21, p->size_installed); sqlite3_bind_int64(handle, 22, p->size_archive); cr_sqlite3_bind_text (handle, 23, p->location_href, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 24, force_null(p->location_base), -1, SQLITE_STATIC); // {null} cr_sqlite3_bind_text (handle, 25, p->checksum_type, -1, SQLITE_STATIC); rc = sqlite3_step (handle); sqlite3_reset (handle); if (rc == SQLITE_DONE) { p->pkgKey = sqlite3_last_insert_rowid (db); } else { g_critical ("Error adding package to db: %s", sqlite3_errmsg(db)); g_set_error(err, ERR_DOMAIN, CRE_DB, "Error adding package to db: %s", sqlite3_errmsg(db)); } } static sqlite3_stmt * db_dependency_prepare (sqlite3 *db, const char *table, GError **err) { int rc; sqlite3_stmt *handle = NULL; char *query; const char *pre_name = ""; const char *pre_value = ""; assert(!err || *err == NULL); if (!strcmp (table, "requires")) { pre_name = ", pre"; pre_value = ", ?"; } query = g_strdup_printf ("INSERT INTO %s (name, flags, epoch, version, release, pkgKey%s) " "VALUES (?, ?, ?, ?, ?, ?%s)", table, pre_name, pre_value); rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL); g_free (query); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Cannot prepare dependency insertion: %s", sqlite3_errmsg (db)); sqlite3_finalize (handle); handle = NULL; } return handle; } static void db_dependency_write (sqlite3 *db, sqlite3_stmt *handle, gint64 pkgKey, cr_Dependency *dep, gboolean isRequirement, GError **err) { int rc; assert(!err || *err == NULL); cr_sqlite3_bind_text (handle, 1, dep->name, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 2, dep->flags, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 3, dep->epoch, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 4, dep->version, -1, SQLITE_STATIC); cr_sqlite3_bind_text (handle, 5, dep->release, -1, SQLITE_STATIC); sqlite3_bind_int (handle, 6, pkgKey); if (isRequirement) { if (dep->pre) cr_sqlite3_bind_text (handle, 7, "TRUE", -1, SQLITE_TRANSIENT); else cr_sqlite3_bind_text (handle, 7, "FALSE", -1, SQLITE_TRANSIENT); } rc = sqlite3_step (handle); sqlite3_reset (handle); if (rc != SQLITE_DONE) { g_critical ("Error adding package dependency to db: %s", sqlite3_errmsg (db)); g_set_error(err, ERR_DOMAIN, CRE_DB, "Error adding package dependency to db: %s", sqlite3_errmsg(db)); } } static sqlite3_stmt * db_file_prepare (sqlite3 *db, GError **err) { int rc; sqlite3_stmt *handle = NULL; const char *query; assert(!err || *err == NULL); query = "INSERT INTO files (name, type, pkgKey) VALUES (?, ?, ?)"; rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not prepare file insertion: %s", sqlite3_errmsg (db)); sqlite3_finalize (handle); handle = NULL; } return handle; } static void db_file_write (sqlite3 *db, sqlite3_stmt *handle, gint64 pkgKey, cr_PackageFile *file, GError **err) { int rc; assert(!err || *err == NULL); gchar *fullpath = g_strconcat(file->path, file->name, NULL); if (!fullpath) return; // Nothing to do if (!cr_is_primary(fullpath)) { g_free(fullpath); return; } const 
char* file_type = file->type; if (!file_type || file_type[0] == '\0') { file_type = "file"; } cr_sqlite3_bind_text (handle, 1, fullpath, -1, SQLITE_TRANSIENT); g_free(fullpath); cr_sqlite3_bind_text (handle, 2, file_type, -1, SQLITE_STATIC); sqlite3_bind_int (handle, 3, pkgKey); rc = sqlite3_step (handle); sqlite3_reset (handle); if (rc != SQLITE_DONE) { g_critical ("Error adding package file to db: %s", sqlite3_errmsg (db)); g_set_error(err, ERR_DOMAIN, CRE_DB, "Error adding package file to db: %s", sqlite3_errmsg(db)); } } /* * filelists.sqlite */ static sqlite3_stmt * db_filelists_prepare (sqlite3 *db, GError **err) { int rc; sqlite3_stmt *handle = NULL; const char *query; assert(!err || *err == NULL); query = "INSERT INTO filelist (pkgKey, dirname, filenames, filetypes) " " VALUES (?, ?, ?, ?)"; rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not prepare filelist insertion: %s", sqlite3_errmsg (db)); sqlite3_finalize (handle); handle = NULL; } return handle; } typedef struct { GString *files; GString *types; } EncodedPackageFile; static EncodedPackageFile * encoded_package_file_new (void) { EncodedPackageFile *enc; enc = g_new0 (EncodedPackageFile, 1); enc->files = g_string_sized_new (ENCODED_PACKAGE_FILE_FILES); enc->types = g_string_sized_new (ENCODED_PACKAGE_FILE_TYPES); return enc; } static void encoded_package_file_free (EncodedPackageFile *file) { g_string_free (file->files, TRUE); g_string_free (file->types, TRUE); g_free (file); } static GHashTable * package_files_to_hash (GSList *files) { GHashTable *hash; GSList *iter; hash = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, (GDestroyNotify) encoded_package_file_free); for (iter = files; iter; iter = iter->next) { cr_PackageFile *file; EncodedPackageFile *enc; char *dir; char *name; file = (cr_PackageFile *) iter->data; dir = file->path; name = file->name; enc = (EncodedPackageFile *) g_hash_table_lookup (hash, dir); if (!enc) { enc = encoded_package_file_new (); g_hash_table_insert (hash, dir, enc); } if (enc->files->len) g_string_append_c (enc->files, '/'); if (!name || name[0] == '\0') // Root directory '/' has empty name g_string_append_c (enc->files, '/'); else g_string_append (enc->files, name); if (!(file->type) || file->type[0] == '\0' || !strcmp (file->type, "file")) g_string_append_c (enc->types, 'f'); else if (!strcmp (file->type, "dir")) g_string_append_c (enc->types, 'd'); else if (!strcmp (file->type, "ghost")) g_string_append_c (enc->types, 'g'); } return hash; } static void cr_db_write_file (sqlite3 *db, sqlite3_stmt *handle, gint64 pkgKey, gpointer key, gpointer value, GError **err) { // key is a path to directory eg. "/etc/X11/xinit/xinitrc.d" // value is a struct eg. { .files="foo/bar/dir", .types="ffd"} int rc; size_t key_len; EncodedPackageFile *file = (EncodedPackageFile *) value; assert(!err || *err == NULL); key_len = strlen((const char *) key); while (key_len > 1 && ((char *) key)[key_len-1] == '/') { // Remove trailing '/' char(s) // If there are only '/' symbols leave only the first one key_len--; } if (key_len == 0) { // Same directory is represented by '.' 
in database key = "."; key_len = 1; } sqlite3_bind_int (handle, 1, pkgKey); cr_sqlite3_bind_text(handle, 2, (const char *) key, (int) key_len, SQLITE_STATIC); cr_sqlite3_bind_text(handle, 3, file->files->str, -1, SQLITE_STATIC); cr_sqlite3_bind_text(handle, 4, file->types->str, -1, SQLITE_STATIC); rc = sqlite3_step (handle); sqlite3_reset (handle); if (rc != SQLITE_DONE) { g_critical ("Error adding file records to db: %s", sqlite3_errmsg (db)); g_set_error(err, ERR_DOMAIN, CRE_DB, "Error adding file records to db : %s", sqlite3_errmsg(db)); } } /* * other.sqlite */ static sqlite3_stmt * db_changelog_prepare (sqlite3 *db, GError **err) { int rc; sqlite3_stmt *handle = NULL; const char *query; assert(!err || *err == NULL); query = "INSERT INTO changelog (pkgKey, author, date, changelog) " " VALUES (?, ?, ?, ?)"; rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not prepare changelog insertion: %s", sqlite3_errmsg (db)); sqlite3_finalize (handle); handle = NULL; } return handle; } // Stuff common for both filelists.sqlite and other.sqlite static sqlite3_stmt * db_package_ids_prepare(sqlite3 *db, GError **err) { int rc; sqlite3_stmt *handle = NULL; const char *query; assert(!err || *err == NULL); query = "INSERT INTO packages (pkgId) VALUES (?)"; rc = sqlite3_prepare_v2 (db, query, -1, &handle, NULL); if (rc != SQLITE_OK) { g_set_error(err, ERR_DOMAIN, CRE_DB, "Can not prepare package ids insertion: %s", sqlite3_errmsg (db)); sqlite3_finalize (handle); handle = NULL; } return handle; } static void db_package_ids_write(sqlite3 *db, sqlite3_stmt *handle, cr_Package *pkg, GError **err) { int rc; assert(!err || *err == NULL); cr_sqlite3_bind_text (handle, 1, pkg->pkgId, -1, SQLITE_STATIC); rc = sqlite3_step (handle); sqlite3_reset (handle); if (rc == SQLITE_DONE) { pkg->pkgKey = sqlite3_last_insert_rowid (db); } else { g_critical("Error adding package to db: %s", sqlite3_errmsg(db)); g_set_error(err, ERR_DOMAIN, CRE_DB, "Error adding package to db: %s", sqlite3_errmsg(db)); } } /* * Module interface */ // Primary.sqlite interface void cr_db_destroy_primary_statements(cr_DbPrimaryStatements stmts) { if (!stmts) return; if (stmts->pkg_handle) sqlite3_finalize(stmts->pkg_handle); if (stmts->provides_handle) sqlite3_finalize(stmts->provides_handle); if (stmts->conflicts_handle) sqlite3_finalize(stmts->conflicts_handle); if (stmts->obsoletes_handle) sqlite3_finalize(stmts->obsoletes_handle); if (stmts->requires_handle) sqlite3_finalize(stmts->requires_handle); if (stmts->suggests_handle) sqlite3_finalize(stmts->suggests_handle); if (stmts->enhances_handle) sqlite3_finalize(stmts->enhances_handle); if (stmts->recommends_handle) sqlite3_finalize(stmts->recommends_handle); if (stmts->supplements_handle) sqlite3_finalize(stmts->supplements_handle); if (stmts->files_handle) sqlite3_finalize(stmts->files_handle); free(stmts); } cr_DbPrimaryStatements cr_db_prepare_primary_statements(sqlite3 *db, GError **err) { assert(!err || *err == NULL); GError *tmp_err = NULL; cr_DbPrimaryStatements ret = malloc(sizeof(*ret)); ret->db = db; ret->pkg_handle = NULL; ret->provides_handle = NULL; ret->conflicts_handle = NULL; ret->obsoletes_handle = NULL; ret->requires_handle = NULL; ret->suggests_handle = NULL; ret->enhances_handle = NULL; ret->recommends_handle = NULL; ret->supplements_handle = NULL; ret->files_handle = NULL; ret->pkg_handle = db_package_prepare(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } 
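// Descriptive comment (added): one INSERT statement is prepared per dependency
// table; if any preparation fails, the error label frees every handle created
// so far via cr_db_destroy_primary_statements().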
ret->provides_handle = db_dependency_prepare(db, "provides", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->conflicts_handle = db_dependency_prepare(db, "conflicts", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->obsoletes_handle = db_dependency_prepare(db, "obsoletes", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->requires_handle = db_dependency_prepare(db, "requires", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->suggests_handle = db_dependency_prepare(db, "suggests", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->enhances_handle = db_dependency_prepare(db, "enhances", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->recommends_handle = db_dependency_prepare(db, "recommends", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->supplements_handle = db_dependency_prepare(db, "supplements", &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->files_handle = db_file_prepare(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } return ret; error: cr_db_destroy_primary_statements(ret); return NULL; } void cr_db_add_primary_pkg(cr_DbPrimaryStatements stmts, cr_Package *pkg, GError **err) { GError *tmp_err = NULL; GSList *iter; assert(!err || *err == NULL); db_package_write(stmts->db, stmts->pkg_handle, pkg, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } for (iter = pkg->provides; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->provides_handle, pkg->pkgKey, (cr_Dependency *) iter->data, FALSE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->conflicts; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->conflicts_handle, pkg->pkgKey, (cr_Dependency *) iter->data, FALSE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->obsoletes; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->obsoletes_handle, pkg->pkgKey, (cr_Dependency *) iter->data, FALSE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->requires; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->requires_handle, pkg->pkgKey, (cr_Dependency *) iter->data, TRUE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->suggests; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->suggests_handle, pkg->pkgKey, (cr_Dependency *) iter->data, TRUE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->enhances; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->enhances_handle, pkg->pkgKey, (cr_Dependency *) iter->data, TRUE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->recommends; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->recommends_handle, pkg->pkgKey, (cr_Dependency *) iter->data, TRUE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->supplements; iter; iter = iter->next) { db_dependency_write(stmts->db, stmts->supplements_handle, pkg->pkgKey, (cr_Dependency *) iter->data, TRUE, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } } for (iter = pkg->files; iter; iter = iter->next) { db_file_write(stmts->db, stmts->files_handle, pkg->pkgKey, (cr_PackageFile *) iter->data, &tmp_err); if 
(tmp_err) { g_propagate_error(err, tmp_err); return; } } } // filelists.sqlite interface void cr_db_destroy_filelists_statements(cr_DbFilelistsStatements stmts) { if (!stmts) return; if (stmts->package_id_handle) sqlite3_finalize(stmts->package_id_handle); if (stmts->filelists_handle) sqlite3_finalize(stmts->filelists_handle); free(stmts); } cr_DbFilelistsStatements cr_db_prepare_filelists_statements(sqlite3 *db, GError **err) { GError *tmp_err = NULL; cr_DbFilelistsStatements ret = malloc(sizeof(*ret)); assert(!err || *err == NULL); ret->db = db; ret->package_id_handle = NULL; ret->filelists_handle = NULL; ret->package_id_handle = db_package_ids_prepare(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->filelists_handle = db_filelists_prepare(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } return ret; error: cr_db_destroy_filelists_statements(ret); return NULL; } void cr_db_add_filelists_pkg(cr_DbFilelistsStatements stmts, cr_Package *pkg, GError **err) { GError *tmp_err = NULL; assert(!err || *err == NULL); // Add record into the package table db_package_ids_write(stmts->db, stmts->package_id_handle, pkg, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } // Add records into the filelist table GHashTable *hash; GHashTableIter iter; gpointer key, value; // Create a hashtable where: // key is a path to directory eg. "/etc/X11/xinit/xinitrc.d" // value is a struct eg. { .files="foo/bar/dir", .types="ffd"} hash = package_files_to_hash(pkg->files); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next (&iter, &key, &value)) { cr_db_write_file(stmts->db, stmts->filelists_handle, pkg->pkgKey, key, value, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); break; } } g_hash_table_destroy(hash); } // other.sqlite interface void cr_db_destroy_other_statements(cr_DbOtherStatements stmts) { if (!stmts) return; if (stmts->package_id_handle) sqlite3_finalize(stmts->package_id_handle); if (stmts->changelog_handle) sqlite3_finalize(stmts->changelog_handle); free(stmts); } cr_DbOtherStatements cr_db_prepare_other_statements(sqlite3 *db, GError **err) { GError *tmp_err = NULL; cr_DbOtherStatements ret = malloc(sizeof(*ret)); assert(!err || *err == NULL); ret->db = db; ret->package_id_handle = NULL; ret->changelog_handle = NULL; ret->package_id_handle = db_package_ids_prepare(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } ret->changelog_handle = db_changelog_prepare(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); goto error; } return ret; error: cr_db_destroy_other_statements(ret); return NULL; } void cr_db_add_other_pkg(cr_DbOtherStatements stmts, cr_Package *pkg, GError **err) { int rc; GSList *iter; cr_ChangelogEntry *entry; GError *tmp_err = NULL; assert(!err || *err == NULL); sqlite3_stmt *handle = stmts->changelog_handle; // Add package record into the packages table db_package_ids_write(stmts->db, stmts->package_id_handle, pkg, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return; } // Add changelog recrods into the changelog table for (iter = pkg->changelogs; iter; iter = iter->next) { entry = (cr_ChangelogEntry *) iter->data; sqlite3_bind_int (handle, 1, pkg->pkgKey); cr_sqlite3_bind_text (handle, 2, entry->author, -1, SQLITE_STATIC); sqlite3_bind_int (handle, 3, entry->date); cr_sqlite3_bind_text (handle, 4, entry->changelog, -1, SQLITE_STATIC); rc = sqlite3_step (handle); sqlite3_reset (handle); if (rc != SQLITE_DONE) { g_critical ("Error 
adding changelog to db: %s", sqlite3_errmsg (stmts->db)); g_set_error(err, ERR_DOMAIN, CRE_DB, "Error adding changelog to db : %s", sqlite3_errmsg(stmts->db)); return; } } } // Function from header file (Public interface of the module) cr_SqliteDb * cr_db_open(const char *path, cr_DatabaseType db_type, GError **err) { cr_SqliteDb *sqlitedb = NULL; int exists; sqlite3 *db = NULL; GError *tmp_err = NULL; void *statements; assert(path); assert(db_type < CR_DB_SENTINEL); assert(!err || *err == NULL); if (path[0] == '\0') { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Bad path: \"%s\"", path); return NULL; } exists = g_file_test(path, G_FILE_TEST_IS_REGULAR); if (exists) { struct stat stat_buf; if (stat(path, &stat_buf) == -1) { g_set_error(err, ERR_DOMAIN, CRE_IO, "Cannot stat %s: %s", path, g_strerror(errno)); return NULL; } if (stat_buf.st_size == 0) // File exists, but is just a placeholder created by g_mkstemp() // because --local-sqlite option was used exists = FALSE; } sqlite3_enable_shared_cache(1); db = open_sqlite_db(path, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return NULL; } sqlite3_exec(db, "BEGIN", NULL, NULL, NULL); db_tweak(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); sqlite3_close(db); return NULL; } db_create_dbinfo_table(db, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); sqlite3_close(db); return NULL; } if (!exists) { // Do not recreate tables, indexes and triggers if db has existed. switch (db_type) { case CR_DB_PRIMARY: db_create_primary_tables(db, &tmp_err); break; case CR_DB_FILELISTS: db_create_filelists_tables(db, &tmp_err); break; case CR_DB_OTHER: db_create_other_tables(db, &tmp_err); break; default: g_critical("%s: Bad db_type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type"); return NULL; } if (tmp_err) { g_propagate_error(err, tmp_err); sqlite3_close(db); return NULL; } } // Compile SQL statements switch (db_type) { case CR_DB_PRIMARY: statements = cr_db_prepare_primary_statements(db, &tmp_err); break; case CR_DB_FILELISTS: statements = cr_db_prepare_filelists_statements(db, &tmp_err); break; case CR_DB_OTHER: statements = cr_db_prepare_other_statements(db, &tmp_err); break; default: g_critical("%s: Bad db_type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type"); return NULL; } if (!statements) { g_propagate_error(err, tmp_err); sqlite3_close(db); return NULL; } sqlitedb = g_new0(cr_SqliteDb, 1); sqlitedb->db = db; sqlitedb->type = db_type; switch (db_type) { case CR_DB_PRIMARY: sqlitedb->statements.pri = statements; break; case CR_DB_FILELISTS: sqlitedb->statements.fil = statements; break; case CR_DB_OTHER: sqlitedb->statements.oth = statements; break; default: g_critical("%s: Bad db_type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type"); return NULL; } return sqlitedb; } int cr_db_close(cr_SqliteDb *sqlitedb, GError **err) { GError *tmp_err = NULL; assert(!err || *err == NULL); if (!sqlitedb) return CRE_OK; switch (sqlitedb->type) { case CR_DB_PRIMARY: db_index_primary_tables(sqlitedb->db, &tmp_err); cr_db_destroy_primary_statements(sqlitedb->statements.pri); break; case CR_DB_FILELISTS: db_index_filelists_tables(sqlitedb->db, &tmp_err); cr_db_destroy_filelists_statements(sqlitedb->statements.fil); break; case CR_DB_OTHER: db_index_other_tables(sqlitedb->db, &tmp_err); cr_db_destroy_other_statements(sqlitedb->statements.oth); break; default: g_critical("%s: Bad db type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, 
CRE_ASSERT, "Bad db type"); return CRE_ASSERT; } if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } sqlite3_exec (sqlitedb->db, "COMMIT", NULL, NULL, NULL); sqlite3_close(sqlitedb->db); g_free(sqlitedb); return CRE_OK; } int cr_db_add_pkg(cr_SqliteDb *sqlitedb, cr_Package *pkg, GError **err) { GError *tmp_err = NULL; assert(sqlitedb); assert(sqlitedb->type < CR_DB_SENTINEL); assert(!err || *err == NULL); if (!pkg) return CRE_OK; switch (sqlitedb->type) { case CR_DB_PRIMARY: cr_db_add_primary_pkg(sqlitedb->statements.pri, pkg, &tmp_err); break; case CR_DB_FILELISTS: cr_db_add_filelists_pkg(sqlitedb->statements.fil, pkg, &tmp_err); break; case CR_DB_OTHER: cr_db_add_other_pkg(sqlitedb->statements.oth, pkg, &tmp_err); break; default: g_critical("%s: Bad db type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad db type"); return CRE_ASSERT; } if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } return CRE_OK; } createrepo_c-0.17.0/src/sqlite.h000066400000000000000000000131251400672373200165010ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_SQLITE_H__ #define __C_CREATEREPOLIB_SQLITE_H__ #include #include #include "package.h" #ifdef __cplusplus extern "C" { #endif /** \defgroup sqlite SQLite metadata API. * * Module for writing sqlite metadata databases. * * Example: * \code * cr_Package *pkg; * cr_SqliteDb *primary_db; * * // Load pkg (See parsepkg or parsehdr module) * * // Create primary sqlite database * primary_db = cr_db_open_primary("/foo/bar/repodata/primary.sqlite", NULL); * * // Add all packages here * cr_db_add_pkg(primary_db, pkg, NULL); * * // Add checksum of XML version of file (primary in this case) * cr_db_dbinfo_update(primary_db, "foochecksum", NULL); * * // Cleanup * cr_db_close(primary_db, NULL); * \endcode * * \addtogroup sqlite * @{ */ #define CR_DB_CACHE_DBVERSION 10 /*!< Version of DB api */ /** Database type. */ typedef enum { CR_DB_PRIMARY, /*!< primary */ CR_DB_FILELISTS, /*!< filelists */ CR_DB_OTHER, /*!< other */ CR_DB_SENTINEL, /*!< sentinel of the list */ } cr_DatabaseType; typedef struct _DbPrimaryStatements * cr_DbPrimaryStatements; /*!< Compiled primary database statements */ typedef struct _DbFilelistsStatements * cr_DbFilelistsStatements; /*!< Compiled filelists database statements */ typedef struct _DbOtherStatements * cr_DbOtherStatements; /*!< Compiled other database statements */ /** Union of precompiled database statements */ typedef union { cr_DbPrimaryStatements pri; /*!< Primary statements */ cr_DbFilelistsStatements fil; /*!< Filelists statements */ cr_DbOtherStatements oth; /*!< Other statements */ } cr_Statements; /** cr_SqliteDb structure. 
*/ typedef struct { sqlite3 *db; /*!< Sqlite database */ cr_DatabaseType type; /*!< Type of Sqlite database. */ cr_Statements statements; /*!< Compiled SQL statements */ } cr_SqliteDb; /** Macro over cr_db_open function. Open (create new) primary sqlite sqlite db. * - creates db file * - creates primary tables * - creates info table * - tweak some db params * @param PATH Path to the db file. * @param ERR **GError * @return Opened db or NULL on error */ #define cr_db_open_primary(PATH, ERR) cr_db_open(PATH, CR_DB_PRIMARY, ERR) /** Macro over cr_db_open function. Open (create new) filelists sqlite sqlite db. * - creates db file * - creates filelists tables * - creates info table * - tweak some db params * @param PATH Path to the db file. * @param ERR **GError * @return Opened db or NULL on error */ #define cr_db_open_filelists(PATH, ERR) cr_db_open(PATH, CR_DB_FILELISTS, ERR) /** Macro over cr_db_open function. Open (create new) other sqlite sqlite db. * - creates db file * - opens transaction * - creates other tables * - creates info table * - tweak some db params * @param PATH Path to the db file. * @param ERR **GError * @return Opened db or NULL on error */ #define cr_db_open_other(PATH, ERR) cr_db_open(PATH, CR_DB_OTHER, ERR) /** Open (create new) other sqlite sqlite db. * - creates db file * - opens transaction * - creates other tables * - creates info table * - tweak some db params * @param path Path to the db file. * @param db_type Type of database (primary, filelists, other) * @param err **GError * @return Opened db or NULL on error */ cr_SqliteDb *cr_db_open(const char *path, cr_DatabaseType db_type, GError **err); /** Add package into the database. * @param sqlitedb open db connection * @param pkg package object * @param err **GError * @return cr_Error code */ int cr_db_add_pkg(cr_SqliteDb *sqlitedb, cr_Package *pkg, GError **err); /** Insert record into the updateinfo table * @param sqlitedb open db connection * @param checksum compressed xml file checksum * @param err **GError * @return cr_Error code */ int cr_db_dbinfo_update(cr_SqliteDb *sqlitedb, const char *checksum, GError **err); /** Close db. * - creates indexes on tables * - commits transaction * - closes db * @param sqlitedb open db connection * @param err **GError * @return cr_Error code */ int cr_db_close(cr_SqliteDb *sqlitedb, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_SQLITE_H__ */ createrepo_c-0.17.0/src/sqliterepo_c.c000066400000000000000000001033401400672373200176630ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include #include #include "error.h" #include "cleanup.h" #include "version.h" #include "compression_wrapper.h" #include "misc.h" #include "createrepo_shared.h" #include "locate_metadata.h" #include "load_metadata.h" #include "package.h" #include "repomd.h" #include "sqlite.h" #include "xml_file.h" #include "modifyrepo_shared.h" #include "threads.h" #include "xml_dump.h" #define DEFAULT_CHECKSUM CR_CHECKSUM_SHA256 /** * Command line options */ typedef struct { /* Items filled by cmd option parser */ gboolean version; /*!< print program version */ gboolean quiet; /*!< quiet mode */ gboolean verbose; /*!< verbose mode */ gboolean force; /*!< overwrite existing DBs */ gboolean keep_old; /*!< keep old DBs around */ gboolean xz_compression; /*!< use xz for DBs compression */ gchar *compress_type; /*!< which compression type to use */ gboolean local_sqlite; /*!< gen sqlite locally into a directory temporary files. (For situations when sqlite has a trouble to gen DBs on NFS mounts.)*/ gchar *chcksum_type; /*!< type of checksum in repomd.xml */ /* Items filled by check_sqliterepo_arguments() */ cr_CompressionType compression_type; /*!< compression type */ cr_ChecksumType checksum_type; /*!< checksum type */ } SqliterepoCmdOptions; static SqliterepoCmdOptions * sqliterepocmdoptions_new(void) { SqliterepoCmdOptions *options; options = g_new(SqliterepoCmdOptions, 1); options->version = FALSE; options->quiet = FALSE; options->verbose = FALSE; options->force = FALSE; options->keep_old = FALSE; options->xz_compression = FALSE; options->compress_type = NULL; options->chcksum_type = NULL; options->local_sqlite = FALSE; options->compression_type = CR_CW_BZ2_COMPRESSION; options->checksum_type = CR_CHECKSUM_UNKNOWN; return options; } static void sqliterepocmdoptions_free(SqliterepoCmdOptions *options) { g_free(options->compress_type); g_free(options); } CR_DEFINE_CLEANUP_FUNCTION0(SqliterepoCmdOptions*, cr_local_sqliterepocmdoptions_free, sqliterepocmdoptions_free) #define _cleanup_sqliterepocmdoptions_free_ __attribute__ ((cleanup(cr_local_sqliterepocmdoptions_free))) /** * Parse commandline arguments for sqliterepo utility */ static gboolean parse_sqliterepo_arguments(int *argc, char ***argv, SqliterepoCmdOptions *options, GError **err) { const GOptionEntry cmd_entries[] = { { "version", 'V', 0, G_OPTION_ARG_NONE, &(options->version), "Show program's version number and exit.", NULL}, { "quiet", 'q', 0, G_OPTION_ARG_NONE, &(options->quiet), "Run quietly.", NULL }, { "verbose", 'v', 0, G_OPTION_ARG_NONE, &(options->verbose), "Run verbosely.", NULL }, { "force", 'f', 0, G_OPTION_ARG_NONE, &(options->force), "Overwrite existing DBs.", NULL }, { "keep-old", '\0', 0, G_OPTION_ARG_NONE, &(options->keep_old), "Do not remove old DBs. Use only with combination with --force.", NULL }, { "xz", '\0', 0, G_OPTION_ARG_NONE, &(options->xz_compression), "Use xz for repodata compression.", NULL }, { "compress-type", '\0', 0, G_OPTION_ARG_STRING, &(options->compress_type), "Which compression type to use.", "" }, { "checksum", '\0', 0, G_OPTION_ARG_STRING, &(options->chcksum_type), "Which checksum type to use in repomd.xml for sqlite DBs.", "" }, { "local-sqlite", '\0', 0, G_OPTION_ARG_NONE, &(options->local_sqlite), "Gen sqlite DBs locally (into a directory for temporary files). " "Sometimes, sqlite has a trouble to gen DBs on a NFS mount, " "use this option in such cases. 
" "This option could lead to a higher memory consumption " "if TMPDIR is set to /tmp or not set at all, because then the /tmp is " "used and /tmp dir is often a ramdisk.", NULL }, { NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL }, }; // Parse cmd arguments GOptionContext *context; context = g_option_context_new(""); g_option_context_set_summary(context, "Generate sqlite DBs from XML repodata."); g_option_context_add_main_entries(context, cmd_entries, NULL); gboolean ret = g_option_context_parse(context, argc, argv, err); g_option_context_free(context); return ret; } /** * Check parsed arguments and fill some other attributes * of option struct accordingly. */ static gboolean check_arguments(SqliterepoCmdOptions *options, GError **err) { // --compress-type if (options->compress_type) { options->compression_type = cr_compression_type(options->compress_type); if (options->compression_type == CR_CW_UNKNOWN_COMPRESSION) { g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR, "Unknown compression type \"%s\"", options->compress_type); return FALSE; } } // --checksum if (options->chcksum_type) { cr_ChecksumType type; type = cr_checksum_type(options->chcksum_type); if (type == CR_CHECKSUM_UNKNOWN) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "Unknown/Unsupported checksum type \"%s\"", options->chcksum_type); return FALSE; } options->checksum_type = type; } // --xz if (options->xz_compression) options->compression_type = CR_CW_XZ_COMPRESSION; return TRUE; } // Common static int warningcb(G_GNUC_UNUSED cr_XmlParserWarningType type, char *msg, void *cbdata, G_GNUC_UNUSED GError **err) { g_warning("XML parser warning (%s): %s\n", (gchar *) cbdata, msg); return CR_CB_RET_OK; } static int pkgcb(cr_Package *pkg, void *cbdata, GError **err) { int rc; rc = cr_db_add_pkg((cr_SqliteDb *) cbdata, pkg, err); cr_package_free(pkg); if (rc != CRE_OK) return CR_CB_RET_ERR; return CR_CB_RET_OK; } // Primary sqlite db static gboolean primary_to_sqlite(const gchar *pri_xml_path, cr_SqliteDb *pri_db, GError **err) { int rc; rc = cr_xml_parse_primary(pri_xml_path, NULL, NULL, pkgcb, (void *) pri_db, warningcb, (void *) pri_xml_path, TRUE, err); if (rc != CRE_OK) return FALSE; return TRUE; } // Filelists static gboolean filelists_to_sqlite(const gchar *fil_xml_path, cr_SqliteDb *fil_db, GError **err) { int rc; rc = cr_xml_parse_filelists(fil_xml_path, NULL, NULL, pkgcb, (void *) fil_db, warningcb, (void *) fil_xml_path, err); if (rc != CRE_OK) return FALSE; return TRUE; } // Other static gboolean other_to_sqlite(const gchar *oth_xml_path, cr_SqliteDb *oth_db, GError **err) { int rc; rc = cr_xml_parse_other(oth_xml_path, NULL, NULL, pkgcb, (void *) oth_db, warningcb, (void *) oth_xml_path, err); if (rc != CRE_OK) return FALSE; return TRUE; } // Main static gboolean xml_to_sqlite(const gchar *pri_xml_path, const gchar *fil_xml_path, const gchar *oth_xml_path, cr_SqliteDb *pri_db, cr_SqliteDb *fil_db, cr_SqliteDb *oth_db, GError **err) { gboolean ret; if (pri_xml_path && pri_db) { ret = primary_to_sqlite(pri_xml_path, pri_db, err); if (!ret) return FALSE; g_debug("Primary sqlite done"); } if (fil_xml_path && fil_db) { ret = filelists_to_sqlite(fil_xml_path, fil_db, err); if (!ret) return FALSE; g_debug("Filelists sqlite done"); } if (oth_xml_path && oth_db) { ret = other_to_sqlite(oth_xml_path, oth_db, err); if (!ret) return FALSE; g_debug("Other sqlite done"); } return TRUE; } static gboolean sqlite_dbinfo_update(cr_Repomd *repomd, cr_SqliteDb *pri_db, cr_SqliteDb *fil_db, cr_SqliteDb *oth_db, GError **err) { 
cr_RepomdRecord *rec = NULL; // Parse repomd.xml // Get files checksums and insert them into sqlite dbs if (pri_db) { rec = cr_repomd_get_record(repomd, "primary"); if (rec && rec->checksum) if (cr_db_dbinfo_update(pri_db, rec->checksum, err) != CRE_OK) return FALSE; } if (fil_db) { rec = cr_repomd_get_record(repomd, "filelists"); if (rec && rec->checksum) if (cr_db_dbinfo_update(fil_db, rec->checksum, err) != CRE_OK) return FALSE; } if (oth_db) { rec = cr_repomd_get_record(repomd, "other"); if (rec && rec->checksum) if (cr_db_dbinfo_update(oth_db, rec->checksum, err) != CRE_OK) return FALSE; } return TRUE; } static gboolean compress_sqlite_dbs(const gchar *tmp_out_repo, const gchar *pri_db_filename, cr_RepomdRecord **in_pri_db_rec, const gchar *fil_db_filename, cr_RepomdRecord **in_fil_db_rec, const gchar *oth_db_filename, cr_RepomdRecord **in_oth_db_rec, cr_CompressionType compression_type, cr_ChecksumType checksum_type) { cr_CompressionTask *pri_db_task; cr_CompressionTask *fil_db_task; cr_CompressionTask *oth_db_task; const char *sqlite_compression_suffix; cr_RepomdRecord *pri_db_rec = NULL; cr_RepomdRecord *fil_db_rec = NULL; cr_RepomdRecord *oth_db_rec = NULL; // Prepare thread pool for compression tasks GThreadPool *compress_pool = g_thread_pool_new(cr_compressing_thread, NULL, 3, FALSE, NULL); // Prepare output filenames sqlite_compression_suffix = cr_compression_suffix(compression_type); gchar *pri_db_name = g_strconcat(tmp_out_repo, "/primary.sqlite", sqlite_compression_suffix, NULL); gchar *fil_db_name = g_strconcat(tmp_out_repo, "/filelists.sqlite", sqlite_compression_suffix, NULL); gchar *oth_db_name = g_strconcat(tmp_out_repo, "/other.sqlite", sqlite_compression_suffix, NULL); // Prepare compression tasks pri_db_task = cr_compressiontask_new(pri_db_filename, pri_db_name, compression_type, checksum_type, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, pri_db_task, NULL); fil_db_task = cr_compressiontask_new(fil_db_filename, fil_db_name, compression_type, checksum_type, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, fil_db_task, NULL); oth_db_task = cr_compressiontask_new(oth_db_filename, oth_db_name, compression_type, checksum_type, NULL, FALSE, 1, NULL); g_thread_pool_push(compress_pool, oth_db_task, NULL); // Wait till all tasks are complete and free the thread pool g_thread_pool_free(compress_pool, FALSE, TRUE); // Remove uncompressed DBs cr_rm(pri_db_filename, CR_RM_FORCE, NULL, NULL); cr_rm(fil_db_filename, CR_RM_FORCE, NULL, NULL); cr_rm(oth_db_filename, CR_RM_FORCE, NULL, NULL); // Prepare repomd records pri_db_rec = cr_repomd_record_new("primary_db", pri_db_name); fil_db_rec = cr_repomd_record_new("filelists_db", fil_db_name); oth_db_rec = cr_repomd_record_new("other_db", oth_db_name); *in_pri_db_rec = pri_db_rec; *in_fil_db_rec = fil_db_rec; *in_oth_db_rec = oth_db_rec; // Free paths to compressed files g_free(pri_db_name); g_free(fil_db_name); g_free(oth_db_name); // Fill repomd records from stats gathered during compression cr_repomd_record_load_contentstat(pri_db_rec, pri_db_task->stat); cr_repomd_record_load_contentstat(fil_db_rec, fil_db_task->stat); cr_repomd_record_load_contentstat(oth_db_rec, oth_db_task->stat); // Free the compression tasks cr_compressiontask_free(pri_db_task, NULL); cr_compressiontask_free(fil_db_task, NULL); cr_compressiontask_free(oth_db_task, NULL); // Prepare thread pool for repomd record filling tasks GThreadPool *fill_pool = g_thread_pool_new(cr_repomd_record_fill_thread, NULL, 3, FALSE, NULL); // Prepare the tasks 
themselves cr_RepomdRecordFillTask *pri_db_fill_task; cr_RepomdRecordFillTask *fil_db_fill_task; cr_RepomdRecordFillTask *oth_db_fill_task; pri_db_fill_task = cr_repomdrecordfilltask_new(pri_db_rec, checksum_type, NULL); g_thread_pool_push(fill_pool, pri_db_fill_task, NULL); fil_db_fill_task = cr_repomdrecordfilltask_new(fil_db_rec, checksum_type, NULL); g_thread_pool_push(fill_pool, fil_db_fill_task, NULL); oth_db_fill_task = cr_repomdrecordfilltask_new(oth_db_rec, checksum_type, NULL); g_thread_pool_push(fill_pool, oth_db_fill_task, NULL); // Wait till the all tasks are finished and free the pool g_thread_pool_free(fill_pool, FALSE, TRUE); // Clear the tasks cr_repomdrecordfilltask_free(pri_db_fill_task, NULL); cr_repomdrecordfilltask_free(fil_db_fill_task, NULL); cr_repomdrecordfilltask_free(oth_db_fill_task, NULL); return TRUE; } static gboolean uses_simple_md_filename(cr_Repomd *repomd, gboolean *simple_md, GError **err) { cr_RepomdRecord *rec = NULL; // Get primary record rec = cr_repomd_get_record(repomd, "primary"); if (!rec) { g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR, "Repomd doen't contain primary.xml"); return FALSE; } if (!rec->location_href) { g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR, "Primary repomd record doesn't contain location href"); return FALSE; } // Check if it's prefixed by checksum or not _cleanup_free_ gchar *basename = NULL; basename = g_path_get_basename(rec->location_href); if (g_str_has_prefix(basename, "primary")) *simple_md = TRUE; else *simple_md = FALSE; return TRUE; } /* Prepare new repomd.xml * Detect if unique or simple md filenames should be used. * Rename the files if necessary (add checksums into prefixes) * Add the records for databases * Write the updated repomd.xml into tmp_out_repo */ static gboolean gen_new_repomd(const gchar *tmp_out_repo, cr_Repomd *in_repomd, cr_RepomdRecord *in_pri_db_rec, cr_RepomdRecord *in_fil_db_rec, cr_RepomdRecord *in_oth_db_rec, GError **err) { cr_Repomd *repomd = NULL; cr_RepomdRecord *pri_db_rec = NULL; cr_RepomdRecord *fil_db_rec = NULL; cr_RepomdRecord *oth_db_rec = NULL; gboolean simple_md_filename = FALSE; // Create copy of repomd repomd = cr_repomd_copy(in_repomd); // Check if a unique md filename should be used or not if (!uses_simple_md_filename(in_repomd, &simple_md_filename, err)) return FALSE; // Prepend checksum if unique md filename should be used if (!simple_md_filename) { g_debug("Renaming generated DBs to unique filenames.."); cr_repomd_record_rename_file(in_pri_db_rec, NULL); cr_repomd_record_rename_file(in_fil_db_rec, NULL); cr_repomd_record_rename_file(in_oth_db_rec, NULL); } // Remove existing DBs cr_repomd_remove_record(repomd, "primary_db"); cr_repomd_remove_record(repomd, "filelists_db"); cr_repomd_remove_record(repomd, "other_db"); // Create copy of the records // // Note: We do this copy, because once we set a record into // a repomd, the repomd overtake the ownership of the record, // but we don't want to lose ownership in this case. // // Note: We do this copy intentionaly after the rename, // because we want to have the rename propagated into // original records (the ones referenced in caller function). 
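/* Illustrative sketch (not part of the original source) of the ownership rule
 * described in the note above: cr_repomd_set_record() takes over the record it
 * is given, so a caller that still needs the record afterwards hands over a
 * copy and keeps (and later frees) the original:
 *
 *   cr_RepomdRecord *rec = cr_repomd_record_new("primary_db", path);   // 'path' is hypothetical
 *   cr_repomd_set_record(repomd, cr_repomd_record_copy(rec));          // repomd owns the copy
 *   // ... 'rec' remains usable here ...
 *   cr_repomd_record_free(rec);                                        // caller frees the original
 */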
pri_db_rec = cr_repomd_record_copy(in_pri_db_rec); fil_db_rec = cr_repomd_record_copy(in_fil_db_rec); oth_db_rec = cr_repomd_record_copy(in_oth_db_rec); // Add records to repomd.xml cr_repomd_set_record(repomd, pri_db_rec); cr_repomd_set_record(repomd, fil_db_rec); cr_repomd_set_record(repomd, oth_db_rec); // Sort the records cr_repomd_sort_records(repomd); // Dump the repomd.xml content _cleanup_free_ gchar *repomd_content = NULL; repomd_content = cr_xml_dump_repomd(repomd, err); if (!repomd_content) return FALSE; // Prepare output repomd.xml path _cleanup_free_ gchar *repomd_path = NULL; repomd_path = g_build_filename(tmp_out_repo, "repomd.xml", NULL); // Write the repomd.xml _cleanup_file_fclose_ FILE *f_repomd = NULL; if (!(f_repomd = fopen(repomd_path, "w"))) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot open %s: %s", repomd_path, g_strerror(errno)); return FALSE; } // Write the content fputs(repomd_content, f_repomd); // Cleanup cr_repomd_free(repomd); return TRUE; } /** Intelligently move content of tmp_out_repo to in_repo * (the repomd.xml is moved as a last file) */ static gboolean move_results(const gchar *tmp_out_repo, const gchar *in_repo, GError **err) { _cleanup_dir_close_ GDir *dirp = NULL; _cleanup_error_free_ GError *tmp_err = NULL; // Open the source directory dirp = g_dir_open(tmp_out_repo, 0, &tmp_err); if (!dirp) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot open dir %s: %s", tmp_out_repo, tmp_err->message); return FALSE; } // Iterate over its content const gchar *filename; while ((filename = g_dir_read_name(dirp))) { _cleanup_free_ gchar *src_path = NULL; _cleanup_free_ gchar *dst_path = NULL; // Skip repomd.xml if (!g_strcmp0(filename, "repomd.xml")) continue; // Get full src path src_path = g_build_filename(tmp_out_repo, filename, NULL); // Prepare full dst path dst_path = g_build_filename(in_repo, filename, NULL); // Move the file if (g_rename(src_path, dst_path) == -1) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot move: %s to: %s: %s", src_path, dst_path, g_strerror(errno)); return FALSE; } } // The last step - move of the repomd.xml { _cleanup_free_ gchar *src_path = NULL; _cleanup_free_ gchar *dst_path = NULL; src_path = g_build_filename(tmp_out_repo, "repomd.xml", NULL); dst_path = g_build_filename(in_repo, "repomd.xml", NULL); if (g_rename(src_path, dst_path) == -1) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot move: %s to: %s: %s", src_path, dst_path, g_strerror(errno)); return FALSE; } } return TRUE; } static gboolean remove_old_if_different(const gchar *repo_path, cr_RepomdRecord *old_rec, cr_RepomdRecord *new_rec, GError **err) { int rc; _cleanup_free_ gchar *old_fn = NULL; _cleanup_free_ gchar *new_fn = NULL; // Input check if (!old_rec) return TRUE; // Build filenames old_fn = g_build_filename(repo_path, old_rec->location_href, NULL); new_fn = g_build_filename(repo_path, new_rec->location_href, NULL); // Check if the files are the same gboolean identical = FALSE; if (!cr_identical_files(old_fn, new_fn, &identical, err)) return FALSE; if (identical) { g_debug("Old DB file %s has been overwritten by the new one.", new_fn); return TRUE; } // Remove file referenced by the old record g_debug("Removing old DB file %s", old_fn); rc = g_remove(old_fn); if (rc == -1) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot remove %s: %s", old_fn, g_strerror(errno)); return FALSE; } return TRUE; } static gboolean generate_sqlite_from_xml(const gchar *path, cr_CompressionType compression_type, cr_ChecksumType checksum_type, 
gboolean local_sqlite, gboolean force, gboolean keep_old, GError **err) { _cleanup_free_ gchar *in_dir = NULL; // path/to/repo/ _cleanup_free_ gchar *in_repo = NULL; // path/to/repo/repodata/ _cleanup_free_ gchar *out_dir = NULL; // path/to/out_repo/ _cleanup_free_ gchar *out_repo = NULL; // path/to/out_repo/repodata/ _cleanup_free_ gchar *tmp_out_repo = NULL; // usually path/to/out_repo/.repodata/ _cleanup_free_ gchar *lock_dir = NULL; // path/to/out_repo/.repodata/ gboolean ret; GError *tmp_err = NULL; // Check if input dir exists in_dir = cr_normalize_dir_path(path); if (!g_file_test(in_dir, G_FILE_TEST_IS_DIR)) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Directory %s must exist\n", in_dir); return FALSE; } // Set other paths in_repo = g_build_filename(in_dir, "repodata/", NULL); out_dir = g_strdup(in_dir); out_repo = g_strdup(in_repo); lock_dir = g_build_filename(out_dir, ".repodata/", NULL); tmp_out_repo = g_build_filename(out_dir, ".repodata/", NULL); // Block signals that terminates the process if (!cr_block_terminating_signals(err)) return FALSE; // Check if lock exists & Create lock dir if (!cr_lock_repo(out_dir, FALSE, &lock_dir, &tmp_out_repo, err)) return FALSE; // Setup cleanup handlers if (!cr_set_cleanup_handler(lock_dir, tmp_out_repo, err)) return FALSE; // Unblock the blocked signals if (!cr_unblock_terminating_signals(err)) return FALSE; // Locate repodata struct cr_MetadataLocation *md_loc = NULL; _cleanup_free_ gchar *pri_xml_path = NULL; _cleanup_free_ gchar *fil_xml_path = NULL; _cleanup_free_ gchar *oth_xml_path = NULL; _cleanup_free_ gchar *repomd_path = NULL; md_loc = cr_locate_metadata(in_dir, TRUE, NULL); if (!md_loc || !md_loc->repomd) { g_set_error(err, CREATEREPO_C_ERROR, CRE_NOFILE, "repomd.xml doesn't exist"); return FALSE; } repomd_path = g_build_filename(md_loc->repomd, NULL); if (md_loc->pri_xml_href) pri_xml_path = g_build_filename(md_loc->pri_xml_href, NULL); if (md_loc->fil_xml_href) fil_xml_path = g_build_filename(md_loc->fil_xml_href, NULL); if (md_loc->oth_xml_href) oth_xml_path = g_build_filename(md_loc->oth_xml_href, NULL); cr_metadatalocation_free(md_loc); // Parse repomd.xml int rc; cr_Repomd *repomd = cr_repomd_new(); rc = cr_xml_parse_repomd(repomd_path, repomd, warningcb, (void *) repomd_path, err); if (rc != CRE_OK) return FALSE; // Check if DBs already exist or not gboolean dbs_already_exist = FALSE; if (cr_repomd_get_record(repomd, "primary_db") || cr_repomd_get_record(repomd, "filename_db") || cr_repomd_get_record(repomd, "other_db")) { dbs_already_exist = TRUE; } if (dbs_already_exist && !force) { g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR, "Repository already has sqlitedb present " "in repomd.xml (You may use --force)"); return FALSE; } // Auto-detect used checksum algorithm if not specified explicitly if (checksum_type == CR_CHECKSUM_UNKNOWN) { cr_RepomdRecord *rec = cr_repomd_get_record(repomd, "primary"); if (!rec) { g_set_error(err, CREATEREPO_C_ERROR, CRE_ERROR, "repomd.xml is missing primary metadata"); return FALSE; } if (rec->checksum_type) checksum_type = cr_checksum_type(rec->checksum_type); else if (rec->checksum_open_type) checksum_type = cr_checksum_type(rec->checksum_open_type); if (checksum_type == CR_CHECKSUM_UNKNOWN) { g_debug("Cannot auto-detect checksum type, using default %s", cr_checksum_name_str(DEFAULT_CHECKSUM)); checksum_type = DEFAULT_CHECKSUM; } } // Open sqlite databases _cleanup_free_ gchar *pri_db_filename = NULL; _cleanup_free_ gchar *fil_db_filename = NULL; _cleanup_free_ gchar *oth_db_filename = 
NULL; cr_SqliteDb *pri_db = NULL; cr_SqliteDb *fil_db = NULL; cr_SqliteDb *oth_db = NULL; _cleanup_file_close_ int pri_db_fd = -1; _cleanup_file_close_ int fil_db_fd = -1; _cleanup_file_close_ int oth_db_fd = -1; g_message("Preparing sqlite DBs"); if (!local_sqlite) { g_debug("Creating databases"); pri_db_filename = g_strconcat(tmp_out_repo, "/primary.sqlite", NULL); fil_db_filename = g_strconcat(tmp_out_repo, "/filelists.sqlite", NULL); oth_db_filename = g_strconcat(tmp_out_repo, "/other.sqlite", NULL); } else { g_debug("Creating databases localy"); const gchar *tmpdir = g_get_tmp_dir(); pri_db_filename = g_build_filename(tmpdir, "primary.XXXXXX.sqlite", NULL); fil_db_filename = g_build_filename(tmpdir, "filelists.XXXXXX.sqlite", NULL); oth_db_filename = g_build_filename(tmpdir, "other.XXXXXXX.sqlite", NULL); pri_db_fd = g_mkstemp(pri_db_filename); g_debug("%s", pri_db_filename); if (pri_db_fd == -1) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot open %s: %s", pri_db_filename, g_strerror(errno)); return FALSE; } fil_db_fd = g_mkstemp(fil_db_filename); g_debug("%s", fil_db_filename); if (fil_db_fd == -1) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot open %s: %s", fil_db_filename, g_strerror(errno)); return FALSE; } oth_db_fd = g_mkstemp(oth_db_filename); g_debug("%s", oth_db_filename); if (oth_db_fd == -1) { g_set_error(err, CREATEREPO_C_ERROR, CRE_IO, "Cannot open %s: %s", oth_db_filename, g_strerror(errno)); return FALSE; } } pri_db = cr_db_open_primary(pri_db_filename, err); if (!pri_db) return FALSE; fil_db = cr_db_open_filelists(fil_db_filename, err); assert(fil_db || tmp_err); if (!fil_db) return FALSE; oth_db = cr_db_open_other(oth_db_filename, err); assert(oth_db || tmp_err); if (!oth_db) return FALSE; // XML to Sqlite ret = xml_to_sqlite(pri_xml_path, fil_xml_path, oth_xml_path, pri_db, fil_db, oth_db, err); if (!ret) return FALSE; // Put checksums of XML files into Sqlite ret = sqlite_dbinfo_update(repomd, pri_db, fil_db, oth_db, err); if (!ret) return FALSE; // Close dbs cr_db_close(pri_db, NULL); cr_db_close(fil_db, NULL); cr_db_close(oth_db, NULL); // Repomd records cr_RepomdRecord *pri_db_rec = NULL; cr_RepomdRecord *fil_db_rec = NULL; cr_RepomdRecord *oth_db_rec = NULL; // Compress DB files and fill records ret = compress_sqlite_dbs(tmp_out_repo, pri_db_filename, &pri_db_rec, fil_db_filename, &fil_db_rec, oth_db_filename, &oth_db_rec, compression_type, checksum_type); if (!ret) return FALSE; // Prepare new repomd.xml ret = gen_new_repomd(tmp_out_repo, repomd, pri_db_rec, fil_db_rec, oth_db_rec, err); if (!ret) return FALSE; // Move the results (compressed DBs and repomd.xml) into in_repo ret = move_results(tmp_out_repo, in_repo, err); if (!ret) return FALSE; // Remove old DBs if (dbs_already_exist && force && !keep_old) { ret = remove_old_if_different(in_dir, cr_repomd_get_record(repomd, "primary_db"), pri_db_rec, err); if (!ret) return FALSE; ret = remove_old_if_different(in_dir, cr_repomd_get_record(repomd, "filelists_db"), fil_db_rec, err); if (!ret) return FALSE; ret = remove_old_if_different(in_dir, cr_repomd_get_record(repomd, "other_db"), oth_db_rec, err); if (!ret) return FALSE; } // Remove tmp_out_repo g_rmdir(tmp_out_repo); // Clean up cr_repomd_free(repomd); cr_repomd_record_free(pri_db_rec); cr_repomd_record_free(fil_db_rec); cr_repomd_record_free(oth_db_rec); return TRUE; } /** * Main */ int main(int argc, char **argv) { gboolean ret = TRUE; _cleanup_sqliterepocmdoptions_free_ SqliterepoCmdOptions *options = NULL; _cleanup_error_free_ GError 
*tmp_err = NULL; // Parse arguments options = sqliterepocmdoptions_new(); if (!parse_sqliterepo_arguments(&argc, &argv, options, &tmp_err)) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } // Set logging cr_setup_logging(FALSE, options->verbose); // Print version if required if (options->version) { printf("Version: %s\n", cr_version_string_with_features()); exit(EXIT_SUCCESS); } // Check arguments if (!check_arguments(options, &tmp_err)) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } if (argc != 2) { g_printerr("Must specify exactly one repo directory to work on\n"); exit(EXIT_FAILURE); } // Emit debug message with version g_debug("Version: %s", cr_version_string_with_features()); // Gen the databases ret = generate_sqlite_from_xml(argv[1], options->compression_type, options->checksum_type, options->local_sqlite, options->force, options->keep_old, &tmp_err); if (!ret) { g_printerr("%s\n", tmp_err->message); exit(EXIT_FAILURE); } exit(EXIT_SUCCESS); } createrepo_c-0.17.0/src/threads.c000066400000000000000000000117511400672373200166300ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include "threads.h" #include "error.h" #include "misc.h" #include "dumper_thread.h" #define ERR_DOMAIN CREATEREPO_C_ERROR /** Parallel Compression */ cr_CompressionTask * cr_compressiontask_new(const char *src, const char *dst, cr_CompressionType compression_type, cr_ChecksumType checksum_type, const char *zck_dict_dir, gboolean zck_auto_chunk, int delsrc, GError **err) { cr_ContentStat *stat; cr_CompressionTask *task; assert(src); assert(compression_type < CR_CW_COMPRESSION_SENTINEL); assert(checksum_type < CR_CHECKSUM_SENTINEL); assert(!err || *err == NULL); stat = cr_contentstat_new(checksum_type, err); if (!stat) return NULL; task = g_malloc0(sizeof(cr_CompressionTask)); if (!task) { g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot allocate memory"); return NULL; } task->src = g_strdup(src); task->dst = g_strdup(dst); task->type = compression_type; task->stat = stat; if (zck_dict_dir != NULL) task->zck_dict_dir = g_strdup(zck_dict_dir); task->zck_auto_chunk = zck_auto_chunk; task->delsrc = delsrc; return task; } void cr_compressiontask_free(cr_CompressionTask *task, GError **err) { assert(!err || *err == NULL); if (!task) return; g_free(task->src); g_free(task->dst); cr_contentstat_free(task->stat, err); if (task->err) g_error_free(task->err); if (task->zck_dict_dir) g_free(task->zck_dict_dir); g_free(task); } void cr_compressing_thread(gpointer data, G_GNUC_UNUSED gpointer user_data) { cr_CompressionTask *task = data; GError *tmp_err = NULL; assert(task); if (!task->dst) task->dst = g_strconcat(task->src, cr_compression_suffix(task->type), NULL); cr_compress_file_with_stat(task->src, task->dst, task->type, task->stat, task->zck_dict_dir, task->zck_auto_chunk, &tmp_err); if (tmp_err) { // Error encountered g_propagate_error(&task->err, tmp_err); } else { // Compression was successful if (task->delsrc) remove(task->src); } } void cr_rewrite_pkg_count_thread(gpointer data, gpointer user_data) { cr_CompressionTask *task = data; struct UserData *ud = user_data; GError *tmp_err = NULL; assert(task); cr_rewrite_header_package_count(task->src, task->type, ud->package_count, ud->task_count, task->stat, task->zck_dict_dir, &tmp_err); if (tmp_err) { // Error encountered g_propagate_error(&task->err, tmp_err); } } /** Parallel Repomd Record Fill */ cr_RepomdRecordFillTask * cr_repomdrecordfilltask_new(cr_RepomdRecord *record, cr_ChecksumType checksum_type, GError **err) { cr_RepomdRecordFillTask *task; assert(record); assert(!err || *err == NULL); task = g_malloc0(sizeof(cr_RepomdRecord)); task->record = record; task->checksum_type = checksum_type; return task; } void cr_repomdrecordfilltask_free(cr_RepomdRecordFillTask *task, GError **err) { assert(!err || *err == NULL); if (task->err) g_error_free(task->err); g_free(task); } void cr_repomd_record_fill_thread(gpointer data, G_GNUC_UNUSED gpointer user_data) { cr_RepomdRecordFillTask *task = data; GError *tmp_err = NULL; assert(task); cr_repomd_record_fill(task->record, task->checksum_type, &tmp_err); if (tmp_err) { // Error encountered g_propagate_error(&task->err, tmp_err); } } createrepo_c-0.17.0/src/threads.h000066400000000000000000000133361400672373200166360ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later 
version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_THREADS_H__ #define __C_CREATEREPOLIB_THREADS_H__ #include #include "compression_wrapper.h" #include "checksum.h" #include "repomd.h" #ifdef __cplusplus extern "C" { #endif /** \defgroup threads Useful thread function to use in GThreadPool. * * Paralelized compression example: * \code * cr_CompressionTask *task_1, *task_2; * GThreadPool *pool; * * // Prepare tasks * task_1 = cr_compressiontask_new("foo", "foo.gz", CR_CW_GZ_COMPRESSION, 1, * CR_CHECKSUM_SHA256, NULL); * task_2 = cr_compressiontask_new("bar", "bar.gz", CR_CW_GZ_COMPRESSION, 1, * CR_CHECKSUM_SHA512, NULL); * * // Create pool for tasks * pool = g_thread_pool_new(cr_compressing_thread, NULL, 2, FALSE, NULL); * * // Push tasks to the pool * g_thread_pool_push(pool, task_1, NULL); * g_thread_pool_push(pool, task_2, NULL); * * // Wait until both treats finish and free the pool. * g_thread_pool_free(pool, FALSE, TRUE); * * // Use results * // Do whatever you want or need to do * * // Clean up * cr_compressiontask_free(task_1, NULL); * cr_compressiontask_free(task_2, NULL); * \endcode * * \addtogroup threads * @{ */ /** Object representing a single compression task */ typedef struct { char *src; /*!< Path to the original file. Must be specified by user. */ char *dst; /*!< Path to the destination file. If NULL, src+compression suffix will be used and this will be filled.*/ cr_CompressionType type; /*!< Type of compression to use */ cr_ContentStat *stat; /*!< Stats of compressed file or NULL */ char *zck_dict_dir; /*!< Location of zchunk dictionaries */ gboolean zck_auto_chunk; /*!< Whether zchunk file should be auto-chunked */ int delsrc; /*!< Indicate if delete source file after successful compression. */ GError *err; /*!< If error was encountered, it will be stored here, if no, then NULL*/ } cr_CompressionTask; /** Function to prepare a new cr_CompressionTask. * @param src Source filename. * @param dst Destination filename or NULL (then src+compression * suffix will be used). * @param compression_type Type of compression to use. * @param checksum_type Checksum type for stat calculation. Note: Stat * is always use. If you don't need a stats use * CR_CHECKSUM_UNKNOWN, then no checksum calculation * will be performed, only size would be calculated. * Don't be afraid, size calculation has almost * no overhead. * @param delsrc Delete src after successuful compression. * 0 = Do not delete, delete otherwise * @param err GError **. Note: This is a GError for the * cr_compresiontask_new function. The GError * that will be at created cr_CompressionTask is * different. * @return New cr_CompressionTask. */ cr_CompressionTask * cr_compressiontask_new(const char *src, const char *dst, cr_CompressionType compression_type, cr_ChecksumType checksum_type, const char *zck_dict_dir, gboolean zck_auto_chunk, int delsrc, GError **err); /** Frees cr_CompressionTask and all its components. * @param task cr_CompressionTask task * @param err GError ** */ void cr_compressiontask_free(cr_CompressionTask *task, GError **err); /** Function for GThreadPool. 
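 *
 * (Illustrative usage sketch, not part of the original header; the paths are
 * hypothetical.) A task built with the current cr_compressiontask_new()
 * signature is pushed to a pool that runs this worker:
 *
 * \code
 * GError *err = NULL;
 * cr_CompressionTask *task;
 * task = cr_compressiontask_new("repodata/primary.xml",     // src
 *                               "repodata/primary.xml.gz",  // dst
 *                               CR_CW_GZ_COMPRESSION,       // compression type
 *                               CR_CHECKSUM_SHA256,         // checksum for content stats
 *                               NULL, FALSE,                // no zchunk dict, no auto-chunking
 *                               0,                          // keep the source file
 *                               &err);
 *
 * GThreadPool *pool = g_thread_pool_new(cr_compressing_thread, NULL, 1, FALSE, NULL);
 * g_thread_pool_push(pool, task, NULL);
 * g_thread_pool_free(pool, FALSE, TRUE);   // wait until the task finishes
 *
 * cr_compressiontask_free(task, NULL);
 * \endcode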
*/ void cr_compressing_thread(gpointer data, gpointer user_data); /** Object representing a single repomd record fill task */ typedef struct { cr_RepomdRecord *record; /*!< Repomd record to be filled */ cr_ChecksumType checksum_type; /*!< Type of checksum to be used */ GError *err; /*!< GError ** */ } cr_RepomdRecordFillTask; /** Function to prepare a new cr_RepomdRecordFillTask. * @param record cr_RepomdRecord. * @param checksum_type Type of checksum. * @param err GError ** * @return New cr_RepomdRecordFillTask. */ cr_RepomdRecordFillTask * cr_repomdrecordfilltask_new(cr_RepomdRecord *record, cr_ChecksumType checksum_type, GError **err); /** Frees cr_RepomdRecordFillTask */ void cr_repomdrecordfilltask_free(cr_RepomdRecordFillTask *task, GError **err); /** Function for GThread Pool. */ void cr_repomd_record_fill_thread(gpointer data, gpointer user_data); /** Function for GThread Pool. */ void cr_rewrite_pkg_count_thread(gpointer data, gpointer user_data); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_THREADS_H__ */ createrepo_c-0.17.0/src/updateinfo.c000066400000000000000000000224041400672373200173310ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include "updateinfo.h" #include "error.h" #include "misc.h" #include "checksum.h" /* * cr_UpdateCollectionPackage */ cr_UpdateCollectionPackage * cr_updatecollectionpackage_new(void) { cr_UpdateCollectionPackage *pkg = g_malloc0(sizeof(*pkg)); pkg->chunk = g_string_chunk_new(0); return pkg; } cr_UpdateCollectionPackage * cr_updatecollectionpackage_copy(const cr_UpdateCollectionPackage *orig) { cr_UpdateCollectionPackage *pkg; if (!orig) return NULL; pkg = cr_updatecollectionpackage_new(); pkg->name = cr_safe_string_chunk_insert(pkg->chunk, orig->name); pkg->version = cr_safe_string_chunk_insert(pkg->chunk, orig->version); pkg->release = cr_safe_string_chunk_insert(pkg->chunk, orig->release); pkg->epoch = cr_safe_string_chunk_insert(pkg->chunk, orig->epoch); pkg->arch = cr_safe_string_chunk_insert(pkg->chunk, orig->arch); pkg->src = cr_safe_string_chunk_insert(pkg->chunk, orig->src); pkg->filename = cr_safe_string_chunk_insert(pkg->chunk, orig->filename); pkg->sum = cr_safe_string_chunk_insert(pkg->chunk, orig->sum); pkg->sum_type = orig->sum_type; pkg->reboot_suggested = orig->reboot_suggested; pkg->restart_suggested = orig->restart_suggested; pkg->relogin_suggested = orig->relogin_suggested; return pkg; } void cr_updatecollectionpackage_free(cr_UpdateCollectionPackage *pkg) { if (!pkg) return; g_string_chunk_free(pkg->chunk); g_free(pkg); } /* * cr_UpdateCollectionModule */ cr_UpdateCollectionModule * cr_updatecollectionmodule_new(void) { cr_UpdateCollectionModule *module = g_malloc0(sizeof(*module)); module->chunk = g_string_chunk_new(0); return module; } cr_UpdateCollectionModule * cr_updatecollectionmodule_copy(const cr_UpdateCollectionModule *orig) { cr_UpdateCollectionModule *module; if (!orig) return NULL; module = cr_updatecollectionmodule_new(); module->name = cr_safe_string_chunk_insert(module->chunk, orig->name); module->stream = cr_safe_string_chunk_insert(module->chunk, orig->stream); module->version = orig->version; module->context = cr_safe_string_chunk_insert(module->chunk, orig->context); module->arch = cr_safe_string_chunk_insert(module->chunk, orig->arch); return module; } void cr_updatecollectionmodule_free(cr_UpdateCollectionModule *module) { if (!module) return; g_string_chunk_free(module->chunk); g_free(module); } /* * cr_UpdateCollection */ cr_UpdateCollection * cr_updatecollection_new(void) { cr_UpdateCollection *collection = g_malloc0(sizeof(*collection)); collection->chunk = g_string_chunk_new(0); return collection; } cr_UpdateCollection * cr_updatecollection_copy(const cr_UpdateCollection *orig) { cr_UpdateCollection *col; if (!orig) return NULL; col = cr_updatecollection_new(); col->shortname = cr_safe_string_chunk_insert(col->chunk, orig->shortname); col->name = cr_safe_string_chunk_insert(col->chunk, orig->name); if (orig->module) { col->module = cr_updatecollectionmodule_copy(orig->module); } if (orig->packages) { GSList *newlist = NULL; for (GSList *elem = orig->packages; elem; elem = g_slist_next(elem)) { cr_UpdateCollectionPackage *pkg = elem->data; newlist = g_slist_prepend(newlist, cr_updatecollectionpackage_copy(pkg)); } col->packages = g_slist_reverse(newlist); } return col; } void cr_updatecollection_free(cr_UpdateCollection *collection) { if (!collection) return; cr_updatecollectionmodule_free(collection->module); cr_slist_free_full(collection->packages, (GDestroyNotify) cr_updatecollectionpackage_free); g_string_chunk_free(collection->chunk); g_free(collection); } void 
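/* Illustrative sketch (not part of the original source): the append helpers
 * below can be used to assemble a collection; the appended objects are then
 * owned by their parent and released by its _free() function. "foo" and
 * 'rec' are hypothetical:
 *
 *   cr_UpdateCollection *col = cr_updatecollection_new();
 *   cr_UpdateCollectionPackage *pkg = cr_updatecollectionpackage_new();
 *   pkg->name = cr_safe_string_chunk_insert(pkg->chunk, "foo");
 *   cr_updatecollection_append_package(col, pkg);    // col now owns pkg
 *   cr_updaterecord_append_collection(rec, col);     // rec (a cr_UpdateRecord *) now owns col
 */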
cr_updatecollection_append_package(cr_UpdateCollection *collection, cr_UpdateCollectionPackage *pkg) { if (!collection || !pkg) return; collection->packages = g_slist_append(collection->packages, pkg); } /* * cr_UpdateReference */ cr_UpdateReference * cr_updatereference_new(void) { cr_UpdateReference *ref = g_malloc0(sizeof(*ref)); ref->chunk = g_string_chunk_new(0); return ref; } cr_UpdateReference * cr_updatereference_copy(const cr_UpdateReference *orig) { cr_UpdateReference *ref; if (!orig) return NULL; ref = cr_updatereference_new(); ref->href = cr_safe_string_chunk_insert(ref->chunk, orig->href); ref->id = cr_safe_string_chunk_insert(ref->chunk, orig->id); ref->type = cr_safe_string_chunk_insert(ref->chunk, orig->type); ref->title = cr_safe_string_chunk_insert(ref->chunk, orig->title); return ref; } void cr_updatereference_free(cr_UpdateReference *ref) { if (!ref) return; g_string_chunk_free(ref->chunk); g_free(ref); } /* * cr_UpdateRecord */ cr_UpdateRecord * cr_updaterecord_new(void) { cr_UpdateRecord *rec = g_malloc0(sizeof(*rec)); rec->chunk = g_string_chunk_new(0); return rec; } cr_UpdateRecord * cr_updaterecord_copy(const cr_UpdateRecord *orig) { cr_UpdateRecord *rec; if (!orig) return NULL; rec = cr_updaterecord_new(); rec->from = cr_safe_string_chunk_insert(rec->chunk, orig->from); rec->status = cr_safe_string_chunk_insert(rec->chunk, orig->status); rec->type = cr_safe_string_chunk_insert(rec->chunk, orig->type); rec->version = cr_safe_string_chunk_insert(rec->chunk, orig->version); rec->id = cr_safe_string_chunk_insert(rec->chunk, orig->id); rec->title = cr_safe_string_chunk_insert(rec->chunk, orig->title); rec->issued_date = cr_safe_string_chunk_insert(rec->chunk, orig->issued_date); rec->updated_date = cr_safe_string_chunk_insert(rec->chunk, orig->updated_date); rec->rights = cr_safe_string_chunk_insert(rec->chunk, orig->rights); rec->release = cr_safe_string_chunk_insert(rec->chunk, orig->release); rec->pushcount = cr_safe_string_chunk_insert(rec->chunk, orig->pushcount); rec->severity = cr_safe_string_chunk_insert(rec->chunk, orig->severity); rec->summary = cr_safe_string_chunk_insert(rec->chunk, orig->summary); rec->description = cr_safe_string_chunk_insert(rec->chunk, orig->description); rec->solution = cr_safe_string_chunk_insert(rec->chunk, orig->solution); rec->reboot_suggested = orig->reboot_suggested; if (orig->references) { GSList *newlist = NULL; for (GSList *elem = orig->references; elem; elem = g_slist_next(elem)) { cr_UpdateReference *ref = elem->data; newlist = g_slist_prepend(newlist, cr_updatereference_copy(ref)); } rec->references = g_slist_reverse(newlist); } if (orig->collections) { GSList *newlist = NULL; for (GSList *elem = orig->collections; elem; elem = g_slist_next(elem)) { cr_UpdateCollection *col = elem->data; newlist = g_slist_prepend(newlist, cr_updatecollection_copy(col)); } rec->collections = g_slist_reverse(newlist); } return rec; } void cr_updaterecord_free(cr_UpdateRecord *rec) { if (!rec) return; cr_slist_free_full(rec->references, (GDestroyNotify) cr_updatereference_free); cr_slist_free_full(rec->collections, (GDestroyNotify) cr_updatecollection_free); g_string_chunk_free(rec->chunk); g_free(rec); } void cr_updaterecord_append_reference(cr_UpdateRecord *record, cr_UpdateReference *ref) { if (!record || !ref) return; record->references = g_slist_append(record->references, ref); } void cr_updaterecord_append_collection(cr_UpdateRecord *record, cr_UpdateCollection *collection) { if (!record || !collection) return; record->collections = 
g_slist_append(record->collections, collection); } /* * cr_Updateinfo */ cr_UpdateInfo * cr_updateinfo_new(void) { cr_UpdateInfo *uinfo = g_malloc0(sizeof(*uinfo)); return uinfo; } void cr_updateinfo_free(cr_UpdateInfo *uinfo) { if (!uinfo) return; cr_slist_free_full(uinfo->updates, (GDestroyNotify) cr_updaterecord_free); g_free(uinfo); } void cr_updateinfo_apped_record(cr_UpdateInfo *uinfo, cr_UpdateRecord *record) { if (!uinfo || !record) return; uinfo->updates = g_slist_append(uinfo->updates, record); } createrepo_c-0.17.0/src/updateinfo.h000066400000000000000000000124371400672373200173430ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_UPDATEINFO_H__ #define __C_CREATEREPOLIB_UPDATEINFO_H__ #ifdef __cplusplus extern "C" { #endif #include #include "checksum.h" /** \defgroup updateinfo Updateinfo API. * * Module for generating updateinfo.xml. * * \addtogroup updateinfo * @{ */ typedef struct { gchar *name; gchar *version; gchar *release; gchar *epoch; gchar *arch; gchar *src; gchar *filename; gchar *sum; cr_ChecksumType sum_type; gboolean reboot_suggested; gboolean restart_suggested; gboolean relogin_suggested; GStringChunk *chunk; } cr_UpdateCollectionPackage; typedef struct { gchar *name; gchar *stream; guint64 version; gchar *context; gchar *arch; GStringChunk *chunk; } cr_UpdateCollectionModule; typedef struct { gchar *shortname; /*!< e.g. rhn-tools-rhel-x86_64-server-6.5.aus */ gchar *name; /*!< e.g. RHN Tools for RHEL AUS (v. 6.5 for 64-bit x86_64) */ cr_UpdateCollectionModule *module; GSList *packages; /*!< List of cr_UpdateCollectionPackage */ GStringChunk *chunk; } cr_UpdateCollection; typedef struct { gchar *href; /*!< URL (e.g. to related bugzilla, errata, ...) */ gchar *id; /*!< id (e.g. 1035288, NULL for errata, ...) */ gchar *type; /*!< reference type ("self" for errata, "bugzilla", ...) */ gchar *title; /*!< Name of errata, name of bug, etc. */ GStringChunk *chunk; } cr_UpdateReference; typedef struct { gchar *from; /*!< Source of the update (e.g. security@redhat.com) */ gchar *status; /*!< Update status ("final", ...) */ gchar *type; /*!< Update type ("enhancement", "bugfix", ...) */ gchar *version; /*!< Update version (probably always an integer number) */ gchar *id; /*!< Update id (short update name, e.g. RHEA-2013:1777) */ gchar *title; /*!< Update name */ gchar *issued_date; /*!< Date string (e.g. 
"2013-12-02 00:00:00") */ gchar *updated_date;/*!< Date string */ gchar *rights; /*!< Copyright */ gchar *release; /*!< Release */ gchar *pushcount; /*!< Push count */ gchar *severity; /*!< Severity */ gchar *summary; /*!< Short summary */ gchar *description; /*!< Update description */ gchar *solution; /*!< Solution */ gboolean reboot_suggested; /*!< Reboot suggested */ GSList *references; /*!< List of cr_UpdateReference */ GSList *collections;/*!< List of cr_UpdateCollection */ GStringChunk *chunk;/*!< String chunk */ } cr_UpdateRecord; typedef struct { GSList *updates; /*!< List of cr_UpdateRecord */ } cr_UpdateInfo; /* * cr_UpdateCollectionPackage */ cr_UpdateCollectionPackage * cr_updatecollectionpackage_new(void); cr_UpdateCollectionPackage * cr_updatecollectionpackage_copy(const cr_UpdateCollectionPackage *orig); void cr_updatecollectionpackage_free(cr_UpdateCollectionPackage *pkg); /* * cr_UpdateCollectionModule */ cr_UpdateCollectionModule * cr_updatecollectionmodule_new(void); cr_UpdateCollectionModule * cr_updatecollectionmodule_copy(const cr_UpdateCollectionModule *orig); void cr_updatecollectionmodule_free(cr_UpdateCollectionModule *pkg); /* * cr_UpdateCollection */ cr_UpdateCollection * cr_updatecollection_new(void); cr_UpdateCollection * cr_updatecollection_copy(const cr_UpdateCollection *orig); void cr_updatecollection_free(cr_UpdateCollection *collection); void cr_updatecollection_append_package(cr_UpdateCollection *collection, cr_UpdateCollectionPackage *pkg); /* * cr_UpdateReference */ cr_UpdateReference * cr_updatereference_new(void); cr_UpdateReference * cr_updatereference_copy(const cr_UpdateReference *orig); void cr_updatereference_free(cr_UpdateReference *ref); /* * cr_UpdateRecord */ cr_UpdateRecord * cr_updaterecord_new(void); cr_UpdateRecord * cr_updaterecord_copy(const cr_UpdateRecord *orig); void cr_updaterecord_free(cr_UpdateRecord *record); void cr_updaterecord_append_reference(cr_UpdateRecord *record, cr_UpdateReference *ref); void cr_updaterecord_append_collection(cr_UpdateRecord *record, cr_UpdateCollection *collection); /* * cr_Updateinfo */ cr_UpdateInfo * cr_updateinfo_new(void); void cr_updateinfo_free(cr_UpdateInfo *uinfo); void cr_updateinfo_apped_record(cr_UpdateInfo *uinfo, cr_UpdateRecord *record); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_UPDATEINFO_H__ */ createrepo_c-0.17.0/src/version.h.in000066400000000000000000000024401400672373200172700ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_VERSION_H__ #define __C_CREATEREPOLIB_VERSION_H__ /** \defgroup version Library version information. 
*/ #ifdef __cplusplus extern "C" { #endif /**@{*/ #define CR_VERSION_MAJOR @CR_MAJOR@ /*!< major library version */ #define CR_VERSION_MINOR @CR_MINOR@ /*!< minor library version */ #define CR_VERSION_PATCH @CR_PATCH@ /*!< patch library version */ /**@}*/ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_VERSION_H__ */ createrepo_c-0.17.0/src/xml_dump.c000066400000000000000000000234031400672373200170200ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "error.h" #include "misc.h" #include "xml_dump.h" #include "xml_dump_internal.h" void cr_xml_dump_init() { xmlInitParser(); } void cr_xml_dump_cleanup() { xmlCleanupParser(); } gboolean cr_hascontrollchars(const unsigned char *str) { while (*str) { if (*str < 32 && (*str != 9 && *str != 10 && *str != 13)) return TRUE; ++str; } return FALSE; } gchar * cr_prepend_protocol(const gchar *url) { if (url && *url == '/') return g_strconcat("file://", url, NULL); return g_strdup(url); } void cr_latin1_to_utf8(const unsigned char *in, unsigned char *out) { // http://stackoverflow.com/questions/4059775/convert-iso-8859-1-strings-to-utf-8-in-c-c/4059934#4059934 // This function converts latin1 to utf8 in effective and thread-safe way. while (*in) { if (*in<128) { if (*in < 32 && (*in != 9 && *in != 10 && *in != 13)) { ++in; continue; } *out++=*in++; } else if (*in<192) { // Found latin1 (iso-8859-1) control code. // The string is probably misencoded cp-1252 and not a real latin1. // Just skip this character. 
in++; continue; } else { *out++=0xc2+(*in>0xbf); *out++=(*in++&0x3f)+0x80; } } *out = '\0'; } xmlNodePtr cr_xmlNewTextChild(xmlNodePtr parent, xmlNsPtr ns, const xmlChar *name, const xmlChar *orig_content) { int free_content = 0; xmlChar *content; xmlNodePtr child; if (!orig_content) { content = BAD_CAST ""; } else if (xmlCheckUTF8(orig_content)) { content = (xmlChar *) orig_content; } else { size_t len = strlen((const char *) orig_content); content = malloc(sizeof(xmlChar)*len*2 + 1); cr_latin1_to_utf8(orig_content, content); free_content = 1; } child = xmlNewTextChild(parent, ns, name, content); if (free_content) free(content); return child; } xmlAttrPtr cr_xmlNewProp(xmlNodePtr node, const xmlChar *name, const xmlChar *orig_content) { int free_content = 0; xmlChar *content; xmlAttrPtr attr; if (!orig_content) { content = BAD_CAST ""; } else if (xmlCheckUTF8(orig_content)) { content = (xmlChar *) orig_content; } else { size_t len = strlen((const char *) orig_content); content = malloc(sizeof(xmlChar)*len*2 + 1); cr_latin1_to_utf8(orig_content, content); free_content = 1; } attr = xmlNewProp(node, name, content); if (free_content) free(content); return attr; } void cr_xml_dump_files(xmlNodePtr node, cr_Package *package, int primary) { if (!node || !package->files) { return; } GSList *element = NULL; for(element = package->files; element; element=element->next) { cr_PackageFile *entry = (cr_PackageFile*) element->data; // File without name or path is suspicious => Skip it if (!(entry->path) || !(entry->name)) { continue; } // String concatenation (path + basename) gchar *fullname; fullname = g_strconcat(entry->path, entry->name, NULL); if (!fullname) { continue; } // Skip a file if we want primary files and the file is not one if (primary && !cr_is_primary(fullname)) { g_free(fullname); continue; } // *********************************** // Element: file // ************************************ xmlNodePtr file_node; file_node = cr_xmlNewTextChild(node, NULL, BAD_CAST "file", BAD_CAST fullname); g_free(fullname); // Write type (skip type if type value is empty of "file") if (entry->type && entry->type[0] != '\0' && strcmp(entry->type, "file")) { cr_xmlNewProp(file_node, BAD_CAST "type", BAD_CAST entry->type); } } } gboolean cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(GSList *dep) { GSList *element; for (element = dep; element; element=g_slist_next(element)) { cr_Dependency *d = element->data; if ((d->name && cr_hascontrollchars((unsigned char *) d->name)) || (d->epoch && cr_hascontrollchars((unsigned char *) d->epoch)) || (d->version && cr_hascontrollchars((unsigned char *) d->version)) || (d->release && cr_hascontrollchars((unsigned char *) d->release))) { return 1; } } return 0; } gboolean cr_Package_contains_forbidden_control_chars(cr_Package *pkg) { if ((pkg->name && cr_hascontrollchars((unsigned char *) pkg->name)) || (pkg->arch && cr_hascontrollchars((unsigned char *) pkg->arch)) || (pkg->version && cr_hascontrollchars((unsigned char *) pkg->version)) || (pkg->epoch && cr_hascontrollchars((unsigned char *) pkg->epoch)) || (pkg->release && cr_hascontrollchars((unsigned char *) pkg->release)) || (pkg->summary && cr_hascontrollchars((unsigned char *) pkg->summary)) || (pkg->description && cr_hascontrollchars((unsigned char *) pkg->description)) || (pkg->url && cr_hascontrollchars((unsigned char *) pkg->url)) || (pkg->rpm_license && cr_hascontrollchars((unsigned char *) pkg->rpm_license)) || (pkg->rpm_vendor && cr_hascontrollchars((unsigned char *) pkg->rpm_vendor)) || 
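/* ------------------------------------------------------------------------
 * Editor's illustrative sketch (not part of the original file): callers of
 * cr_latin1_to_utf8() must provide an output buffer of at least
 * (strlen(in) * 2 + 1) bytes -- in the worst case every input byte expands
 * to two UTF-8 bytes (see also the note in xml_dump.h). A hypothetical
 * helper honouring that contract:
 * ------------------------------------------------------------------------ */
static gchar *
example_latin1_to_utf8_dup(const gchar *in)
{
    size_t len = strlen(in);
    gchar *out = g_malloc(len * 2 + 1);   /* worst case: every byte doubles */
    cr_latin1_to_utf8((const unsigned char *) in, (unsigned char *) out);
    return out;                           /* caller frees with g_free() */
}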
(pkg->rpm_group && cr_hascontrollchars((unsigned char *) pkg->rpm_group)) || (pkg->rpm_buildhost && cr_hascontrollchars((unsigned char *) pkg->rpm_buildhost)) || (pkg->rpm_sourcerpm && cr_hascontrollchars((unsigned char *) pkg->rpm_sourcerpm)) || (pkg->rpm_packager && cr_hascontrollchars((unsigned char *) pkg->rpm_packager)) || (pkg->location_href && cr_hascontrollchars((unsigned char *) pkg->location_href)) || (pkg->location_base && cr_hascontrollchars((unsigned char *) pkg->location_base))) { return 1; } if (cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->requires) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->provides) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->conflicts) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->obsoletes) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->suggests) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->enhances) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->recommends) || cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(pkg->supplements)) { return 1; } GSList *element; for (element = pkg->files; element; element=g_slist_next(element)) { cr_PackageFile *f = element->data; if ((f->name && cr_hascontrollchars((unsigned char *) f->name)) || (f->path && cr_hascontrollchars((unsigned char *) f->path))) { return 1; } } for (element = pkg->changelogs; element; element=g_slist_next(element)) { cr_ChangelogEntry *ch = element->data; if ((ch->author && cr_hascontrollchars((unsigned char *) ch->author)) || (ch->changelog && cr_hascontrollchars((unsigned char *) ch->changelog))) { return 1; } } return 0; } struct cr_XmlStruct cr_xml_dump(cr_Package *pkg, GError **err) { struct cr_XmlStruct result; GError *tmp_err = NULL; assert(!err || *err == NULL); result.primary = NULL; result.filelists = NULL; result.other = NULL; if (!pkg) return result; if (cr_Package_contains_forbidden_control_chars(pkg)) { g_set_error(err, CREATEREPO_C_ERROR, CRE_XMLDATA, "Forbidden control chars found (ASCII values <32 except 9, 10 and 13)."); return result; } result.primary = cr_xml_dump_primary(pkg, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); return result; } result.filelists = cr_xml_dump_filelists(pkg, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); g_free(result.primary); result.primary = NULL; return result; } result.other = cr_xml_dump_other(pkg, &tmp_err); if (tmp_err) { g_propagate_error(err, tmp_err); g_free(result.primary); result.primary = NULL; g_free(result.filelists); result.filelists = NULL; return result; } return result; } createrepo_c-0.17.0/src/xml_dump.h000066400000000000000000000144231400672373200170270ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_XML_DUMP_H__ #define __C_CREATEREPOLIB_XML_DUMP_H__ #ifdef __cplusplus extern "C" { #endif #include #include "deltarpms.h" #include "package.h" #include "repomd.h" #include "updateinfo.h" /** \defgroup xml_dump XML dump API. * * Example: * \code * cr_Package *pkg; * struct cr_XmlStruct xml; * * cr_xml_dump_init(); * cr_package_parser_init(); * * pkg = cr_package_from_rpm_base("path/to/rpm.rpm", 5, CR_HDRR_NONE, NULL); * * xml = cr_xml_dump(pkg, NULL); * * cr_package_free(pkg); * * printf("Primary XML chunk:\n%s\n", xml.primary); * printf("Filelists XML chunk:\n%s\n", xml.filelists); * printf("Other XML chunk:\n%s\n", xml.other); * * free(xml.primary); * free(xml.filelists); * free(xml.other); * * cr_package_parser_cleanup(); * cr_xml_dump_cleanup(); * \endcode * * \addtogroup xml_dump * @{ */ /** Default namespace for primary.xml */ #define CR_XML_COMMON_NS "http://linux.duke.edu/metadata/common" /** Default namespace for filelists.xml */ #define CR_XML_FILELISTS_NS "http://linux.duke.edu/metadata/filelists" /** Default namespace for other.xml */ #define CR_XML_OTHER_NS "http://linux.duke.edu/metadata/other" /** Default namespace for repomd.xml */ #define CR_XML_REPOMD_NS "http://linux.duke.edu/metadata/repo" /** Namespace for rpm (used in primary.xml and repomd.xml) */ #define CR_XML_RPM_NS "http://linux.duke.edu/metadata/rpm" /** Xml chunks for primary.xml, filelists.xml and other.xml. */ struct cr_XmlStruct { char *primary; /*!< XML chunk for primary.xml */ char *filelists; /*!< XML chunk for filelists.xml */ char *other; /*!< XML chunk for other.xml */ }; /** Initialize dumping part of library (Initialize libxml2). */ void cr_xml_dump_init(); /** Cleanup initialized dumping part of library */ void cr_xml_dump_cleanup(); /** Generate primary xml chunk from cr_Package. * @param package cr_Package * @param err **GError * @return xml chunk string or NULL on error */ char *cr_xml_dump_primary(cr_Package *package, GError **err); /** Generate filelists xml chunk from cr_Package. * @param package cr_Package * @param err **GError * @return xml chunk string or NULL on error */ char *cr_xml_dump_filelists(cr_Package *package, GError **err); /** Generate other xml chunk from cr_Package. * @param package cr_Package * @param err **GError * @return xml chunk string or NULL on error */ char *cr_xml_dump_other(cr_Package *package, GError **err); /** Generate all three xml chunks (primary, filelists, other) from cr_Package. * @param package cr_Package * @param err **GError * @return cr_XmlStruct */ struct cr_XmlStruct cr_xml_dump(cr_Package *package, GError **err); /** Generate xml representation of cr_Repomd. * @param repomd cr_Repomd * @param err **GError * @return repomd.xml content */ char *cr_xml_dump_repomd(cr_Repomd *repomd, GError **err); /** Generate xml representation of cr_UpdateInfo. 
* @param updateinfo cr_UpdateInfo * @param err **GError * @return repomd.xml content */ char *cr_xml_dump_updateinfo(cr_UpdateInfo *updateinfo, GError **err); /** Generate xml representation of cr_UpdateRecord * @param rec cr_UpdateRecord * @param err **GError * @return xml chunk string or NULL on error */ char *cr_xml_dump_updaterecord(cr_UpdateRecord *rec, GError **err); /** Generate xml representation of cr_DeltaPackage * @param dpkg cr_DeltaPackage * @param err **GError * @return xml chunk string or NULL on error */ char *cr_xml_dump_deltapackage(cr_DeltaPackage *dpkg, GError **err); /** Prepare string to xml dump. * If string is not utf8 it is converted (source encoding is supposed to be * iso-8859-1). * Control chars (chars with value <32 except 9, 10 and 13) are excluded. * * @param in input string. * @param out output string. space of output string must be * at least (strlen(in) * 2 + 1) * sizeof(char) */ void cr_latin1_to_utf8(const unsigned char *in, unsigned char *out) __attribute__ ((hot)); /** * Check if string contains chars with value <32 (except 9, 10 and 13). * * @param str String (NOT NULL!!!!) * @return TRUE if at leas one char with value <32 (except the * 9, 10, 13) is present in the string. */ gboolean cr_hascontrollchars(const unsigned char *str); /** * Prepend protocol if necessary * * @param url input url * @return output string, must be freed */ gchar *cr_prepend_protocol(const gchar *url); /** Check if package contains any strings with chars * with value <32 (except 9, 10 and 13), using cr_hascontrollchars * * @param pkg the cr_Package in question * @return boolean value */ gboolean cr_Package_contains_forbidden_control_chars(cr_Package *pkg); /** Check if list of cr_Dependency stucts contains any strings with chars * with value <32 (except 9, 10 and 13), using cr_hascontrollchars * * @param deps the GSList of cr_Dependencies in question * @return boolean value */ gboolean cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(GSList *deps); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_XML_DUMP_H__ */ createrepo_c-0.17.0/src/xml_dump_deltapackage.c000066400000000000000000000105451400672373200215100ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include #include "deltarpms.h" #include "error.h" #include "misc.h" #include "package.h" #include "xml_dump.h" #include "xml_dump_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define INDENT 4 void cr_xml_dump_delta(xmlNodePtr root, cr_DeltaPackage *package) { /*********************************** Element: delta ************************************/ cr_NEVR * nevr = cr_str_to_nevr(package->nevr); // Add oldepoch attribute cr_xmlNewProp(root, BAD_CAST "oldepoch", BAD_CAST ((nevr->epoch && *(nevr->epoch)) ? nevr->epoch : "0")); // Add oldversion attribute cr_xmlNewProp(root, BAD_CAST "oldversion", BAD_CAST nevr->version); // Add oldrelease attribute cr_xmlNewProp(root, BAD_CAST "oldrelease", BAD_CAST nevr->release); cr_nevr_free(nevr); /*********************************** Element: filename ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "filename", BAD_CAST package->package->location_href); /*********************************** Element: sequence ************************************/ char *sequence = g_strconcat(package->nevr, "-", package->sequence, NULL); cr_xmlNewTextChild(root, NULL, BAD_CAST "sequence", BAD_CAST sequence); g_free(sequence); /*********************************** Element: size ************************************/ char size_str[SIZE_STR_MAX_LEN]; g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->package->size_package); cr_xmlNewTextChild(root, NULL, BAD_CAST "size", BAD_CAST size_str); /*********************************** Element: checksum ************************************/ xmlNodePtr checksum; checksum = cr_xmlNewTextChild(root, NULL, BAD_CAST "checksum", BAD_CAST package->package->pkgId); cr_xmlNewProp(checksum, BAD_CAST "type", BAD_CAST package->package->checksum_type); } char * cr_xml_dump_deltapackage(cr_DeltaPackage *package, GError **err) { xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!package) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "No package object to dump specified"); return NULL; } // Dump IT! xmlBufferPtr buf = xmlBufferCreate(); if (buf == NULL) { g_critical("%s: Error creating the xml buffer", __func__); g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot create an xml buffer"); return NULL; } root = xmlNewNode(NULL, BAD_CAST "delta"); cr_xml_dump_delta(root, package); // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory xmlNodeDump(buf, NULL, root, 2, FORMAT_XML); assert(buf->content); // First line in the buf is not indented, we must indent it by ourself result = g_malloc(sizeof(char *) * buf->use + INDENT + 1); for (int x = 0; x < INDENT; x++) result[x] = ' '; memcpy((void *) result+INDENT, buf->content, buf->use); result[buf->use + INDENT] = '\n'; result[buf->use + INDENT + 1] = '\0'; // Cleanup xmlBufferFree(buf); xmlFreeNode(root); return result; } createrepo_c-0.17.0/src/xml_dump_filelists.c000066400000000000000000000062551400672373200211040ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "error.h" #include "package.h" #include "xml_dump.h" #include "xml_dump_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR void cr_xml_dump_filelists_items(xmlNodePtr root, cr_Package *package) { /*********************************** Element: package ************************************/ // Add pkgid attribute cr_xmlNewProp(root, BAD_CAST "pkgid", BAD_CAST package->pkgId); // Add name attribute cr_xmlNewProp(root, BAD_CAST "name", BAD_CAST package->name); // Add arch attribute cr_xmlNewProp(root, BAD_CAST "arch", BAD_CAST package->arch); /*********************************** Element: version ************************************/ xmlNodePtr version; // Add version element version = xmlNewChild(root, NULL, BAD_CAST "version", NULL); // Write version attribute epoch cr_xmlNewProp(version, BAD_CAST "epoch", BAD_CAST package->epoch); // Write version attribute ver cr_xmlNewProp(version, BAD_CAST "ver", BAD_CAST package->version); // Write version attribute rel cr_xmlNewProp(version, BAD_CAST "rel", BAD_CAST package->release); // Files dump cr_xml_dump_files(root, package, 0); } char * cr_xml_dump_filelists(cr_Package *package, GError **err) { xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!package) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "No package object to dump specified"); return NULL; } // Dump IT! xmlBufferPtr buf = xmlBufferCreate(); if (buf == NULL) { g_critical("%s: Error creating the xml buffer", __func__); g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot create an xml buffer"); return NULL; } root = xmlNewNode(NULL, BAD_CAST "package"); cr_xml_dump_filelists_items(root, package); // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory xmlNodeDump(buf, NULL, root, FORMAT_LEVEL, FORMAT_XML); assert(buf->content); result = g_strndup((char *) buf->content, (buf->use+1)); result[buf->use] = '\n'; result[buf->use+1] = '\0'; // Cleanup xmlBufferFree(buf); xmlFreeNode(root); return result; } createrepo_c-0.17.0/src/xml_dump_internal.h000066400000000000000000000060331400672373200207210ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef __C_CREATEREPOLIB_XML_DUMP_PRIVATE_H__ #define __C_CREATEREPOLIB_XML_DUMP_PRIVATE_H__ #ifdef __cplusplus extern "C" { #endif #include "package.h" #include #define XML_DOC_VERSION "1.0" #define XML_ENCODING "UTF-8" #define FORMAT_XML 1 #define FORMAT_LEVEL 0 #define DATE_STR_MAX_LEN 32 #define SIZE_STR_MAX_LEN 32 #if DATE_STR_MAX_LEN > SIZE_STR_MAX_LEN #define DATESIZE_STR_MAX_LEN DATE_STR_MAX_LEN #else #define DATESIZE_STR_MAX_LEN SIZE_STR_MAX_LEN #endif /** Dump files from the package and append them to the node as childrens. * @param node parent xml node * @param package cr_Package * @param primary process only primary files (see cr_is_primary() function * in the misc module) */ void cr_xml_dump_files(xmlNodePtr node, cr_Package *package, int primary); /** Createrepo_c wrapper over libxml xmlNewTextChild. * It allows content to be NULL and non UTF-8 (if content is no UTF8 * then iso-8859-1 is assumed). */ xmlNodePtr cr_xmlNewTextChild(xmlNodePtr parent, xmlNsPtr ns, const xmlChar *name, const xmlChar *content); /** Inserts new node only if its value is not NULL */ static inline xmlNodePtr cr_xmlNewTextChild_c(xmlNodePtr parent, xmlNsPtr ns, const xmlChar *name, const xmlChar *content) { if (!content) return NULL; return cr_xmlNewTextChild(parent, ns, name, content); } /** Createrepo_c wrapper over the libxml xmlNewProp. * It allows content to be NULL and non UTF-8 (if content is no UTF8 * then iso-8859-1 is assumed) */ xmlAttrPtr cr_xmlNewProp(xmlNodePtr node, const xmlChar *name, const xmlChar *value); /** Inserts new proprety (attribute) only if its value is not NULL */ static inline xmlAttrPtr cr_xmlNewProp_c(xmlNodePtr node, const xmlChar *name, const xmlChar *orig_content) { if (!orig_content) return NULL; return cr_xmlNewProp(node, name, orig_content); } #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_XML_DUMP_PRIVATE_H__ */ createrepo_c-0.17.0/src/xml_dump_other.c000066400000000000000000000103551400672373200202230ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include #include "error.h" #include "package.h" #include "xml_dump.h" #include "xml_dump_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR void cr_xml_dump_other_changelog(xmlNodePtr root, cr_Package *package) { if (!package->changelogs) { return; } GSList *element = NULL; for(element = package->changelogs; element; element=element->next) { cr_ChangelogEntry *entry = (cr_ChangelogEntry*) element->data; assert(entry); // *********************************** // Element: Changelog // *********************************** xmlNodePtr changelog; // Add changelog element changelog = cr_xmlNewTextChild(root, NULL, BAD_CAST "changelog", BAD_CAST entry->changelog); // Write param author cr_xmlNewProp(changelog, BAD_CAST "author", BAD_CAST entry->author); // Write param date char date_str[DATE_STR_MAX_LEN]; g_snprintf(date_str, DATE_STR_MAX_LEN, "%"G_GINT64_FORMAT, entry->date); xmlNewProp(changelog, BAD_CAST "date", BAD_CAST date_str); } } void cr_xml_dump_other_items(xmlNodePtr root, cr_Package *package) { /*********************************** Element: package ************************************/ // Add pkgid attribute cr_xmlNewProp(root, BAD_CAST "pkgid", BAD_CAST package->pkgId); // Add name attribute cr_xmlNewProp(root, BAD_CAST "name", BAD_CAST package->name); // Add arch attribute cr_xmlNewProp(root, BAD_CAST "arch", BAD_CAST package->arch); /*********************************** Element: version ************************************/ xmlNodePtr version; // Add version element version = xmlNewChild(root, NULL, BAD_CAST "version", NULL); // Write version attribute epoch xmlNewProp(version, BAD_CAST "epoch", BAD_CAST package->epoch); // Write version attribute ver xmlNewProp(version, BAD_CAST "ver", BAD_CAST package->version); // Write version attribute rel xmlNewProp(version, BAD_CAST "rel", BAD_CAST package->release); // Changelog dump cr_xml_dump_other_changelog(root, package); } char * cr_xml_dump_other(cr_Package *package, GError **err) { xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!package) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "No package object to dump specified"); return NULL; } // Dump IT! xmlBufferPtr buf = xmlBufferCreate(); if (buf == NULL) { g_critical("%s: Error creating the xml buffer", __func__); g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot create an xml buffer"); return NULL; } root = xmlNewNode(NULL, BAD_CAST "package"); cr_xml_dump_other_items(root, package); // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory xmlNodeDump(buf, NULL, root, FORMAT_LEVEL, FORMAT_XML); assert(buf->content); result = g_strndup((char *) buf->content, (buf->use+1)); result[buf->use] = '\n'; result[buf->use+1] = '\0'; // Cleanup xmlBufferFree(buf); xmlFreeNode(root); return result; } createrepo_c-0.17.0/src/xml_dump_primary.c000066400000000000000000000304411400672373200205630ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "error.h" #include "package.h" #include "xml_dump.h" #include "xml_dump_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR typedef enum { PCO_TYPE_PROVIDES, PCO_TYPE_CONFLICTS, PCO_TYPE_OBSOLETES, PCO_TYPE_REQUIRES, PCO_TYPE_SUGGESTS, PCO_TYPE_ENHANCES, PCO_TYPE_RECOMMENDS, PCO_TYPE_SUPPLEMENTS, PCO_TYPE_SENTINEL, } PcoType; typedef struct { const char *elemname; size_t listoffset; } PcoInfo; // Order of this list MUST be the same as the order of the related constants above! static PcoInfo pco_info[] = { { "rpm:provides", offsetof(cr_Package, provides) }, { "rpm:conflicts", offsetof(cr_Package, conflicts) }, { "rpm:obsoletes", offsetof(cr_Package, obsoletes) }, { "rpm:requires", offsetof(cr_Package, requires) }, { "rpm:suggests", offsetof(cr_Package, suggests) }, { "rpm:enhances", offsetof(cr_Package, enhances) }, { "rpm:recommends", offsetof(cr_Package, recommends) }, { "rpm:supplements", offsetof(cr_Package, supplements) }, { NULL, 0 }, }; void cr_xml_dump_primary_dump_pco(xmlNodePtr root, cr_Package *package, PcoType pcotype) { const char *elem_name; GSList *list = NULL; if (pcotype >= PCO_TYPE_SENTINEL) return; elem_name = pco_info[pcotype].elemname; list = *((GSList **) ((size_t) package + pco_info[pcotype].listoffset)); if (!list) return; /*********************************** PCOR Element: provides, oboletes, conflicts, requires ************************************/ xmlNodePtr pcor_node; pcor_node = xmlNewChild(root, NULL, BAD_CAST elem_name, NULL); GSList *element = NULL; for(element = list; element; element=element->next) { cr_Dependency *entry = (cr_Dependency*) element->data; assert(entry); if (!entry->name || entry->name[0] == '\0') { continue; } /*********************************** Element: entry ************************************/ xmlNodePtr entry_node; entry_node = xmlNewChild(pcor_node, NULL, BAD_CAST "rpm:entry", NULL); cr_xmlNewProp(entry_node, BAD_CAST "name", BAD_CAST entry->name); if (entry->flags && entry->flags[0] != '\0') { cr_xmlNewProp(entry_node, BAD_CAST "flags", BAD_CAST entry->flags); if (entry->epoch && entry->epoch[0] != '\0') { cr_xmlNewProp(entry_node, BAD_CAST "epoch", BAD_CAST entry->epoch); } if (entry->version && entry->version[0] != '\0') { cr_xmlNewProp(entry_node, BAD_CAST "ver", BAD_CAST entry->version); } if (entry->release && entry->release[0] != '\0') { cr_xmlNewProp(entry_node, BAD_CAST "rel", BAD_CAST entry->release); } } if (pcotype == PCO_TYPE_REQUIRES && entry->pre) { // Add pre attribute xmlNewProp(entry_node, BAD_CAST "pre", BAD_CAST "1"); } } } void cr_xml_dump_primary_base_items(xmlNodePtr root, cr_Package *package) { /*********************************** Element: package ************************************/ // Add an attribute with type to package xmlNewProp(root, BAD_CAST "type", BAD_CAST "rpm"); /*********************************** Element: name ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "name", BAD_CAST package->name); /*********************************** Element: arch ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "arch", BAD_CAST package->arch); /*********************************** Element: version ************************************/ xmlNodePtr version; // Add version element version = 
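/* ------------------------------------------------------------------------
 * Editor's note on the pco_info[] table above: cr_xml_dump_primary_dump_pco()
 * resolves the dependency list for a given PcoType by adding the recorded
 * offsetof() value to the package address and reading a GSList pointer from
 * there.  For example, for PCO_TYPE_REQUIRES the lookup
 *
 *     GSList *deps = *(GSList **)((size_t) package
 *                                 + pco_info[PCO_TYPE_REQUIRES].listoffset);
 *
 * is equivalent to reading package->requires directly, which is why the
 * order of pco_info[] must match the order of the PcoType constants.
 * ------------------------------------------------------------------------ */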
xmlNewChild(root, NULL, BAD_CAST "version", NULL); // Write version attribute epoch cr_xmlNewProp(version, BAD_CAST "epoch", BAD_CAST package->epoch); // Write version attribute ver cr_xmlNewProp(version, BAD_CAST "ver", BAD_CAST package->version); // Write version attribute rel cr_xmlNewProp(version, BAD_CAST "rel", BAD_CAST package->release); /*********************************** Element: checksum ************************************/ xmlNodePtr checksum; checksum = cr_xmlNewTextChild(root, NULL, BAD_CAST "checksum", BAD_CAST package->pkgId); // Write checksum attribute checksum_type cr_xmlNewProp(checksum, BAD_CAST "type", BAD_CAST package->checksum_type); // Write checksum attribute pkgid xmlNewProp(checksum, BAD_CAST "pkgid", BAD_CAST "YES"); /*********************************** Element: summary ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "summary", BAD_CAST package->summary); /*********************************** Element: description ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "description", BAD_CAST package->description); /*********************************** Element: packager ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "packager", BAD_CAST package->rpm_packager); /*********************************** Element: url ************************************/ cr_xmlNewTextChild(root, NULL, BAD_CAST "url", BAD_CAST package->url); /*********************************** Element: time ************************************/ xmlNodePtr time; char date_str[DATE_STR_MAX_LEN]; time = xmlNewChild(root, NULL, BAD_CAST "time", NULL); // Write time attribute file g_snprintf(date_str, DATE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->time_file); xmlNewProp(time, BAD_CAST "file", BAD_CAST date_str); // Write time attribute build g_snprintf(date_str, DATE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->time_build); xmlNewProp(time, BAD_CAST "build", BAD_CAST date_str); /*********************************** Element: size ************************************/ xmlNodePtr size; char size_str[SIZE_STR_MAX_LEN]; size = xmlNewChild(root, NULL, BAD_CAST "size", NULL); // Write size attribute package g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->size_package); xmlNewProp(size, BAD_CAST "package", BAD_CAST size_str); // Write size attribute installed g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->size_installed); xmlNewProp(size, BAD_CAST "installed", BAD_CAST size_str); // Write size attribute archive g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->size_archive); xmlNewProp(size, BAD_CAST "archive", BAD_CAST size_str); /*********************************** Element: location ************************************/ xmlNodePtr location; location = xmlNewChild(root, NULL, BAD_CAST "location", NULL); // Write location attribute base if (package->location_base && package->location_base[0] != '\0') { gchar *location_base_with_protocol = NULL; location_base_with_protocol = cr_prepend_protocol(package->location_base); cr_xmlNewProp(location, BAD_CAST "xml:base", BAD_CAST location_base_with_protocol); g_free(location_base_with_protocol); } // Write location attribute href cr_xmlNewProp(location, BAD_CAST "href", BAD_CAST package->location_href); /*********************************** Element: format ************************************/ xmlNodePtr format; format = xmlNewChild(root, NULL, BAD_CAST "format", NULL); /*********************************** Element: license 
************************************/ cr_xmlNewTextChild(format, NULL, BAD_CAST "rpm:license", BAD_CAST package->rpm_license); /*********************************** Element: vendor ************************************/ cr_xmlNewTextChild(format, NULL, BAD_CAST "rpm:vendor", BAD_CAST package->rpm_vendor); /*********************************** Element: group ************************************/ cr_xmlNewTextChild(format, NULL, BAD_CAST "rpm:group", BAD_CAST package->rpm_group); /*********************************** Element: buildhost ************************************/ cr_xmlNewTextChild(format, NULL, BAD_CAST "rpm:buildhost", BAD_CAST package->rpm_buildhost); /*********************************** Element: sourcerpm ************************************/ cr_xmlNewTextChild(format, NULL, BAD_CAST "rpm:sourcerpm", BAD_CAST package->rpm_sourcerpm); /*********************************** Element: header-range ************************************/ xmlNodePtr header_range; header_range = xmlNewChild(format, NULL, BAD_CAST "rpm:header-range", NULL); // Write header-range attribute hdrstart g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->rpm_header_start); xmlNewProp(header_range, BAD_CAST "start", BAD_CAST size_str); // Write header-range attribute hdrend g_snprintf(size_str, SIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, package->rpm_header_end); xmlNewProp(header_range, BAD_CAST "end", BAD_CAST size_str); // Files dump cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_PROVIDES); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_REQUIRES); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_CONFLICTS); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_OBSOLETES); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_SUGGESTS); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_ENHANCES); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_RECOMMENDS); cr_xml_dump_primary_dump_pco(format, package, PCO_TYPE_SUPPLEMENTS); cr_xml_dump_files(format, package, 1); } char * cr_xml_dump_primary(cr_Package *package, GError **err) { xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!package) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "No package object to dump specified"); return NULL; } // Dump IT! xmlBufferPtr buf = xmlBufferCreate(); if (buf == NULL) { g_critical("%s: Error creating the xml buffer", __func__); g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot create an xml buffer"); return NULL; } root = xmlNewNode(NULL, BAD_CAST "package"); cr_xml_dump_primary_base_items(root, package); // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory xmlNodeDump(buf, NULL, root, FORMAT_LEVEL, FORMAT_XML); assert(buf->content); result = g_strndup((char *) buf->content, (buf->use+1)); result[buf->use] = '\n'; result[buf->use+1] = '\0'; // Cleanup xmlBufferFree(buf); xmlFreeNode(root); return result; } createrepo_c-0.17.0/src/xml_dump_repomd.c000066400000000000000000000207071400672373200203720ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include #include "error.h" #include "repomd.h" #include "xml_dump.h" #include "xml_dump_internal.h" void cr_xml_dump_repomd_record(xmlNodePtr root, cr_RepomdRecord *rec) { xmlNodePtr data, node; gchar str_buffer[DATESIZE_STR_MAX_LEN]; if (!rec) return; // Data element data = xmlNewChild(root, NULL, BAD_CAST "data", NULL); xmlNewProp(data, BAD_CAST "type", BAD_CAST rec->type); // Checksum element node = cr_xmlNewTextChild(data, NULL, BAD_CAST "checksum", BAD_CAST rec->checksum); cr_xmlNewProp(node, BAD_CAST "type", BAD_CAST rec->checksum_type); // Checksum_open element if (rec->checksum_open) { node = cr_xmlNewTextChild(data, NULL, BAD_CAST "open-checksum", BAD_CAST rec->checksum_open); cr_xmlNewProp(node, BAD_CAST "type", BAD_CAST rec->checksum_open_type); } // Checksum_header element if (rec->checksum_header) { node = cr_xmlNewTextChild(data, NULL, BAD_CAST "header-checksum", BAD_CAST rec->checksum_header); cr_xmlNewProp(node, BAD_CAST "type", BAD_CAST rec->checksum_header_type); } // Location element node = xmlNewChild(data, NULL, BAD_CAST "location", NULL); cr_xmlNewProp(node, BAD_CAST "href", BAD_CAST rec->location_href); if (rec->location_base) cr_xmlNewProp(node, BAD_CAST "xml:base", BAD_CAST rec->location_base); // Timestamp element g_snprintf(str_buffer, DATESIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, rec->timestamp); xmlNewChild(data, NULL, BAD_CAST "timestamp", BAD_CAST str_buffer); // Size element g_snprintf(str_buffer, DATESIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, rec->size); xmlNewChild(data, NULL, BAD_CAST "size", BAD_CAST str_buffer); // Open-size element if (rec->size_open != -1) { g_snprintf(str_buffer, DATESIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, rec->size_open); xmlNewChild(data, NULL, BAD_CAST "open-size", BAD_CAST str_buffer); } // Header-size element if (rec->checksum_header && rec->size_header != -1) { g_snprintf(str_buffer, DATESIZE_STR_MAX_LEN, "%"G_GINT64_FORMAT, rec->size_header); xmlNewChild(data, NULL, BAD_CAST "header-size", BAD_CAST str_buffer); } // Database_version element if (g_str_has_suffix((char *) rec->type, "_db")) { g_snprintf(str_buffer, DATESIZE_STR_MAX_LEN, "%d", rec->db_ver); xmlNewChild(data, NULL, BAD_CAST "database_version", BAD_CAST str_buffer); } } void cr_xml_dump_repomd_body(xmlNodePtr root, cr_Repomd *repomd) { GSList *element; // Add namespaces to the root element xmlNewNs(root, BAD_CAST CR_XML_REPOMD_NS, BAD_CAST NULL); xmlNewNs(root, BAD_CAST CR_XML_RPM_NS, BAD_CAST "rpm"); // ********************************** // Element: Revision // ********************************** if (repomd->revision) { cr_xmlNewTextChild(root, NULL, BAD_CAST "revision", BAD_CAST repomd->revision); } else { // Use the current time if no revision was explicitly specified gchar *rev = g_strdup_printf("%ld", time(NULL)); xmlNewChild(root, NULL, BAD_CAST "revision", BAD_CAST rev); g_free(rev); } // ********************************** // Element: Repoid // ********************************** if (repomd->repoid) { xmlNodePtr repoid_elem = cr_xmlNewTextChild(root, NULL, BAD_CAST 
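/* ------------------------------------------------------------------------
 * Editor's illustrative sketch (not part of the original file): a typical
 * way to turn a filled-in cr_Repomd into a repomd.xml file on disk. Note
 * that cr_xml_dump_repomd() returns memory allocated by libxml2
 * (xmlDocDumpFormatMemoryEnc), so it is released with xmlFree() here;
 * the file path is just an example.
 * ------------------------------------------------------------------------ */
static gboolean
example_write_repomd_xml(cr_Repomd *repomd, const char *path)
{
    GError *err = NULL;
    char *xml = cr_xml_dump_repomd(repomd, &err);
    if (!xml) {
        g_warning("repomd dump failed: %s", err ? err->message : "unknown error");
        g_clear_error(&err);
        return FALSE;
    }
    gboolean ok = g_file_set_contents(path, xml, -1, NULL);
    xmlFree(xml);
    return ok;
}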
"repoid", BAD_CAST repomd->repoid); if (repomd->repoid_type) cr_xmlNewProp(repoid_elem, BAD_CAST "type", BAD_CAST repomd->repoid_type); } // ********************************** // Element: Contenthash // ********************************** if (repomd->contenthash) { xmlNodePtr contenthash_elem = cr_xmlNewTextChild(root, NULL, BAD_CAST "contenthash", BAD_CAST repomd->contenthash); if (repomd->contenthash_type) cr_xmlNewProp(contenthash_elem, BAD_CAST "type", BAD_CAST repomd->contenthash_type); } // ********************************** // Element: Tags // ********************************** if (repomd->repo_tags || repomd->distro_tags || repomd->content_tags) { GSList *element; xmlNodePtr tags = xmlNewChild(root, NULL, BAD_CAST "tags", NULL); // Content tags element = repomd->content_tags; for (; element; element = g_slist_next(element)) cr_xmlNewTextChild(tags, NULL, BAD_CAST "content", BAD_CAST element->data); // Repo tags element = repomd->repo_tags; for (; element; element = g_slist_next(element)) cr_xmlNewTextChild(tags, NULL, BAD_CAST "repo", BAD_CAST element->data); // Distro tags element = repomd->distro_tags; for (; element; element = g_slist_next(element)) { cr_DistroTag *distro = (cr_DistroTag *) element->data; xmlNodePtr distro_elem = cr_xmlNewTextChild(tags, NULL, BAD_CAST "distro", BAD_CAST distro->val); // Cpeid attribute of distro tag if (distro->cpeid) cr_xmlNewProp(distro_elem, BAD_CAST "cpeid", BAD_CAST distro->cpeid); } } // Dump records for (element = repomd->records; element; element = g_slist_next(element)) { cr_RepomdRecord *rec = element->data; cr_xml_dump_repomd_record(root, rec); } } char * cr_xml_dump_repomd(cr_Repomd *repomd, GError **err) { xmlDocPtr doc; xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!repomd) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "No repomd object to dump specified"); return NULL; } // Dump IT! doc = xmlNewDoc(BAD_CAST XML_DOC_VERSION); root = xmlNewNode(NULL, BAD_CAST "repomd"); cr_xml_dump_repomd_body(root, repomd); xmlDocSetRootElement(doc, root); xmlDocDumpFormatMemoryEnc(doc, (xmlChar **) &result, NULL, XML_ENCODING, FORMAT_XML); // Clean up xmlFreeDoc(doc); return result; } createrepo_c-0.17.0/src/xml_dump_updateinfo.c000066400000000000000000000222011400672373200212310ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include #include "error.h" #include "updateinfo.h" #include "xml_dump.h" #include "xml_dump_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define INDENT 2 void cr_xml_dump_updatecollectionpackages(xmlNodePtr collection, GSList *packages) { for (GSList *elem = packages; elem; elem = g_slist_next(elem)) { cr_UpdateCollectionPackage *pkg = elem->data; xmlNodePtr package; package = xmlNewChild(collection, NULL, BAD_CAST "package", NULL); cr_xmlNewProp_c(package, BAD_CAST "name", BAD_CAST pkg->name); cr_xmlNewProp_c(package, BAD_CAST "version", BAD_CAST pkg->version); cr_xmlNewProp_c(package, BAD_CAST "release", BAD_CAST pkg->release); cr_xmlNewProp_c(package, BAD_CAST "epoch", BAD_CAST pkg->epoch); cr_xmlNewProp_c(package, BAD_CAST "arch", BAD_CAST pkg->arch); cr_xmlNewProp_c(package, BAD_CAST "src", BAD_CAST pkg->src); cr_xmlNewTextChild_c(package, NULL, BAD_CAST "filename", BAD_CAST pkg->filename); if (pkg->sum) { xmlNodePtr sum; sum = cr_xmlNewTextChild_c(package, NULL, BAD_CAST "sum", BAD_CAST pkg->sum); cr_xmlNewProp_c(sum, BAD_CAST "type", BAD_CAST cr_checksum_name_str(pkg->sum_type)); } if (pkg->reboot_suggested) xmlNewChild(package, NULL, BAD_CAST "reboot_suggested", BAD_CAST "True"); if (pkg->restart_suggested) xmlNewChild(package, NULL, BAD_CAST "restart_suggested", BAD_CAST "True"); if (pkg->relogin_suggested) xmlNewChild(package, NULL, BAD_CAST "relogin_suggested", BAD_CAST "True"); } } void cr_xml_dump_updatecollectionmodule(xmlNodePtr collection, cr_UpdateCollectionModule *module) { if (!module) return; xmlNodePtr xml_module; xml_module = xmlNewChild(collection, NULL, BAD_CAST "module", NULL); cr_xmlNewProp_c(xml_module, BAD_CAST "name", BAD_CAST module->name); cr_xmlNewProp_c(xml_module, BAD_CAST "stream", BAD_CAST module->stream); gchar buf[21]; //20 + '\0' is max number of chars of guint64: G_MAXUINT64 (= 18,446,744,073,709,551,615) snprintf(buf, 21, "%" G_GUINT64_FORMAT, module->version); cr_xmlNewProp_c(xml_module, BAD_CAST "version", BAD_CAST buf); cr_xmlNewProp_c(xml_module, BAD_CAST "context", BAD_CAST module->context); cr_xmlNewProp_c(xml_module, BAD_CAST "arch", BAD_CAST module->arch); } void cr_xml_dump_updateinforecord_pkglist(xmlNodePtr update, GSList *collections) { xmlNodePtr pkglist; pkglist = xmlNewChild(update, NULL, BAD_CAST "pkglist", NULL); for (GSList *elem = collections; elem; elem = g_slist_next(elem)) { cr_UpdateCollection *col = elem->data; xmlNodePtr collection; collection = xmlNewChild(pkglist, NULL, BAD_CAST "collection", NULL); cr_xmlNewProp_c(collection, BAD_CAST "short", BAD_CAST col->shortname); cr_xmlNewTextChild_c(collection, NULL, BAD_CAST "name", BAD_CAST col->name); cr_xml_dump_updatecollectionmodule(collection, col->module); cr_xml_dump_updatecollectionpackages(collection, col->packages); } } void cr_xml_dump_updateinforecord_references(xmlNodePtr update, GSList *refs) { xmlNodePtr references; references = xmlNewChild(update, NULL, BAD_CAST "references", NULL); for (GSList *elem = refs; elem; elem = g_slist_next(elem)) { cr_UpdateReference *ref = elem->data; xmlNodePtr reference; reference = xmlNewChild(references, NULL, BAD_CAST "reference", NULL); cr_xmlNewProp_c(reference, BAD_CAST "href", BAD_CAST ref->href); cr_xmlNewProp_c(reference, BAD_CAST "id", BAD_CAST ref->id); cr_xmlNewProp_c(reference, BAD_CAST "type", BAD_CAST ref->type); cr_xmlNewProp_c(reference, BAD_CAST "title", BAD_CAST ref->title); } } xmlNodePtr cr_xml_dump_updateinforecord_internal(xmlNodePtr root, 
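/* ------------------------------------------------------------------------
 * Editor's illustrative sketch (not part of the original file): dumping a
 * single cr_UpdateRecord as an indented XML chunk (e.g. for appending to an
 * updateinfo stream via the cr_XmlFile API). The returned chunk is allocated
 * with g_malloc() by cr_xml_dump_updaterecord(), so g_free() releases it.
 * ------------------------------------------------------------------------ */
static void
example_print_update_record(cr_UpdateRecord *rec)
{
    GError *err = NULL;
    char *chunk = cr_xml_dump_updaterecord(rec, &err);
    if (!chunk) {
        g_warning("updaterecord dump failed: %s",
                  err ? err->message : "unknown error");
        g_clear_error(&err);
        return;
    }
    g_print("%s", chunk);
    g_free(chunk);
}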
cr_UpdateRecord *rec) { xmlNodePtr update, node; if (!rec) return NULL; // Update element if (!root) update = xmlNewNode(NULL, BAD_CAST "update"); else update = xmlNewChild(root, NULL, BAD_CAST "update", NULL); cr_xmlNewProp_c(update, BAD_CAST "from", BAD_CAST rec->from); cr_xmlNewProp_c(update, BAD_CAST "status", BAD_CAST rec->status); cr_xmlNewProp_c(update, BAD_CAST "type", BAD_CAST rec->type); cr_xmlNewProp_c(update, BAD_CAST "version", BAD_CAST rec->version); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "id", BAD_CAST rec->id); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "title", BAD_CAST rec->title); if (rec->issued_date) { node = xmlNewChild(update, NULL, BAD_CAST "issued", NULL); cr_xmlNewProp(node, BAD_CAST "date", BAD_CAST rec->issued_date); } if (rec->updated_date) { node = xmlNewChild(update, NULL, BAD_CAST "updated", NULL); cr_xmlNewProp(node, BAD_CAST "date", BAD_CAST rec->updated_date); } cr_xmlNewTextChild_c(update, NULL, BAD_CAST "rights", BAD_CAST rec->rights); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "release", BAD_CAST rec->release); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "pushcount", BAD_CAST rec->pushcount); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "severity", BAD_CAST rec->severity); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "summary", BAD_CAST rec->summary); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "description", BAD_CAST rec->description); cr_xmlNewTextChild_c(update, NULL, BAD_CAST "solution", BAD_CAST rec->solution); if (rec->reboot_suggested) xmlNewChild(update, NULL, BAD_CAST "reboot_suggested", BAD_CAST "True"); // References cr_xml_dump_updateinforecord_references(update, rec->references); // Pkglist cr_xml_dump_updateinforecord_pkglist(update, rec->collections); return update; } void cr_xml_dump_updateinfo_body(xmlNodePtr root, cr_UpdateInfo *ui) { GSList *element; // Dump updates for (element = ui->updates; element; element = g_slist_next(element)) { cr_UpdateRecord *rec = element->data; cr_xml_dump_updateinforecord_internal(root, rec); } } char * cr_xml_dump_updateinfo(cr_UpdateInfo *updateinfo, GError **err) { xmlDocPtr doc; xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!updateinfo) return NULL; // Dump IT! doc = xmlNewDoc(BAD_CAST XML_DOC_VERSION); root = xmlNewNode(NULL, BAD_CAST "updates"); cr_xml_dump_updateinfo_body(root, updateinfo); xmlDocSetRootElement(doc, root); xmlDocDumpFormatMemoryEnc(doc, (xmlChar **) &result, NULL, XML_ENCODING, FORMAT_XML); // Clean up xmlFreeDoc(doc); return result; } char * cr_xml_dump_updaterecord(cr_UpdateRecord *rec, GError **err) { xmlNodePtr root; char *result; assert(!err || *err == NULL); if (!rec) { g_set_error(err, CREATEREPO_C_ERROR, CRE_BADARG, "No updateinfo object to dump specified"); return NULL; } // Dump IT! 
xmlBufferPtr buf = xmlBufferCreate(); if (buf == NULL) { g_critical("%s: Error creating the xml buffer", __func__); g_set_error(err, ERR_DOMAIN, CRE_MEMORY, "Cannot create an xml buffer"); return NULL; } root = cr_xml_dump_updateinforecord_internal(NULL, rec); // xmlNodeDump seems to be a little bit faster than xmlDocDumpFormatMemory xmlNodeDump(buf, NULL, root, 1, FORMAT_XML); assert(buf->content); // First line in the buf is not indented, we must indent it by ourself result = g_malloc(sizeof(char *) * buf->use + INDENT + 1); for (int x = 0; x < INDENT; x++) result[x] = ' '; memcpy((void *) result+INDENT, buf->content, buf->use); result[buf->use + INDENT] = '\n'; result[buf->use + INDENT + 1] = '\0'; // Cleanup xmlBufferFree(buf); xmlFreeNode(root); return result; } createrepo_c-0.17.0/src/xml_file.c000066400000000000000000000355541400672373200170040ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "xml_file.h" #include #include "error.h" #include "xml_dump.h" #include "compression_wrapper.h" #include "xml_dump_internal.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define XML_HEADER "\n" #define XML_PRIMARY_HEADER XML_HEADER"\n" #define XML_FILELISTS_HEADER XML_HEADER"\n" #define XML_OTHER_HEADER XML_HEADER"\n" #define XML_PRESTODELTA_HEADER XML_HEADER"\n" #define XML_UPDATEINFO_HEADER XML_HEADER"\n" #define XML_MAX_HEADER_SIZE 300 #define XML_RECOMPRESS_BUFFER_SIZE 8192 #define XML_PRIMARY_FOOTER "" #define XML_FILELISTS_FOOTER "" #define XML_OTHER_FOOTER "" #define XML_PRESTODELTA_FOOTER "" #define XML_UPDATEINFO_FOOTER "" cr_XmlFile * cr_xmlfile_sopen(const char *filename, cr_XmlFileType type, cr_CompressionType comtype, cr_ContentStat *stat, GError **err) { cr_XmlFile *f; GError *tmp_err = NULL; assert(filename); assert(type < CR_XMLFILE_SENTINEL); assert(comtype < CR_CW_COMPRESSION_SENTINEL); assert(!err || *err == NULL); if (g_file_test(filename, G_FILE_TEST_EXISTS)) { g_set_error(err, ERR_DOMAIN, CRE_EXISTS, "File already exists"); return NULL; } CR_FILE *cr_f = cr_sopen(filename, CR_CW_MODE_WRITE, comtype, stat, &tmp_err); if (!cr_f) { g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", filename); return NULL; } f = g_new0(cr_XmlFile, 1); f->f = cr_f; f->type = type; f->header = 0; f->footer = 0; f->pkgs = 0; return f; } int cr_xmlfile_set_num_of_pkgs(cr_XmlFile *f, long num, GError **err) { assert(f); assert(!err || *err == NULL); if (f->header != 0) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "Header was already written"); return CRE_BADARG; } if (num < 0) { g_set_error(err, ERR_DOMAIN, CRE_BADARG, "The number must be a positive integer number"); return CRE_BADARG; } f->pkgs = num; return CRE_OK; } int cr_xmlfile_write_xml_header(cr_XmlFile *f, GError **err) { 
const char *xml_header; GError *tmp_err = NULL; assert(f); assert(!err || *err == NULL); assert(f->header == 0); switch (f->type) { case CR_XMLFILE_PRIMARY: xml_header = XML_PRIMARY_HEADER; break; case CR_XMLFILE_FILELISTS: xml_header = XML_FILELISTS_HEADER; break; case CR_XMLFILE_OTHER: xml_header = XML_OTHER_HEADER; break; case CR_XMLFILE_PRESTODELTA: xml_header = XML_PRESTODELTA_HEADER; break; case CR_XMLFILE_UPDATEINFO: xml_header = XML_UPDATEINFO_HEADER; break; default: g_critical("%s: Bad file type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad file type"); return CRE_ASSERT; } if (cr_printf(&tmp_err, f->f, xml_header, f->pkgs) == CR_CW_ERR) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot write XML header: "); return code; } f->header = 1; return cr_end_chunk(f->f, err); } int cr_xmlfile_write_xml_footer(cr_XmlFile *f, GError **err) { const char *xml_footer; GError *tmp_err = NULL; assert(f); assert(!err || *err == NULL); assert(f->footer == 0); switch (f->type) { case CR_XMLFILE_PRIMARY: xml_footer = XML_PRIMARY_FOOTER; break; case CR_XMLFILE_FILELISTS: xml_footer = XML_FILELISTS_FOOTER; break; case CR_XMLFILE_OTHER: xml_footer = XML_OTHER_FOOTER; break; case CR_XMLFILE_PRESTODELTA: xml_footer = XML_PRESTODELTA_FOOTER; break; case CR_XMLFILE_UPDATEINFO: xml_footer = XML_UPDATEINFO_FOOTER; break; default: g_critical("%s: Bad file type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad file type"); return CRE_ASSERT; } cr_puts(f->f, xml_footer, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot write XML footer: "); return code; } f->footer = 1; return CRE_OK; } int cr_xmlfile_add_pkg(cr_XmlFile *f, cr_Package *pkg, GError **err) { char *xml; GError *tmp_err = NULL; assert(f); assert(pkg); assert(!err || *err == NULL); assert(f->footer == 0); switch (f->type) { case CR_XMLFILE_PRIMARY: xml = cr_xml_dump_primary(pkg, &tmp_err); break; case CR_XMLFILE_FILELISTS: xml = cr_xml_dump_filelists(pkg, &tmp_err); break; case CR_XMLFILE_OTHER: xml = cr_xml_dump_other(pkg, &tmp_err); break; default: g_critical("%s: Bad file type", __func__); assert(0); g_set_error(err, ERR_DOMAIN, CRE_ASSERT, "Bad file type"); return CRE_ASSERT; } if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } if (xml) { cr_xmlfile_add_chunk(f, xml, &tmp_err); g_free(xml); if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } } return CRE_OK; } int cr_xmlfile_add_chunk(cr_XmlFile *f, const char* chunk, GError **err) { GError *tmp_err = NULL; assert(f); assert(!err || *err == NULL); assert(f->footer == 0); if (!chunk) return CRE_OK; if (f->header == 0) { cr_xmlfile_write_xml_header(f, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } } cr_puts(f->f, chunk, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while write: "); return code; } return CRE_OK; } int cr_xmlfile_close(cr_XmlFile *f, GError **err) { GError *tmp_err = NULL; assert(!err || *err == NULL); if (!f) return CRE_OK; if (f->header == 0) { cr_xmlfile_write_xml_header(f, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } } if (f->footer == 0) { cr_xmlfile_write_xml_footer(f, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_error(err, tmp_err); return code; } } cr_close(f->f, &tmp_err); if (tmp_err) { int code = 
tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while closing a file: "); return code; } g_free(f); return CRE_OK; } static int write_modified_header(int task_count, int package_count, cr_XmlFile *cr_file, gchar *header_buf, int header_len, GError **err) { GError *tmp_err = NULL; gchar *package_count_string; gchar *task_count_string; int bytes_written = 0; int package_count_string_len = rasprintf(&package_count_string, "packages=\"%i\"", package_count); int task_count_string_len = rasprintf(&task_count_string, "packages=\"%i\"", task_count); gchar *pointer_to_pkgs = g_strstr_len(header_buf, header_len, task_count_string); if (!pointer_to_pkgs){ g_free(package_count_string); g_free(task_count_string); return 0; } gchar *pointer_to_pkgs_end = pointer_to_pkgs + task_count_string_len; bytes_written += cr_write(cr_file->f, header_buf, pointer_to_pkgs - header_buf, &tmp_err); if (!tmp_err) bytes_written += cr_write(cr_file->f, package_count_string, package_count_string_len, &tmp_err); if (!tmp_err) bytes_written += cr_write(cr_file->f, pointer_to_pkgs_end, header_len - (pointer_to_pkgs_end - header_buf), &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while writing header part:"); g_free(package_count_string); g_free(task_count_string); return 0; } g_free(package_count_string); g_free(task_count_string); return bytes_written; } void cr_rewrite_header_package_count(gchar *original_filename, cr_CompressionType xml_compression, int package_count, int task_count, cr_ContentStat *file_stat, gchar *zck_dict_file, GError **err) { GError *tmp_err = NULL; CR_FILE *original_file = cr_open(original_filename, CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while reopening for reading:"); return; } gchar *tmp_xml_filename = g_strconcat(original_filename, ".tmp", NULL); cr_XmlFile *new_file = cr_xmlfile_sopen_primary(tmp_xml_filename, xml_compression, file_stat, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while opening for writing:"); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } // We want to keep identical zchunk chunk sizes, therefore we copy by chunk if (xml_compression == CR_CW_ZCK_COMPRESSION) { if (zck_dict_file){ gchar *zck_dict = NULL; size_t zck_dict_size = 0; if (g_file_get_contents(zck_dict_file, &zck_dict, &zck_dict_size, &tmp_err)){ cr_set_dict(new_file->f, zck_dict, zck_dict_size, &tmp_err); } else { g_propagate_prefixed_error(err, tmp_err, "Error encountered setting zck dict:"); cr_xmlfile_close(new_file, NULL); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } } char *copy_buf = NULL; // Chunk with index 0 is dictionary, data (xml metadata and our header) starts at 1 ssize_t zchunk_index = 1; ssize_t len_read = cr_get_zchunk_with_index(original_file, zchunk_index, ©_buf, &tmp_err); if (!tmp_err) write_modified_header(task_count, package_count, new_file, copy_buf, len_read, &tmp_err); if (tmp_err){ g_propagate_prefixed_error(err, tmp_err, "Error encountered while recompressing:"); cr_xmlfile_close(new_file, NULL); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } zchunk_index++; while(len_read){ g_free(copy_buf); len_read = cr_get_zchunk_with_index(original_file, zchunk_index, ©_buf, &tmp_err); if (!tmp_err) cr_write(new_file->f, copy_buf, len_read, &tmp_err); if (!tmp_err) cr_end_chunk(new_file->f, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, 
tmp_err, "Error encountered while recompressing:"); cr_xmlfile_close(new_file, NULL); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } zchunk_index++; } } else { gchar header_buf[XML_MAX_HEADER_SIZE]; int len_read = cr_read(original_file, header_buf, XML_MAX_HEADER_SIZE, &tmp_err); if (!tmp_err) write_modified_header(task_count, package_count, new_file, header_buf, len_read, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while recompressing:"); cr_xmlfile_close(new_file, NULL); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } //Copy the rest of the file gchar copy_buf[XML_RECOMPRESS_BUFFER_SIZE]; while(len_read) { len_read = cr_read(original_file, copy_buf, XML_RECOMPRESS_BUFFER_SIZE, &tmp_err); if (!tmp_err) cr_write(new_file->f, copy_buf, len_read, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while recompressing:"); cr_xmlfile_close(new_file, NULL); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } } } new_file->header = 1; new_file->footer = 1; cr_xmlfile_close(new_file, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while writing:"); cr_close(original_file, NULL); g_free(tmp_xml_filename); return; } cr_close(original_file, &tmp_err); if (tmp_err) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while writing:"); g_free(tmp_xml_filename); return; } if (g_rename(tmp_xml_filename, original_filename) == -1) { g_propagate_prefixed_error(err, tmp_err, "Error encountered while renaming:"); g_free(tmp_xml_filename); return; } g_free(tmp_xml_filename); } createrepo_c-0.17.0/src/xml_file.h000066400000000000000000000223231400672373200167770ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_XML_FILE_H__ #define __C_CREATEREPOLIB_XML_FILE_H__ #ifdef __cplusplus extern "C" { #endif #include #include "compression_wrapper.h" #include "package.h" /** \defgroup xml_file XML file API. * \addtogroup xml_file * @{ */ /** Supported types of xml files */ typedef enum { CR_XMLFILE_PRIMARY, /*!< primary.xml */ CR_XMLFILE_FILELISTS, /*!< filelists.xml */ CR_XMLFILE_OTHER, /*!< other.xml */ CR_XMLFILE_PRESTODELTA, /*!< prestodelta.xml */ CR_XMLFILE_UPDATEINFO, /*!< updateinfo.xml */ CR_XMLFILE_SENTINEL, /*!< sentinel of the list */ } cr_XmlFileType; /** cr_XmlFile structure. */ typedef struct { CR_FILE *f; /*!< File */ cr_XmlFileType type; /*!< Type of XML file. */ int header; /*!< 0 if no header was written yet. */ int footer; /*!< 0 if no footer was written yet. */ long pkgs; /*!< Number of packages */ } cr_XmlFile; /** Open a new primary XML file. * @param FILENAME Filename. * @param COMTYPE Type of compression. 
* @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_open_primary(FILENAME, COMTYPE, ERR) \ cr_xmlfile_open(FILENAME, CR_XMLFILE_PRIMARY, COMTYPE, ERR) /** Open a new primary XML file. * @param FILENAME Filename. * @param COMTYPE Type of compression. * @param STAT cr_ContentStat object or NULL. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_sopen_primary(FILENAME, COMTYPE, STAT, ERR) \ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_PRIMARY, COMTYPE, STAT, ERR) /** Open a new filelists XML file. * @param FILENAME Filename. * @param COMTYPE Type of used compression. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_open_filelists(FILENAME, COMTYPE, ERR) \ cr_xmlfile_open(FILENAME, CR_XMLFILE_FILELISTS, COMTYPE, ERR) /** Open a new filelists XML file. * @param FILENAME Filename. * @param COMTYPE Type of compression. * @param STAT cr_ContentStat object or NULL. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_sopen_filelists(FILENAME, COMTYPE, STAT, ERR) \ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_FILELISTS, COMTYPE, STAT, ERR) /** Open a new other XML file. * @param FILENAME Filename. * @param COMTYPE Type of used compression. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_open_other(FILENAME, COMTYPE, ERR) \ cr_xmlfile_open(FILENAME, CR_XMLFILE_OTHER, COMTYPE, ERR) /** Open a new other XML file. * @param FILENAME Filename. * @param COMTYPE Type of compression. * @param STAT cr_ContentStat object or NULL. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_sopen_other(FILENAME, COMTYPE, STAT, ERR) \ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_OTHER, COMTYPE, STAT, ERR) /** Open a new prestodelta XML file. * @param FILENAME Filename. * @param COMTYPE Type of used compression. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_open_prestodelta(FILENAME, COMTYPE, ERR) \ cr_xmlfile_open(FILENAME, CR_XMLFILE_PRESTODELTA, COMTYPE, ERR) /** Open a new prestodelta XML file. * @param FILENAME Filename. * @param COMTYPE Type of compression. * @param STAT cr_ContentStat object or NULL. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_sopen_prestodelta(FILENAME, COMTYPE, STAT, ERR) \ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_PRESTODELTA, COMTYPE, STAT, ERR) /** Open a new updateinfo XML file. * @param FILENAME Filename. * @param COMTYPE Type of used compression. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_open_updateinfo(FILENAME, COMTYPE, ERR) \ cr_xmlfile_open(FILENAME, CR_XMLFILE_UPDATEINFO, COMTYPE, ERR) /** Open a new updateinfo XML file. * @param FILENAME Filename. * @param COMTYPE Type of compression. * @param STAT cr_ContentStat object or NULL. * @param ERR GError ** * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_sopen_updateinfo(FILENAME, COMTYPE, STAT, ERR) \ cr_xmlfile_sopen(FILENAME, CR_XMLFILE_UPDATEINFO, COMTYPE, STAT, ERR) /** Open a new XML file with stats. * Note: Opened file must not exists! This function cannot * open existing file!. * @param FILENAME Filename. * @param TYPE Type of XML file. * @param COMTYPE Type of used compression. 
* @param ERR **GError * @return Opened cr_XmlFile or NULL on error */ #define cr_xmlfile_open(FILENAME, TYPE, COMTYPE, ERR) \ cr_xmlfile_sopen(FILENAME, TYPE, COMTYPE, NULL, ERR) /** Open a new XML file. * Note: Opened file must not exists! This function cannot * open existing file!. * @param filename Filename. * @param type Type of XML file. * @param comtype Type of used compression. * @param stat pointer to cr_ContentStat or NULL * @param err **GError * @return Opened cr_XmlFile or NULL on error */ cr_XmlFile *cr_xmlfile_sopen(const char *filename, cr_XmlFileType type, cr_CompressionType comtype, cr_ContentStat *stat, GError **err); /** Set total number of packages that will be in the file. * This number must be set before any write operation * (cr_xml_add_pkg, cr_xml_file_add_chunk, ..). * @param f An opened cr_XmlFile * @param num Total number of packages in the file. * @param err **GError * @return cr_Error code */ int cr_xmlfile_set_num_of_pkgs(cr_XmlFile *f, long num, GError **err); /** Add package to the xml file. * @param f An opened cr_XmlFile * @param pkg Package object. * @param err **GError * @return cr_Error code */ int cr_xmlfile_add_pkg(cr_XmlFile *f, cr_Package *pkg, GError **err); /** Add (write) string with XML chunk into the file. * Note: Because of writing, in case of multithreaded program, should be * guarded by locks, this function could be much more effective than * cr_xml_file_add_pkg(). In case of _add_pkg() function, creating of * string with xml chunk is done in a critical section. In _add_chunk() * function, you could just dump XML whenever you want and in the * critical section do only writting. * @param f An opened cr_XmlFile * @param chunk String with XML chunk. * @param err **GError * @return cr_Error code */ int cr_xmlfile_add_chunk(cr_XmlFile *f, const char *chunk, GError **err); /** Close an opened cr_XmlFile. * @param f An opened cr_XmlFile * @param err **GError * @return cr_Error code */ int cr_xmlfile_close(cr_XmlFile *f, GError **err); /** Rewrite package count field in repodata header in xml file. * In order to do this we have to decompress and after the change * compress the whole file again, so entirely new file is created. * @param original_filename Current file with wrong value in header * @param package_count Actual package count (desired value in header) * @param task_count Task count (current value in header) * @param file_stat cr_ContentStat for stats of the new file, it will be modified * @param zck_dict_file Optional path to zck dictionary * @param err **GError */ void cr_rewrite_header_package_count(gchar *original_filename, cr_CompressionType xml_compression, int package_count, int task_count, cr_ContentStat *file_stat, gchar *zck_dict_file, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_XML_FILE_H__ */ createrepo_c-0.17.0/src/xml_parser.c000066400000000000000000000171521400672373200173530ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include "error.h" #include "xml_parser.h" #include "xml_parser_internal.h" #include "misc.h" #define ERR_DOMAIN CREATEREPO_C_ERROR cr_ParserData * cr_xml_parser_data(unsigned int numstates) { cr_ParserData *pd = g_new0(cr_ParserData, 1); pd->content = g_malloc(CONTENT_REALLOC_STEP); pd->acontent = CONTENT_REALLOC_STEP; pd->swtab = g_malloc0(sizeof(cr_StatesSwitch *) * numstates); pd->sbtab = g_malloc(sizeof(unsigned int) * numstates); return pd; } void cr_xml_parser_data_free(cr_ParserData *pd) { g_free(pd->content); g_free(pd->swtab); g_free(pd->sbtab); g_free(pd); } void cr_char_handler(void *pdata, const xmlChar *s, int len) { int l; char *c; cr_ParserData *pd = pdata; if (pd->err) return; /* There was an error -> do nothing */ if (!pd->docontent) return; /* Do not store the content */ l = pd->lcontent + len + 1; if (l > pd->acontent) { pd->acontent = l + CONTENT_REALLOC_STEP; pd->content = realloc(pd->content, pd->acontent); } c = pd->content + pd->lcontent; pd->lcontent += len; while (len-- > 0) *c++ = *s++; *c = '\0'; } int cr_xml_parser_warning(cr_ParserData *pd, cr_XmlParserWarningType type, const char *msg, ...) { int ret; va_list args; char *warn; GError *tmp_err; assert(pd); assert(msg); if (!pd->warningcb) return CR_CB_RET_OK; va_start(args, msg); g_vasprintf(&warn, msg, args); va_end(args); tmp_err = NULL; ret = pd->warningcb(type, warn, pd->warningcb_data, &tmp_err); g_free(warn); if (ret != CR_CB_RET_OK) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); } assert(pd->err || ret == CR_CB_RET_OK); return ret; } gint64 cr_xml_parser_strtoll(cr_ParserData *pd, const char *nptr, unsigned int base) { gint64 val; char *endptr = NULL; assert(pd); assert(base <= 36 && base != 1); if (!nptr) return 0; val = g_ascii_strtoll(nptr, &endptr, base); if ((val == G_MAXINT64 || val == G_MININT64) && errno == ERANGE) cr_xml_parser_warning(pd, CR_XML_WARNING_BADATTRVAL, "Correct integer value \"%s\" caused overflow", nptr); else if (val == 0 && *endptr != '\0') cr_xml_parser_warning(pd, CR_XML_WARNING_BADATTRVAL, "Conversion of \"%s\" to integer failed", nptr); return val; } int cr_newpkgcb(cr_Package **pkg, G_GNUC_UNUSED const char *pkgId, G_GNUC_UNUSED const char *name, G_GNUC_UNUSED const char *arch, G_GNUC_UNUSED void *cbdata, GError **err) { assert(pkg && *pkg == NULL); assert(!err || *err == NULL); *pkg = cr_package_new(); return CRE_OK; } int cr_xml_parser_generic(xmlParserCtxtPtr parser, cr_ParserData *pd, const char *path, GError **err) { /* Note: This function uses .err members of cr_ParserData! 
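     *
     * The loop below opens the (possibly compressed) target with cr_open()
     * using CR_CW_AUTO_DETECT_COMPRESSION, reads it in XML_BUFFER_SIZE
     * blocks with cr_read() and feeds each block to xmlParseChunk(); a
     * zero-length read marks the terminating chunk. An error can come from
     * three places - the reader, the XML parser itself, or a user callback
     * that stored an error in pd->err - and in all three cases parsing
     * stops and the error is propagated to the caller.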
*/ int ret = CRE_OK; CR_FILE *f; GError *tmp_err = NULL; char buf[XML_BUFFER_SIZE]; assert(parser); assert(pd); assert(path); assert(!err || *err == NULL); f = cr_open(path, CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, &tmp_err); if (tmp_err) { int code = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Cannot open %s: ", path); return code; } while (1) { int len; len = cr_read(f, buf, XML_BUFFER_SIZE, &tmp_err); if (tmp_err) { ret = tmp_err->code; g_critical("%s: Error while reading xml '%s': %s", __func__, path, tmp_err->message); g_propagate_prefixed_error(err, tmp_err, "Read error: "); break; } if (xmlParseChunk(parser, buf, len, len == 0)) { ret = CRE_XMLPARSER; xmlErrorPtr xml_err = xmlCtxtGetLastError(parser); g_critical("%s: parsing error '%s': %s", __func__, path, xml_err->message); g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER, "Parse error '%s' at line: %d (%s)", path, (int) xml_err->line, (char *) xml_err->message); break; } if (pd->err) { ret = pd->err->code; g_propagate_error(err, pd->err); break; } if (len == 0) break; } if (ret != CRE_OK) { // An error already encoutentered // just close the file without error checking cr_close(f, NULL); } else { // No error encountered yet cr_close(f, &tmp_err); if (tmp_err) { ret = tmp_err->code; g_propagate_prefixed_error(err, tmp_err, "Error while closing: "); } } return ret; } int cr_xml_parser_generic_from_string(xmlParserCtxtPtr parser, cr_ParserData *pd, const char *xml_string, GError **err) { /* Note: This function uses .err members of cr_ParserData! */ int ret = CRE_OK; int block_size = XML_BUFFER_SIZE; const char *next_data = xml_string; const char *end_of_string = xml_string + strlen(xml_string); int finished = 0; assert(parser); assert(pd); assert(xml_string); assert(!err || *err == NULL); const char *data; while (!finished) { data = next_data; // Check if we are in the last loop next_data = data + block_size; if (next_data > end_of_string) { block_size = strlen(data); finished = 1; } if (xmlParseChunk(parser, data, block_size, finished)) { ret = CRE_XMLPARSER; xmlErrorPtr xml_err = xmlCtxtGetLastError(parser); g_critical("%s: parsing error '%s': %s", __func__, data, xml_err->message); g_set_error(err, ERR_DOMAIN, CRE_XMLPARSER, "Parse error '%s' at line: %d (%s)", data, (int) xml_err->line, (char *) xml_err->message); } if (pd->err) { ret = pd->err->code; g_propagate_error(err, pd->err); } } return ret; } createrepo_c-0.17.0/src/xml_parser.h000066400000000000000000000316071400672373200173610ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #ifndef __C_CREATEREPOLIB_XML_PARSER_H__ #define __C_CREATEREPOLIB_XML_PARSER_H__ #ifdef __cplusplus extern "C" { #endif #include #include "package.h" #include "repomd.h" #include "updateinfo.h" /** \defgroup xml_parser XML parser API. * \addtogroup xml_parser * @{ */ #define CR_CB_RET_OK 0 /*!< Return value for callbacks signalizing success */ #define CR_CB_RET_ERR 1 /*!< Return value for callbacks signalizing error */ /** Type of warnings reported by parsers by the warning callback. */ typedef enum { CR_XML_WARNING_UNKNOWNTAG, /*!< Unknown tag */ CR_XML_WARNING_MISSINGATTR, /*!< Missing attribute */ CR_XML_WARNING_UNKNOWNVAL, /*!< Unknown tag or attribute value */ CR_XML_WARNING_BADATTRVAL, /*!< Bad attribute value */ CR_XML_WARNING_MISSINGVAL, /*!< Missing tag value */ CR_XML_WARNING_BADMDTYPE, /*!< Bad metadata type (expected mandatory tag was not found) */ CR_XML_WARNING_SENTINEL, } cr_XmlParserWarningType; /** Callback for XML parser wich is called when a new package object parsing * is started. This function has to set *pkg to package object which will * be populated by parser. The object could be empty, or already partially * filled (by other XML parsers) package object. * If the pointer is set to NULL, current package will be skiped. * Note: For the primary.xml file pkgId, name and arch are NULL! * @param pkg Package that will be populated. * @param pkgId pkgId (hash) of the new package (in case of filelists.xml * or other.xml) or package type ("rpm" in case * of primary.xml). * @param name Name of the new package. * @param arch Arch of the new package. * @param cbdata User data. * @param err GError ** * @return CR_CB_RET_OK (0) or CR_CB_RET_ERR (1) - stops the parsing */ typedef int (*cr_XmlParserNewPkgCb)(cr_Package **pkg, const char *pkgId, const char *name, const char *arch, void *cbdata, GError **err); /** Callback for XML parser wich is called when a package element is parsed. * @param pkg Currently parsed package. * @param cbdata User data. * @param err GError ** * @return CR_CB_RET_OK (0) or CR_CB_RET_ERR (1) - stops the parsing */ typedef int (*cr_XmlParserPkgCb)(cr_Package *pkg, void *cbdata, GError **err); /** Callback for XML parser warnings. All reported warnings are non-fatal, * and ignored by default. But if callback return CR_CB_RET_ERR instead of * CR_CB_RET_OK then parsing is immediately interrupted. * @param type Type of warning * @param msg Warning msg. The message is destroyed after the call. * If you want touse the message later, you have to copy it. * @param cbdata User data. * @param err GError ** * @return CR_CB_RET_OK (0) or CR_CB_RET_ERR (1) - stops the parsing */ typedef int (*cr_XmlParserWarningCb)(cr_XmlParserWarningType type, char *msg, void *cbdata, GError **err); /** Parse primary.xml. File could be compressed. * @param path Path to filelists.xml * @param newpkgcb Callback for new package (Called when new package * xml chunk is found and package object to store * the data is needed). If NULL cr_newpkgcb is used. * @param newpkgcb_data User data for the newpkgcb. * @param pkgcb Package callback. (Called when complete package * xml chunk is parsed.). Could be NULL if newpkgcb is * not NULL. * @param pkgcb_data User data for the pkgcb. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param do_files 0 - Ignore file tags in primary.xml. * @param err GError ** * @return cr_Error code. 
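 *
 * A minimal usage sketch (the callback, list and path are illustrative and
 * error checking is omitted); it relies on the default cr_newpkgcb to
 * allocate each package and collects the parsed packages into a list:
 *
 *   static int collect_pkg(cr_Package *pkg, void *cbdata, G_GNUC_UNUSED GError **err)
 *   {
 *       GSList **list = cbdata;
 *       *list = g_slist_prepend(*list, pkg);
 *       return CR_CB_RET_OK;
 *   }
 *
 *   GSList *packages = NULL;
 *   GError *tmp_err = NULL;
 *   cr_xml_parse_primary("repodata/primary.xml.gz", NULL, NULL,
 *                        collect_pkg, &packages, NULL, NULL, 1, &tmp_err);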
*/ int cr_xml_parse_primary(const char *path, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int do_files, GError **err); /** Parse string snippet of primary xml repodata. Snippet cannot contain * root xml element . It contains only elemetns. * @param xml_string String containg primary xml data * @param newpkgcb Callback for new package (Called when new package * xml chunk is found and package object to store * the data is needed). If NULL cr_newpkgcb is used. * @param newpkgcb_data User data for the newpkgcb. * @param pkgcb Package callback. (Called when complete package * xml chunk is parsed.). Could be NULL if newpkgcb is * not NULL. * @param pkgcb_data User data for the pkgcb. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param do_files 0 - Ignore file tags in primary.xml. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_primary_snippet(const char *xml_string, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int do_files, GError **err); /** Parse filelists.xml. File could be compressed. * @param path Path to filelists.xml * @param newpkgcb Callback for new package (Called when new package * xml chunk is found and package object to store * the data is needed). If NULL cr_newpkgcb is used. * @param newpkgcb_data User data for the newpkgcb. * @param pkgcb Package callback. (Called when complete package * xml chunk is parsed.). Could be NULL if newpkgcb is * not NULL. * @param pkgcb_data User data for the pkgcb. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_filelists(const char *path, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err); /** Parse string snippet of filelists xml repodata. Snippet cannot contain * root xml element . It contains only elemetns. * @param xml_string String containg filelists xml data * @param newpkgcb Callback for new package (Called when new package * xml chunk is found and package object to store * the data is needed). If NULL cr_newpkgcb is used. * @param newpkgcb_data User data for the newpkgcb. * @param pkgcb Package callback. (Called when complete package * xml chunk is parsed.). Could be NULL if newpkgcb is * not NULL. * @param pkgcb_data User data for the pkgcb. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_filelists_snippet(const char *xml_string, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err); /** Parse other.xml. File could be compressed. * @param path Path to other.xml * @param newpkgcb Callback for new package (Called when new package * xml chunk is found and package object to store * the data is needed). If NULL cr_newpkgcb is used. * @param newpkgcb_data User data for the newpkgcb. * @param pkgcb Package callback. (Called when complete package * xml chunk is parsed.). Could be NULL if newpkgcb is * not NULL. * @param pkgcb_data User data for the pkgcb. 
* @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_other(const char *path, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err); /** Parse string snippet of other xml repodata. Snippet cannot contain * root xml element . It contains only elemetns. * @param xml_string String containg other xml data * @param newpkgcb Callback for new package (Called when new package * xml chunk is found and package object to store * the data is needed). If NULL cr_newpkgcb is used. * @param newpkgcb_data User data for the newpkgcb. * @param pkgcb Package callback. (Called when complete package * xml chunk is parsed.). Could be NULL if newpkgcb is * not NULL. * @param pkgcb_data User data for the pkgcb. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_other_snippet(const char *xml_string, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err); /** Parse repomd.xml. File could be compressed. * @param path Path to repomd.xml * @param repomd cr_Repomd object. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_repomd(const char *path, cr_Repomd *repomd, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err); /** Parse updateinfo.xml. File could be compressed. * @param path Path to updateinfo.xml * @param updateinfo cr_UpdateInfo object. * @param warningcb Callback for warning messages. * @param warningcb_data User data for the warningcb. * @param err GError ** * @return cr_Error code. */ int cr_xml_parse_updateinfo(const char *path, cr_UpdateInfo *updateinfo, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err); /** @} */ #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_XML_PARSER_H__ */ createrepo_c-0.17.0/src/xml_parser_filelists.c000066400000000000000000000313151400672373200214260ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include #include #include #include "xml_parser_internal.h" #include "xml_parser.h" #include "error.h" #include "package.h" #include "misc.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define ERR_CODE_XML CRE_BADXMLFILELISTS typedef enum { STATE_START, STATE_FILELISTS, STATE_PACKAGE, STATE_VERSION, STATE_FILE, NUMSTATES, } cr_FilState; /* NOTE: Same states in the first column must be together!!! * Performance tip: More frequent elements should be listed * first in its group (eg: element "package" (STATE_PACKAGE) * has a "file" element listed first, because it is more frequent * than a "version" element). */ static cr_StatesSwitch stateswitches[] = { { STATE_START, "filelists", STATE_FILELISTS, 0 }, { STATE_FILELISTS, "package", STATE_PACKAGE, 0 }, { STATE_PACKAGE, "file", STATE_FILE, 1 }, { STATE_PACKAGE, "version", STATE_VERSION, 0 }, { NUMSTATES, NULL, NUMSTATES, 0 }, }; static void XMLCALL cr_start_handler(void *pdata, const xmlChar *element, const xmlChar **attr) { GError *tmp_err = NULL; cr_ParserData *pd = pdata; cr_StatesSwitch *sw; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // We are inside of unknown element pd->depth++; return; } pd->depth++; if (!pd->swtab[pd->state]) { // Current element should not have any sub elements return; } if (!pd->pkg && pd->state != STATE_FILELISTS && pd->state != STATE_START) return; // Do not parse current package tag and its content // Find current state by its name for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++) if (!strcmp((char *) element, sw->ename)) break; if (sw->from != pd->state) { // No state for current element (unknown element) cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNTAG, "Unknown element \"%s\"", element); return; } // Update parser data pd->state = sw->to; pd->docontent = sw->docontent; pd->statedepth = pd->depth; pd->lcontent = 0; pd->content[0] = '\0'; const char *val; switch(pd->state) { case STATE_START: break; case STATE_FILELISTS: pd->main_tag_found = TRUE; break; case STATE_PACKAGE: { const char *pkgId = cr_find_attr("pkgid", attr); const char *name = cr_find_attr("name", attr); const char *arch = cr_find_attr("arch", attr); if (!pkgId) { // Package without a pkgid attr is error g_set_error(&pd->err, ERR_DOMAIN, ERR_CODE_XML, "Package pkgid attributte is missing!"); break; } if (!name) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"name\" of a package element"); if (!arch) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"arch\" of a package element"); // Get package object to store current package or NULL if // current XML package element should be skipped/ignored. if (pd->newpkgcb(&pd->pkg, pkgId, name, arch, pd->newpkgcb_data, &tmp_err)) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); break; } else { // If callback return CRE_OK but it simultaneously set // the tmp_err then it's a programming error. 
assert(tmp_err == NULL); } if (pd->pkg) { if (!pd->pkg->pkgId) pd->pkg->pkgId = g_string_chunk_insert(pd->pkg->chunk, pkgId); if (!pd->pkg->name && name) pd->pkg->name = g_string_chunk_insert(pd->pkg->chunk, name); if (!pd->pkg->arch && arch) pd->pkg->arch = g_string_chunk_insert(pd->pkg->chunk, arch); } break; } case STATE_VERSION: assert(pd->pkg); // Version string insert only if them don't already exists if (!pd->pkg->epoch) pd->pkg->epoch = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("epoch", attr)); if (!pd->pkg->version) pd->pkg->version = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("ver", attr)); if (!pd->pkg->release) pd->pkg->release = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("rel", attr)); break; case STATE_FILE: assert(pd->pkg); val = cr_find_attr("type", attr); pd->last_file_type = FILE_FILE; if (val) { if (!strcmp(val, "dir")) pd->last_file_type = FILE_DIR; else if (!strcmp(val, "ghost")) pd->last_file_type = FILE_GHOST; else cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNVAL, "Unknown file type \"%s\"", val); } break; default: break; } } static void XMLCALL cr_end_handler(void *pdata, G_GNUC_UNUSED const xmlChar *element) { cr_ParserData *pd = pdata; GError *tmp_err = NULL; unsigned int state = pd->state; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // Back from the unknown state pd->depth--; return; } pd->depth--; pd->statedepth--; pd->state = pd->sbtab[pd->state]; pd->docontent = 0; switch (state) { case STATE_START: case STATE_FILELISTS: case STATE_VERSION: break; case STATE_PACKAGE: if (!pd->pkg) return; // Reverse list of files pd->pkg->files = g_slist_reverse(pd->pkg->files); if (pd->pkgcb && pd->pkgcb(pd->pkg, pd->pkgcb_data, &tmp_err)) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); } else { // If callback return CRE_OK but it simultaneously set // the tmp_err then it's a programming error. 
assert(tmp_err == NULL); } pd->pkg = NULL; break; case STATE_FILE: { assert(pd->pkg); if (!pd->content) break; cr_PackageFile *pkg_file = cr_package_file_new(); pkg_file->name = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_get_filename(pd->content)); pd->content[pd->lcontent - strlen(pkg_file->name)] = '\0'; pkg_file->path = cr_safe_string_chunk_insert_const(pd->pkg->chunk, pd->content); switch (pd->last_file_type) { case FILE_FILE: pkg_file->type = NULL; break; // NULL => "file" case FILE_DIR: pkg_file->type = "dir"; break; case FILE_GHOST: pkg_file->type = "ghost"; break; default: assert(0); // Should not happend } pd->pkg->files = g_slist_prepend(pd->pkg->files, pkg_file); break; } default: break; } } int cr_xml_parse_filelists_internal(const char *target, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int (*parser_func)(xmlParserCtxtPtr, cr_ParserData *, const char *, GError**), GError **err) { int ret = CRE_OK; cr_ParserData *pd; GError *tmp_err = NULL; assert(target); assert(newpkgcb || pkgcb); assert(!err || *err == NULL); if (!newpkgcb) // Use default newpkgcb newpkgcb = cr_newpkgcb; // Init xmlSAXHandler sax; memset(&sax, 0, sizeof(sax)); sax.startElement = cr_start_handler; sax.endElement = cr_end_handler; sax.characters = cr_char_handler; pd = cr_xml_parser_data(NUMSTATES); xmlParserCtxtPtr parser; parser = xmlCreatePushParserCtxt(&sax, pd, NULL, 0, NULL); pd->parser = parser; pd->state = STATE_START; pd->newpkgcb_data = newpkgcb_data; pd->newpkgcb = newpkgcb; pd->pkgcb_data = pkgcb_data; pd->pkgcb = pkgcb; pd->warningcb = warningcb; pd->warningcb_data = warningcb_data; for (cr_StatesSwitch *sw = stateswitches; sw->from != NUMSTATES; sw++) { if (!pd->swtab[sw->from]) pd->swtab[sw->from] = sw; pd->sbtab[sw->to] = sw->from; } // Parsing ret = parser_func(parser, pd, target, &tmp_err); if (tmp_err) g_propagate_error(err, tmp_err); // Warning if file was probably a different type than expected if (!pd->main_tag_found && ret == CRE_OK) cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE, "The target doesn't contain the expected element " "\"\" - The target probably isn't " "a valid filelists xml"); // Clean up if (ret != CRE_OK && newpkgcb == cr_newpkgcb) { // Prevent memory leak when the parsing is interrupted by an error. // If a new package object was created by the cr_newpkgcb then // is obvious that there is no other reference to the package // except of the parser reference in pd->pkg. // If a caller supplied its own newpkgcb, then the freeing // of the currently parsed package is the caller responsibility. 
cr_package_free(pd->pkg); } cr_xml_parser_data_free(pd); xmlFreeParserCtxt(parser); return ret; } int cr_xml_parse_filelists(const char *path, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err) { return cr_xml_parse_filelists_internal(path, newpkgcb, newpkgcb_data, pkgcb, pkgcb_data, warningcb, warningcb_data, &cr_xml_parser_generic, err); } int cr_xml_parse_filelists_snippet(const char *xml_string, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err) { char* wrapped_xml_string = g_strconcat("", xml_string, "", NULL); int ret = cr_xml_parse_filelists_internal(wrapped_xml_string, newpkgcb, newpkgcb_data, pkgcb, pkgcb_data, warningcb, warningcb_data, &cr_xml_parser_generic_from_string, err); free(wrapped_xml_string); return ret; } createrepo_c-0.17.0/src/xml_parser_internal.h000066400000000000000000000170571400672373200212600ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_XML_PARSER_INTERNAL_H__ #define __C_CREATEREPOLIB_XML_PARSER_INTERNAL_H__ #ifdef __cplusplus extern "C" { #endif #include #include #include "xml_parser.h" #include "error.h" #include "package.h" #include "repomd.h" #include "updateinfo.h" #define XML_BUFFER_SIZE 8192 #define CONTENT_REALLOC_STEP 256 /* Some notes about XML parsing (primary, filelists, other) * ======================================================== * - Error during parsing is indicated via cr_ParserData->err member. * - User specified callback have to be sanitized! User callbacks * are allowed return CR_CB_RET_ERR and do not set the GError. * So if the CR_CB_RET_ERR is returned and GError not setted, caller * of the callback has to set the GError by himself. */ /** File types in filelists.xml */ typedef enum { FILE_FILE, FILE_DIR, FILE_GHOST, FILE_SENTINEL, } cr_FileType; /** Structure used for elements in the state switches in XML parsers */ typedef struct { unsigned int from; /*!< State (current tag) */ char *ename; /*!< String name of sub-tag */ unsigned int to; /*!< State of sub-tag */ int docontent; /*!< Read text content of element? */ } cr_StatesSwitch; /** Parser data */ typedef struct _cr_ParserData { int depth; /*!< Current depth in a XML tree */ int statedepth; /*!< Depth of the last known state (element) */ unsigned int state; /*!< current state */ GError *err; /*!< Error message */ /* Tag content related values */ int docontent; /*!< Store text content of the current element? 
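                                     It is set from the docontent flag of the
                                     matched cr_StatesSwitch entry; when
                                     non-zero, cr_char_handler() accumulates
                                     the element text into the content buffer
                                     below.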
*/ char *content; /*!< Text content of the element */ int lcontent; /*!< The content lenght */ int acontent; /*!< Available bytes in the content */ xmlParserCtxtPtr parser; /*!< The parser */ cr_StatesSwitch **swtab; /*!< Pointers to statesswitches table */ unsigned int *sbtab; /*!< stab[to_state] = from_state */ /* Common stuf */ gboolean main_tag_found; /*!< Was the main tag present? E.g.: For primary.xml For filelists.xml For other.xml For repomd.xml For updateinfo.xml If the main tag is missing the most likely the input file was a different then expected type */ /* Package stuff */ void *newpkgcb_data; /*!< User data for the newpkgcb. */ cr_XmlParserNewPkgCb newpkgcb; /*!< Callback called to get (create new or use existing from a previous parsing of other or primary xml file) pkg object for the currently loaded pkg. */ void *pkgcb_data; /*!< User data for the pkgcb. */ cr_XmlParserPkgCb pkgcb; /*!< Callback called when a signel pkg data are completly parsed. */ void *warningcb_data; /*!< User data fot he warningcb. */ cr_XmlParserWarningCb warningcb; /*!< Warning callback */ cr_Package *pkg; /*!< The package which is currently loaded. */ /* Primary related stuff */ int do_files; /*!< If == 0 then parser will ignore files elements in the primary.xml. This is useful when you are inteding parse primary.xml as well as filelists.xml. In this case files will be filled from filelists.xml. If you are inteding parse only the primary.xml then it coud be useful to parse files in primary. If you parse files from both a primary.xml and a filelists.xml then some files in package object will be duplicated! */ /* Filelists + Primary related stuff */ cr_FileType last_file_type; /*!< Type of file in a currently parsed element */ /* Other related stuff */ cr_ChangelogEntry *changelog; /*!< Changelog entry object for currently parsed element (entry) */ /* Repomd related stuff */ cr_Repomd *repomd; /*!< Repomd object */ cr_RepomdRecord *repomdrecord; /*!< Repomd record object for a currently parsed element */ char *cpeid; /*!< cpeid value for the currently parsed distro tag */ /* Updateinfo related stuff */ cr_UpdateInfo *updateinfo; /*!< Update info object */ cr_UpdateRecord *updaterecord; /*!< Update record object */ cr_UpdateCollection *updatecollection; /*!< Update collection object */ cr_UpdateCollectionModule *updatecollectionmodule; /*!< Update collection module object */ cr_UpdateCollectionPackage *updatecollectionpackage; /*!< Update collection package object */ } cr_ParserData; /** Malloc and initialize common part of XML parser data. */ cr_ParserData *cr_xml_parser_data(); /** Frees XML parser data. */ void cr_xml_parser_data_free(cr_ParserData *pd); /** Find attribute in list of attributes. * @param name Attribute name. * @param attr List of attributes of the tag * @return Value or NULL */ static inline const char * cr_find_attr(const char *name, const xmlChar **attr) { while (attr && *attr) { if (!strcmp(name, (char *) *attr)) return (const char *) attr[1]; attr += 2; } return NULL; } /** XML character handler */ void cr_char_handler(void *pdata, const xmlChar *s, int len); /** Wrapper for user warning cb. * It checks if warningcb is defined, if defined, it build warning msg from * va_args, calls warningcb and propagate (set) error if necessary. */ int cr_xml_parser_warning(cr_ParserData *pd, cr_XmlParserWarningType type, const char *msg, ...); /** strtoll with ability to call warning cb if error during conversion. 
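 * On overflow the value returned by g_ascii_strtoll() is already clamped to
 * G_MAXINT64 / G_MININT64 and a CR_XML_WARNING_BADATTRVAL warning is issued;
 * if the string cannot be converted at all, 0 is returned, again with a
 * CR_XML_WARNING_BADATTRVAL warning. A NULL nptr silently yields 0.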
*/ gint64 cr_xml_parser_strtoll(cr_ParserData *pd, const char *nptr, unsigned int base); /** Default callback for the new package. */ int cr_newpkgcb(cr_Package **pkg, const char *pkgId, const char *name, const char *arch, void *cbdata, GError **err); /** Generic parser. */ int cr_xml_parser_generic(xmlParserCtxtPtr parser, cr_ParserData *pd, const char *path, GError **err); int cr_xml_parser_generic_from_string(xmlParserCtxtPtr parser, cr_ParserData *pd, const char *xml_string, GError **err); #ifdef __cplusplus } #endif #endif /* __C_CREATEREPOLIB_XML_PARSER_INTERNAL_H__ */ createrepo_c-0.17.0/src/xml_parser_other.c000066400000000000000000000305771400672373200205620ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "xml_parser_internal.h" #include "xml_parser.h" #include "error.h" #include "package.h" #include "misc.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define ERR_CODE_XML CRE_BADXMLOTHER typedef enum { STATE_START, STATE_OTHERDATA, STATE_PACKAGE, STATE_VERSION, STATE_CHANGELOG, NUMSTATES, } cr_OthState; /* NOTE: Same states in the first column must be together!!! * Performance tip: More frequent elements should be listed * first in its group (eg: element "package" (STATE_PACKAGE) * has a "file" element listed first, because it is more frequent * than a "version" element). 
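 *
 * Each row reads: while the parser is in the "from" state, a child element
 * named "ename" switches it to the "to" state, and the final docontent flag
 * says whether the text content of that element has to be collected (the
 * "changelog" row below uses 1 because the changelog text itself is needed,
 * while "version" carries everything in attributes and uses 0).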
*/ static cr_StatesSwitch stateswitches[] = { { STATE_START, "otherdata", STATE_OTHERDATA, 0 }, { STATE_OTHERDATA, "package", STATE_PACKAGE, 0 }, { STATE_PACKAGE, "changelog", STATE_CHANGELOG, 1 }, { STATE_PACKAGE, "version", STATE_VERSION, 0 }, { NUMSTATES, NULL, NUMSTATES, 0 }, }; static void XMLCALL cr_start_handler(void *pdata, const xmlChar *element, const xmlChar **attr) { GError *tmp_err = NULL; cr_ParserData *pd = pdata; cr_StatesSwitch *sw; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // We are inside of unknown element pd->depth++; return; } pd->depth++; if (!pd->swtab[pd->state]) { // Current element should not have any sub elements return; } if (!pd->pkg && pd->state != STATE_OTHERDATA && pd->state != STATE_START) return; // Do not parse current package tag and its content // Find current state by its name for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++) if (!strcmp((char *) element, sw->ename)) break; if (sw->from != pd->state) { // No state for current element (unknown element) cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNTAG, "Unknown element \"%s\"", element); return; } // Update parser data pd->state = sw->to; pd->docontent = sw->docontent; pd->statedepth = pd->depth; pd->lcontent = 0; pd->content[0] = '\0'; const char *val; switch(pd->state) { case STATE_START: break; case STATE_OTHERDATA: pd->main_tag_found = TRUE; break; case STATE_PACKAGE: { const char *pkgId = cr_find_attr("pkgid", attr); const char *name = cr_find_attr("name", attr); const char *arch = cr_find_attr("arch", attr); if (!pkgId) { // Package without a pkgid attr is error g_set_error(&pd->err, ERR_DOMAIN, ERR_CODE_XML, "Package pkgid attributte is missing!"); break; } if (!name) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"name\" of a package element"); if (!arch) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"arch\" of a package element"); // Get package object to store current package or NULL if // current XML package element should be skipped/ignored. if (pd->newpkgcb(&pd->pkg, pkgId, name, arch, pd->newpkgcb_data, &tmp_err)) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); break; } else { // If callback return CRE_OK but it simultaneously set // the tmp_err then it's a programming error. 
assert(tmp_err == NULL); } if (pd->pkg) { if (!pd->pkg->pkgId) pd->pkg->pkgId = g_string_chunk_insert(pd->pkg->chunk, pkgId); if (!pd->pkg->name && name) pd->pkg->name = g_string_chunk_insert(pd->pkg->chunk, name); if (!pd->pkg->arch && arch) pd->pkg->arch = g_string_chunk_insert(pd->pkg->chunk, arch); } break; } case STATE_VERSION: assert(pd->pkg); // Version string insert only if them don't already exists if (!pd->pkg->epoch) pd->pkg->epoch = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("epoch", attr)); if (!pd->pkg->version) pd->pkg->version = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("ver", attr)); if (!pd->pkg->release) pd->pkg->release = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("rel", attr)); break; case STATE_CHANGELOG: { assert(pd->pkg); assert(!pd->changelog); cr_ChangelogEntry *changelog = cr_changelog_entry_new(); val = cr_find_attr("author", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"author\" of a package element"); else changelog->author = g_string_chunk_insert(pd->pkg->chunk, val); val = cr_find_attr("date", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"date\" of a package element"); else changelog->date = cr_xml_parser_strtoll(pd, val, 10); pd->pkg->changelogs = g_slist_prepend(pd->pkg->changelogs, changelog); pd->changelog = changelog; break; } default: break; } } static void XMLCALL cr_end_handler(void *pdata, G_GNUC_UNUSED const xmlChar *element) { cr_ParserData *pd = pdata; GError *tmp_err = NULL; unsigned int state = pd->state; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // Back from the unknown state pd->depth--; return; } pd->depth--; pd->statedepth--; pd->state = pd->sbtab[pd->state]; pd->docontent = 0; switch (state) { case STATE_START: case STATE_OTHERDATA: case STATE_VERSION: break; case STATE_PACKAGE: if (!pd->pkg) return; // Reverse list of changelogs pd->pkg->changelogs = g_slist_reverse(pd->pkg->changelogs); if (pd->pkgcb && pd->pkgcb(pd->pkg, pd->pkgcb_data, &tmp_err)) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); } else { // If callback return CRE_OK but it simultaneously set // the tmp_err then it's a programming error. 
assert(tmp_err == NULL); } pd->pkg = NULL; break; case STATE_CHANGELOG: { assert(pd->pkg); assert(pd->changelog); if (!pd->content) break; pd->changelog->changelog = g_string_chunk_insert(pd->pkg->chunk, pd->content); pd->changelog = NULL; break; } default: break; } } int cr_xml_parse_other_internal(const char *target, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int (*parser_func)(xmlParserCtxtPtr, cr_ParserData *, const char *, GError**), GError **err) { int ret = CRE_OK; cr_ParserData *pd; GError *tmp_err = NULL; assert(target); assert(newpkgcb || pkgcb); assert(!err || *err == NULL); if (!newpkgcb) // Use default newpkgcb newpkgcb = cr_newpkgcb; // Init xmlSAXHandler sax; memset(&sax, 0, sizeof(sax)); sax.startElement = cr_start_handler; sax.endElement = cr_end_handler; sax.characters = cr_char_handler; pd = cr_xml_parser_data(NUMSTATES); xmlParserCtxtPtr parser; parser = xmlCreatePushParserCtxt(&sax, pd, NULL, 0, NULL); pd->parser = parser; pd->state = STATE_START; pd->newpkgcb_data = newpkgcb_data; pd->newpkgcb = newpkgcb; pd->pkgcb_data = pkgcb_data; pd->pkgcb = pkgcb; pd->warningcb = warningcb; pd->warningcb_data = warningcb_data; for (cr_StatesSwitch *sw = stateswitches; sw->from != NUMSTATES; sw++) { if (!pd->swtab[sw->from]) pd->swtab[sw->from] = sw; pd->sbtab[sw->to] = sw->from; } // Parsing ret = parser_func(parser, pd, target, &tmp_err); if (tmp_err) g_propagate_error(err, tmp_err); // Warning if file was probably a different type than expected if (!pd->main_tag_found && ret == CRE_OK) cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE, "The target doesn't contain the expected element " "\"\" - The target probably isn't " "a valid other xml"); // Clean up if (ret != CRE_OK && newpkgcb == cr_newpkgcb) { // Prevent memory leak when the parsing is interrupted by an error. // If a new package object was created by the cr_newpkgcb then // is obvious that there is no other reference to the package // except of the parser reference in pd->pkg. // If a caller supplied its own newpkgcb, then the freeing // of the currently parsed package is the caller responsibility. 
cr_package_free(pd->pkg); } cr_xml_parser_data_free(pd); xmlFreeParserCtxt(parser); return ret; } int cr_xml_parse_other(const char *path, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err) { return cr_xml_parse_other_internal(path, newpkgcb, newpkgcb_data, pkgcb, pkgcb_data, warningcb, warningcb_data, &cr_xml_parser_generic, err); } int cr_xml_parse_other_snippet(const char *xml_string, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err) { char* wrapped_xml_string = g_strconcat("", xml_string, "", NULL); int ret = cr_xml_parse_other_internal(wrapped_xml_string, newpkgcb, newpkgcb_data, pkgcb, pkgcb_data, warningcb, warningcb_data, &cr_xml_parser_generic_from_string, err); free(wrapped_xml_string); return ret; } createrepo_c-0.17.0/src/xml_parser_primary.c000066400000000000000000000645001400672373200211150ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "xml_parser_internal.h" #include "xml_parser.h" #include "error.h" #include "package.h" #include "misc.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define ERR_CODE_XML CRE_BADXMLPRIMARY typedef enum { STATE_START, STATE_METADATA, STATE_PACKAGE, STATE_NAME, STATE_ARCH, STATE_VERSION, STATE_CHECKSUM, STATE_SUMMARY, STATE_DESCRIPTION, STATE_PACKAGER, STATE_URL, STATE_TIME, STATE_SIZE, STATE_LOCATION, STATE_FORMAT, STATE_RPM_LICENSE, STATE_RPM_VENDOR, STATE_RPM_GROUP, STATE_RPM_BUILDHOST, STATE_RPM_SOURCERPM, STATE_RPM_HEADER_RANGE, STATE_RPM_PROVIDES, STATE_RPM_ENTRY_PROVIDES, STATE_RPM_REQUIRES, STATE_RPM_ENTRY_REQUIRES, STATE_RPM_CONFLICTS, STATE_RPM_ENTRY_CONFLICTS, STATE_RPM_OBSOLETES, STATE_RPM_ENTRY_OBSOLETES, STATE_RPM_SUGGESTS, STATE_RPM_ENTRY_SUGGESTS, STATE_RPM_ENHANCES, STATE_RPM_ENTRY_ENHANCES, STATE_RPM_RECOMMENDS, STATE_RPM_ENTRY_RECOMMENDS, STATE_RPM_SUPPLEMENTS, STATE_RPM_ENTRY_SUPPLEMENTS, STATE_FILE, NUMSTATES, } cr_PriState; /* NOTE: Same states in the first column must be together!!! * Performance tip: More frequent elements should be listed * first in its group (eg: element "package" (STATE_PACKAGE) * has a "file" element listed first, because it is more frequent * than other elements). 
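 *
 * Each entry of the table below reads (current state, child element name,
 * new state, docontent). For example { STATE_PACKAGE, "name", STATE_NAME, 1 }
 * means that a <name> element encountered while in STATE_PACKAGE switches
 * the parser to STATE_NAME and that its text content should be collected
 * (docontent = 1); entries with docontent = 0 only switch the state and any
 * data they carry is read from the element's attributes in cr_start_handler().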
*/ static cr_StatesSwitch stateswitches[] = { { STATE_START, "metadata", STATE_METADATA, 0 }, { STATE_METADATA, "package", STATE_PACKAGE, 0 }, { STATE_PACKAGE, "name", STATE_NAME, 1 }, { STATE_PACKAGE, "arch", STATE_ARCH, 1 }, { STATE_PACKAGE, "version", STATE_VERSION, 0 }, { STATE_PACKAGE, "checksum", STATE_CHECKSUM, 1 }, { STATE_PACKAGE, "summary", STATE_SUMMARY, 1 }, { STATE_PACKAGE, "description", STATE_DESCRIPTION, 1 }, { STATE_PACKAGE, "packager", STATE_PACKAGER, 1 }, { STATE_PACKAGE, "url", STATE_URL, 1 }, { STATE_PACKAGE, "time", STATE_TIME, 0 }, { STATE_PACKAGE, "size", STATE_SIZE, 0 }, { STATE_PACKAGE, "location", STATE_LOCATION, 0 }, { STATE_PACKAGE, "format", STATE_FORMAT, 0 }, { STATE_FORMAT, "file", STATE_FILE, 1 }, { STATE_FORMAT, "rpm:license", STATE_RPM_LICENSE, 1 }, { STATE_FORMAT, "rpm:vendor", STATE_RPM_VENDOR, 1 }, { STATE_FORMAT, "rpm:group", STATE_RPM_GROUP, 1 }, { STATE_FORMAT, "rpm:buildhost", STATE_RPM_BUILDHOST, 1 }, { STATE_FORMAT, "rpm:sourcerpm", STATE_RPM_SOURCERPM, 1 }, { STATE_FORMAT, "rpm:header-range", STATE_RPM_HEADER_RANGE, 0 }, { STATE_FORMAT, "rpm:provides", STATE_RPM_PROVIDES, 0 }, { STATE_FORMAT, "rpm:requires", STATE_RPM_REQUIRES, 0 }, { STATE_FORMAT, "rpm:conflicts", STATE_RPM_CONFLICTS, 0 }, { STATE_FORMAT, "rpm:obsoletes", STATE_RPM_OBSOLETES, 0 }, { STATE_FORMAT, "rpm:suggests", STATE_RPM_SUGGESTS, 0 }, { STATE_FORMAT, "rpm:enhances", STATE_RPM_ENHANCES, 0 }, { STATE_FORMAT, "rpm:recommends", STATE_RPM_RECOMMENDS, 0 }, { STATE_FORMAT, "rpm:supplements", STATE_RPM_SUPPLEMENTS, 0 }, { STATE_RPM_PROVIDES, "rpm:entry", STATE_RPM_ENTRY_PROVIDES, 0 }, { STATE_RPM_REQUIRES, "rpm:entry", STATE_RPM_ENTRY_REQUIRES, 0 }, { STATE_RPM_CONFLICTS, "rpm:entry", STATE_RPM_ENTRY_CONFLICTS, 0 }, { STATE_RPM_OBSOLETES, "rpm:entry", STATE_RPM_ENTRY_OBSOLETES, 0 }, { STATE_RPM_SUGGESTS, "rpm:entry", STATE_RPM_ENTRY_SUGGESTS, 0 }, { STATE_RPM_ENHANCES, "rpm:entry", STATE_RPM_ENTRY_ENHANCES, 0 }, { STATE_RPM_RECOMMENDS, "rpm:entry", STATE_RPM_ENTRY_RECOMMENDS, 0 }, { STATE_RPM_SUPPLEMENTS,"rpm:entry", STATE_RPM_ENTRY_SUPPLEMENTS, 0 }, { NUMSTATES, NULL, NUMSTATES, 0 }, }; static void XMLCALL cr_start_handler(void *pdata, const xmlChar *element, const xmlChar **attr) { GError *tmp_err = NULL; cr_ParserData *pd = pdata; cr_StatesSwitch *sw; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // We are inside of unknown element pd->depth++; return; } pd->depth++; if (!pd->swtab[pd->state]) { // Current element should not have any sub elements return; } if (!pd->pkg && pd->state != STATE_METADATA && pd->state != STATE_START) return; // Do not parse current package tag and its content // Find current state by its name for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++) if (!strcmp((char *) element, sw->ename)) break; if (sw->from != pd->state) { // No state for current element (unknown element) cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNTAG, "Unknown element \"%s\"", element); return; } // Update parser data pd->state = sw->to; pd->docontent = sw->docontent; pd->statedepth = pd->depth; pd->lcontent = 0; pd->content[0] = '\0'; const char *val; switch(pd->state) { case STATE_START: break; case STATE_METADATA: pd->main_tag_found = TRUE; break; case STATE_PACKAGE: assert(!pd->pkg); val = cr_find_attr("type", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"type\" of a package element"); // Get package object to store current package or NULL if // current XML package element should 
be skipped/ignored. if (pd->newpkgcb(&pd->pkg, val, NULL, NULL, pd->newpkgcb_data, &tmp_err)) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); break; } else { // If callback return CRE_OK but it simultaneously set // the tmp_err then it's a programming error. assert(tmp_err == NULL); } break; case STATE_NAME: case STATE_ARCH: break; case STATE_VERSION: assert(pd->pkg); // Version strings insert only if them don't already exists // They could be already filled by filelists or other parser. if (!pd->pkg->epoch) pd->pkg->epoch = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("epoch", attr)); if (!pd->pkg->version) pd->pkg->version = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("ver", attr)); if (!pd->pkg->release) pd->pkg->release = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_find_attr("rel", attr)); break; case STATE_CHECKSUM: assert(pd->pkg); val = cr_find_attr("type", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"type\" of a checksum element"); else pd->pkg->checksum_type = g_string_chunk_insert(pd->pkg->chunk, val); break; case STATE_SUMMARY: case STATE_DESCRIPTION: case STATE_PACKAGER: case STATE_URL: break; case STATE_TIME: assert(pd->pkg); val = cr_find_attr("file", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"file\" of a time element"); else pd->pkg->time_file = cr_xml_parser_strtoll(pd, val, 10); val = cr_find_attr("build", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"build\" of a time element"); else pd->pkg->time_build = cr_xml_parser_strtoll(pd, val, 10); break; case STATE_SIZE: assert(pd->pkg); val = cr_find_attr("package", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"package\" of a size element"); else pd->pkg->size_package = cr_xml_parser_strtoll(pd, val, 10); val = cr_find_attr("installed", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"installed\" of a size element"); else pd->pkg->size_installed = cr_xml_parser_strtoll(pd, val, 10); val = cr_find_attr("archive", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"archive\" of a size element"); else pd->pkg->size_archive = cr_xml_parser_strtoll(pd, val, 10); break; case STATE_LOCATION: assert(pd->pkg); val = cr_find_attr("href", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"href\" of a location element"); else pd->pkg->location_href = g_string_chunk_insert(pd->pkg->chunk, val); val = cr_find_attr("xml:base", attr); if (val) pd->pkg->location_base = g_string_chunk_insert(pd->pkg->chunk, val); break; case STATE_FORMAT: case STATE_RPM_LICENSE: case STATE_RPM_VENDOR: case STATE_RPM_GROUP: case STATE_RPM_BUILDHOST: case STATE_RPM_SOURCERPM: break; case STATE_RPM_HEADER_RANGE: assert(pd->pkg); val = cr_find_attr("start", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"start\" of a header-range element"); else pd->pkg->rpm_header_start = cr_xml_parser_strtoll(pd, val, 10); val = cr_find_attr("end", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"end\" of a time element"); else pd->pkg->rpm_header_end = cr_xml_parser_strtoll(pd, val, 10); break; case STATE_RPM_PROVIDES: case 
STATE_RPM_REQUIRES: case STATE_RPM_CONFLICTS: case STATE_RPM_OBSOLETES: case STATE_RPM_SUGGESTS: case STATE_RPM_ENHANCES: case STATE_RPM_RECOMMENDS: case STATE_RPM_SUPPLEMENTS: break; case STATE_RPM_ENTRY_PROVIDES: case STATE_RPM_ENTRY_REQUIRES: case STATE_RPM_ENTRY_CONFLICTS: case STATE_RPM_ENTRY_OBSOLETES: case STATE_RPM_ENTRY_SUGGESTS: case STATE_RPM_ENTRY_ENHANCES: case STATE_RPM_ENTRY_RECOMMENDS: case STATE_RPM_ENTRY_SUPPLEMENTS: { assert(pd->pkg); cr_Dependency *dep = cr_dependency_new(); val = cr_find_attr("name", attr); if (!val) cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"name\" of an entry element"); else dep->name = g_string_chunk_insert(pd->pkg->chunk, val); // Rest of attrs is optional val = cr_find_attr("flags", attr); if (val) dep->flags = g_string_chunk_insert(pd->pkg->chunk, val); val = cr_find_attr("epoch", attr); if (val) dep->epoch = g_string_chunk_insert(pd->pkg->chunk, val); val = cr_find_attr("ver", attr); if (val) dep->version = g_string_chunk_insert(pd->pkg->chunk, val); val = cr_find_attr("rel", attr); if (val) dep->release = g_string_chunk_insert(pd->pkg->chunk, val); val = cr_find_attr("pre", attr); if (val) { if (!strcmp(val, "0") || !strcmp(val, "FALSE") || !strcmp(val, "false") || !strcmp(val, "False")) dep->pre = FALSE; else dep->pre = TRUE; } switch (pd->state) { case STATE_RPM_ENTRY_PROVIDES: pd->pkg->provides = g_slist_prepend(pd->pkg->provides, dep); break; case STATE_RPM_ENTRY_REQUIRES: pd->pkg->requires = g_slist_prepend(pd->pkg->requires, dep); break; case STATE_RPM_ENTRY_CONFLICTS: pd->pkg->conflicts = g_slist_prepend(pd->pkg->conflicts, dep); break; case STATE_RPM_ENTRY_OBSOLETES: pd->pkg->obsoletes = g_slist_prepend(pd->pkg->obsoletes, dep); break; case STATE_RPM_ENTRY_SUGGESTS: pd->pkg->suggests = g_slist_prepend(pd->pkg->suggests, dep); break; case STATE_RPM_ENTRY_ENHANCES: pd->pkg->enhances = g_slist_prepend(pd->pkg->enhances, dep); break; case STATE_RPM_ENTRY_RECOMMENDS: pd->pkg->recommends = g_slist_prepend(pd->pkg->recommends, dep); break; case STATE_RPM_ENTRY_SUPPLEMENTS: pd->pkg->supplements = g_slist_prepend(pd->pkg->supplements, dep); break; default: assert(0); } break; } case STATE_FILE: assert(pd->pkg); if (!pd->do_files) break; val = cr_find_attr("type", attr); pd->last_file_type = FILE_FILE; if (val) { if (!strcmp(val, "dir")) pd->last_file_type = FILE_DIR; else if (!strcmp(val, "ghost")) pd->last_file_type = FILE_GHOST; else cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNVAL, "Unknown file type \"%s\"", val); } break; default: break; } } static void XMLCALL cr_end_handler(void *pdata, G_GNUC_UNUSED const xmlChar *element) { cr_ParserData *pd = pdata; GError *tmp_err = NULL; unsigned int state = pd->state; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // Back from the unknown state pd->depth--; return; } pd->depth--; pd->statedepth--; pd->state = pd->sbtab[pd->state]; pd->docontent = 0; switch (state) { case STATE_START: case STATE_METADATA: break; case STATE_PACKAGE: if (!pd->pkg) return; if (!pd->pkg->pkgId) { // Package without a pkgid attr is error g_set_error(&pd->err, ERR_DOMAIN, ERR_CODE_XML, "Package without pkgid (checksum)!"); break; } if (pd->pkg->pkgId[0] == '\0') { // Package without a pkgid attr is error g_set_error(&pd->err, ERR_DOMAIN, ERR_CODE_XML, "Package with empty pkgid (checksum)!"); break; } if (pd->do_files) // Reverse order of files pd->pkg->files = g_slist_reverse(pd->pkg->files); if (pd->pkgcb && pd->pkgcb(pd->pkg, pd->pkgcb_data, 
&tmp_err)) { if (tmp_err) g_propagate_prefixed_error(&pd->err, tmp_err, "Parsing interrupted: "); else g_set_error(&pd->err, ERR_DOMAIN, CRE_CBINTERRUPTED, "Parsing interrupted"); } else { // If callback return CRE_OK but it simultaneously set // the tmp_err then it's a programming error. assert(tmp_err == NULL); } pd->pkg = NULL; break; case STATE_NAME: assert(pd->pkg); if (!pd->pkg->name) // name could be already filled by filelists or other xml parser pd->pkg->name = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_ARCH: assert(pd->pkg); if (!pd->pkg->arch) // arch could be already filled by filelists or other xml parser pd->pkg->arch = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_CHECKSUM: assert(pd->pkg); if (!pd->pkg->pkgId) // pkgId could be already filled by filelists or other xml parser pd->pkg->pkgId = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_SUMMARY: assert(pd->pkg); pd->pkg->summary = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_DESCRIPTION: assert(pd->pkg); pd->pkg->description = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_PACKAGER: assert(pd->pkg); pd->pkg->rpm_packager = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_URL: assert(pd->pkg); pd->pkg->url = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_RPM_LICENSE: assert(pd->pkg); pd->pkg->rpm_license = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_RPM_VENDOR: assert(pd->pkg); pd->pkg->rpm_vendor = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_RPM_GROUP: assert(pd->pkg); pd->pkg->rpm_group = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_RPM_BUILDHOST: assert(pd->pkg); pd->pkg->rpm_buildhost = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_RPM_SOURCERPM: assert(pd->pkg); pd->pkg->rpm_sourcerpm = cr_safe_string_chunk_insert_null(pd->pkg->chunk, pd->content); break; case STATE_RPM_PROVIDES: pd->pkg->provides = g_slist_reverse(pd->pkg->provides); break; case STATE_RPM_REQUIRES: pd->pkg->requires = g_slist_reverse(pd->pkg->requires); break; case STATE_RPM_CONFLICTS: pd->pkg->conflicts = g_slist_reverse(pd->pkg->conflicts); break; case STATE_RPM_OBSOLETES: pd->pkg->obsoletes = g_slist_reverse(pd->pkg->obsoletes); break; case STATE_RPM_SUGGESTS: pd->pkg->suggests = g_slist_reverse(pd->pkg->suggests); break; case STATE_RPM_ENHANCES: pd->pkg->enhances = g_slist_reverse(pd->pkg->enhances); break; case STATE_RPM_RECOMMENDS: pd->pkg->recommends = g_slist_reverse(pd->pkg->recommends); break; case STATE_RPM_SUPPLEMENTS: pd->pkg->supplements = g_slist_reverse(pd->pkg->supplements); break; case STATE_FILE: { assert(pd->pkg); if (!pd->do_files) break; if (!pd->content) break; cr_PackageFile *pkg_file = cr_package_file_new(); pkg_file->name = cr_safe_string_chunk_insert(pd->pkg->chunk, cr_get_filename(pd->content)); pd->content[pd->lcontent - strlen(pkg_file->name)] = '\0'; pkg_file->path = cr_safe_string_chunk_insert_const(pd->pkg->chunk, pd->content); switch (pd->last_file_type) { case FILE_FILE: pkg_file->type = NULL; break; // NULL => "file" case FILE_DIR: pkg_file->type = "dir"; break; case FILE_GHOST: pkg_file->type = "ghost"; break; default: assert(0); // Should not happend } pd->pkg->files = g_slist_prepend(pd->pkg->files, pkg_file); break; } default: 
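            // All remaining states (e.g. STATE_VERSION, STATE_TIME, STATE_SIZE,
            // STATE_LOCATION, STATE_RPM_HEADER_RANGE and the rpm:entry states)
            // need no post-processing when their element ends - their data was
            // already read from the element attributes in cr_start_handler().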
break; } } int cr_xml_parse_primary_internal(const char *target, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int do_files, int (*parser_func)(xmlParserCtxtPtr, cr_ParserData *, const char *, GError**), GError **err) { int ret = CRE_OK; cr_ParserData *pd; GError *tmp_err = NULL; assert(target); assert(newpkgcb || pkgcb); assert(!err || *err == NULL); if (!newpkgcb) // Use default newpkgcb newpkgcb = cr_newpkgcb; // Init xmlSAXHandler sax; memset(&sax, 0, sizeof(sax)); sax.startElement = cr_start_handler; sax.endElement = cr_end_handler; sax.characters = cr_char_handler; pd = cr_xml_parser_data(NUMSTATES); xmlParserCtxtPtr parser; parser = xmlCreatePushParserCtxt(&sax, pd, NULL, 0, NULL); pd->parser = parser; pd->state = STATE_START; pd->newpkgcb_data = newpkgcb_data; pd->newpkgcb = newpkgcb; pd->pkgcb_data = pkgcb_data; pd->pkgcb = pkgcb; pd->do_files = do_files; pd->warningcb = warningcb; pd->warningcb_data = warningcb_data; for (cr_StatesSwitch *sw = stateswitches; sw->from != NUMSTATES; sw++) { if (!pd->swtab[sw->from]) pd->swtab[sw->from] = sw; pd->sbtab[sw->to] = sw->from; } // Parsing ret = parser_func(parser, pd, target, &tmp_err); if (tmp_err) g_propagate_error(err, tmp_err); // Warning if file was probably a different type than expected if (!pd->main_tag_found && ret == CRE_OK) cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE, "The target doesn't contain the expected element " "\"\" - The target probably isn't " "a valid primary xml"); // Clean up if (ret != CRE_OK && newpkgcb == cr_newpkgcb) { // Prevent memory leak when the parsing is interrupted by an error. // If a new package object was created by the cr_newpkgcb then // is obvious that there is no other reference to the package // except of the parser reference in pd->pkg. // If a caller supplied its own newpkgcb, then the freeing // of the currently parsed package is the caller responsibility. cr_package_free(pd->pkg); } cr_xml_parser_data_free(pd); xmlFreeParserCtxt(parser); return ret; } int cr_xml_parse_primary(const char *path, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int do_files, GError **err) { return cr_xml_parse_primary_internal(path, newpkgcb, newpkgcb_data, pkgcb, pkgcb_data, warningcb, warningcb_data, do_files, &cr_xml_parser_generic, err); } int cr_xml_parse_primary_snippet(const char *xml_string, cr_XmlParserNewPkgCb newpkgcb, void *newpkgcb_data, cr_XmlParserPkgCb pkgcb, void *pkgcb_data, cr_XmlParserWarningCb warningcb, void *warningcb_data, int do_files, GError **err) { char* wrapped_xml_string = g_strconcat("", xml_string, "", NULL); int ret = cr_xml_parse_primary_internal(wrapped_xml_string, newpkgcb, newpkgcb_data, pkgcb, pkgcb_data, warningcb, warningcb_data, do_files, &cr_xml_parser_generic_from_string, err); free(wrapped_xml_string); return ret; } createrepo_c-0.17.0/src/xml_parser_repomd.c000066400000000000000000000337011400672373200207170ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "xml_parser_internal.h" #include "xml_parser.h" #include "error.h" #include "package.h" #include "misc.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define ERR_CODE_XML CRE_BADXMLREPOMD typedef enum { STATE_START, STATE_REPOMD, STATE_REVISION, STATE_REPOID, STATE_CONTENTHASH, STATE_TAGS, STATE_REPO, STATE_CONTENT, STATE_DISTRO, STATE_DATA, STATE_LOCATION, STATE_CHECKSUM, STATE_OPENCHECKSUM, STATE_HEADERCHECKSUM, STATE_TIMESTAMP, STATE_SIZE, STATE_OPENSIZE, STATE_HEADERSIZE, STATE_DBVERSION, NUMSTATES } cr_RepomdState; /* NOTE: Same states in the first column must be together!!! * Performance tip: More frequent elements should be listed * first in its group (eg: element "package" (STATE_PACKAGE) * has a "file" element listed first, because it is more frequent * than a "version" element). */ static cr_StatesSwitch stateswitches[] = { { STATE_START, "repomd", STATE_REPOMD, 0 }, { STATE_REPOMD, "revision", STATE_REVISION, 1 }, { STATE_REPOMD, "repoid", STATE_REPOID, 1 }, { STATE_REPOMD, "contenthash", STATE_CONTENTHASH, 1 }, { STATE_REPOMD, "tags", STATE_TAGS, 0 }, { STATE_REPOMD, "data", STATE_DATA, 0 }, { STATE_TAGS, "repo", STATE_REPO, 1 }, { STATE_TAGS, "content", STATE_CONTENT, 1 }, { STATE_TAGS, "distro", STATE_DISTRO, 1 }, { STATE_DATA, "location", STATE_LOCATION, 0 }, { STATE_DATA, "checksum", STATE_CHECKSUM, 1 }, { STATE_DATA, "open-checksum", STATE_OPENCHECKSUM, 1 }, { STATE_DATA, "header-checksum", STATE_HEADERCHECKSUM, 1 }, { STATE_DATA, "timestamp", STATE_TIMESTAMP, 1 }, { STATE_DATA, "size", STATE_SIZE, 1 }, { STATE_DATA, "open-size", STATE_OPENSIZE, 1 }, { STATE_DATA, "header-size", STATE_HEADERSIZE, 1 }, { STATE_DATA, "database_version", STATE_DBVERSION, 1 }, { NUMSTATES, NULL, NUMSTATES, 0 } }; static void cr_start_handler(void *pdata, const xmlChar *element, const xmlChar **attr) { cr_ParserData *pd = pdata; cr_StatesSwitch *sw; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // We are inside of unknown element pd->depth++; return; } pd->depth++; if (!pd->swtab[pd->state]) { // Current element should not have any sub elements return; } // Find current state by its name for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++) if (!strcmp((char *) element, sw->ename)) break; if (sw->from != pd->state) { // No state for current element (unknown element) cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNTAG, "Unknown element \"%s\"", element); return; } // Update parser data pd->state = sw->to; pd->docontent = sw->docontent; pd->statedepth = pd->depth; pd->lcontent = 0; pd->content[0] = '\0'; const char *val; switch(pd->state) { case STATE_START: break; case STATE_REPOMD: pd->main_tag_found = TRUE; break; case STATE_REVISION: case STATE_TAGS: case STATE_REPO: case STATE_CONTENT: break; case STATE_REPOID: assert(pd->repomd); assert(!pd->repomdrecord); val = cr_find_attr("type", attr); if (val) pd->repomd->repoid_type = g_string_chunk_insert(pd->repomd->chunk, val); break; case STATE_CONTENTHASH: assert(pd->repomd); 
assert(!pd->repomdrecord); val = cr_find_attr("type", attr); if (val) pd->repomd->contenthash_type = g_string_chunk_insert( pd->repomd->chunk, val); break; case STATE_DISTRO: assert(pd->repomd); assert(!pd->repomdrecord); val = cr_find_attr("cpeid", attr); if (val) pd->cpeid = g_strdup(val); break; case STATE_DATA: assert(pd->repomd); assert(!pd->repomdrecord); val = cr_find_attr("type", attr); if (!val) { cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"type\" of a data element"); val = "unknown"; } pd->repomdrecord = cr_repomd_record_new(val, NULL); cr_repomd_set_record(pd->repomd, pd->repomdrecord); break; case STATE_LOCATION: assert(pd->repomd); assert(pd->repomdrecord); val = cr_find_attr("href", attr); if (val) pd->repomdrecord->location_href = g_string_chunk_insert( pd->repomdrecord->chunk, val); else cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"href\" of a location element"); val = cr_find_attr("xml:base", attr); if (val) pd->repomdrecord->location_base = g_string_chunk_insert( pd->repomdrecord->chunk, val); break; case STATE_CHECKSUM: assert(pd->repomd); assert(pd->repomdrecord); val = cr_find_attr("type", attr); if (!val) { cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"type\" of a checksum element"); break; } pd->repomdrecord->checksum_type = g_string_chunk_insert( pd->repomdrecord->chunk, val); break; case STATE_OPENCHECKSUM: assert(pd->repomd); assert(pd->repomdrecord); val = cr_find_attr("type", attr); if (!val) { cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"type\" of an open checksum element"); break; } pd->repomdrecord->checksum_open_type = g_string_chunk_insert( pd->repomdrecord->chunk, val); break; case STATE_HEADERCHECKSUM: assert(pd->repomd); assert(pd->repomdrecord); val = cr_find_attr("type", attr); if (!val) { cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGATTR, "Missing attribute \"type\" of a header checksum element"); break; } pd->repomdrecord->checksum_header_type = g_string_chunk_insert( pd->repomdrecord->chunk, val); break; case STATE_TIMESTAMP: case STATE_SIZE: case STATE_OPENSIZE: case STATE_HEADERSIZE: case STATE_DBVERSION: default: break; } } static void cr_end_handler(void *pdata, G_GNUC_UNUSED const xmlChar *element) { cr_ParserData *pd = pdata; unsigned int state = pd->state; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // Back from the unknown state pd->depth--; return; } pd->depth--; pd->statedepth--; pd->state = pd->sbtab[pd->state]; pd->docontent = 0; switch (state) { case STATE_START: case STATE_REPOMD: break; case STATE_REVISION: assert(pd->repomd); assert(!pd->repomdrecord); if (pd->lcontent == 0) { cr_xml_parser_warning(pd, CR_XML_WARNING_MISSINGVAL, "Missing value of a revision element"); break; } cr_repomd_set_revision(pd->repomd, pd->content); break; case STATE_REPOID: assert(pd->repomd); assert(!pd->repomdrecord); pd->repomd->repoid = g_string_chunk_insert(pd->repomd->chunk, pd->content); break; case STATE_CONTENTHASH: assert(pd->repomd); assert(!pd->repomdrecord); pd->repomd->contenthash = g_string_chunk_insert(pd->repomd->chunk, pd->content); break; case STATE_TAGS: break; case STATE_REPO: assert(pd->repomd); assert(!pd->repomdrecord); cr_repomd_add_repo_tag(pd->repomd, pd->content); break; case STATE_CONTENT: assert(pd->repomd); assert(!pd->repomdrecord); cr_repomd_add_content_tag(pd->repomd, pd->content); break; case STATE_DISTRO: assert(pd->repomd); assert(!pd->repomdrecord); 
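            // pd->cpeid was g_strdup()-ed from the optional "cpeid" attribute
            // in cr_start_handler(); pd->content holds the <distro> tag text.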
cr_repomd_add_distro_tag(pd->repomd, pd->cpeid, pd->content); if (pd->cpeid) { g_free(pd->cpeid); pd->cpeid = NULL; } break; case STATE_DATA: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord = NULL; break; case STATE_LOCATION: break; case STATE_CHECKSUM: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->checksum = cr_safe_string_chunk_insert( pd->repomdrecord->chunk, pd->content); break; case STATE_OPENCHECKSUM: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->checksum_open = cr_safe_string_chunk_insert( pd->repomdrecord->chunk, pd->content); break; case STATE_HEADERCHECKSUM: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->checksum_header = cr_safe_string_chunk_insert( pd->repomdrecord->chunk, pd->content); break; case STATE_TIMESTAMP: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->timestamp = cr_xml_parser_strtoll(pd, pd->content, 0); break; case STATE_SIZE: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->size = cr_xml_parser_strtoll(pd, pd->content, 0); break; case STATE_OPENSIZE: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->size_open = cr_xml_parser_strtoll(pd, pd->content, 0); break; case STATE_HEADERSIZE: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->size_header = cr_xml_parser_strtoll(pd, pd->content, 0); break; case STATE_DBVERSION: assert(pd->repomd); assert(pd->repomdrecord); pd->repomdrecord->db_ver = (int) cr_xml_parser_strtoll(pd, pd->content, 0); break; default: break; } } int cr_xml_parse_repomd(const char *path, cr_Repomd *repomd, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err) { int ret = CRE_OK; cr_ParserData *pd; GError *tmp_err = NULL; assert(path); assert(repomd); assert(!err || *err == NULL); // Init xmlSAXHandler sax; memset(&sax, 0, sizeof(sax)); sax.startElement = cr_start_handler; sax.endElement = cr_end_handler; sax.characters = cr_char_handler; pd = cr_xml_parser_data(NUMSTATES); xmlParserCtxtPtr parser; parser = xmlCreatePushParserCtxt(&sax, pd, NULL, 0, NULL); pd->parser = parser; pd->state = STATE_START; pd->repomd = repomd; pd->warningcb = warningcb; pd->warningcb_data = warningcb_data; for (cr_StatesSwitch *sw = stateswitches; sw->from != NUMSTATES; sw++) { if (!pd->swtab[sw->from]) pd->swtab[sw->from] = sw; pd->sbtab[sw->to] = sw->from; } // Parsing ret = cr_xml_parser_generic(parser, pd, path, &tmp_err); if (tmp_err) g_propagate_error(err, tmp_err); // Warning if file was probably a different type than expected if (!pd->main_tag_found && ret == CRE_OK) cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE, "The file don't contain the expected element " "\"\" - The file probably isn't " "a valid repomd.xml"); // Clean up cr_xml_parser_data_free(pd); xmlFreeParserCtxt(parser); return ret; } createrepo_c-0.17.0/src/xml_parser_updateinfo.c000066400000000000000000000466621400672373200216010ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2014 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "xml_parser_internal.h" #include "xml_parser.h" #include "updateinfo.h" #include "error.h" #include "misc.h" #define ERR_DOMAIN CREATEREPO_C_ERROR #define ERR_CODE_XML CRE_BADXMLUPDATEINFO typedef enum { STATE_START, STATE_UPDATES, STATE_UPDATE, // ----------------------------- STATE_ID, STATE_TITLE, STATE_ISSUED, STATE_UPDATED, STATE_RIGHTS, STATE_RELEASE, STATE_PUSHCOUNT, STATE_SEVERITY, STATE_SUMMARY, STATE_DESCRIPTION, STATE_SOLUTION, STATE_MESSAGE, // Not implemented STATE_REFERENCES, // ------------------------- STATE_REFERENCE, STATE_PKGLIST, // ---------------------------- STATE_COLLECTION, STATE_NAME, STATE_MODULE, STATE_PACKAGE, STATE_FILENAME, STATE_SUM, STATE_UPDATERECORD_REBOOTSUGGESTED, STATE_REBOOTSUGGESTED, STATE_RESTARTSUGGESTED, STATE_RELOGINSUGGESTED, NUMSTATES, } cr_UpdateinfoState; /* NOTE: Same states in the first column must be together!!! * Performance tip: More frequent elements should be listed * first in its group (eg: element "package" (STATE_PACKAGE) * has a "file" element listed first, because it is more frequent * than a "version" element). */ static cr_StatesSwitch stateswitches[] = { { STATE_START, "updates", STATE_UPDATES, 0 }, { STATE_UPDATES, "update", STATE_UPDATE, 0 }, { STATE_UPDATE, "id", STATE_ID, 1 }, { STATE_UPDATE, "title", STATE_TITLE, 1 }, { STATE_UPDATE, "issued", STATE_ISSUED, 0 }, { STATE_UPDATE, "updated", STATE_UPDATED, 0 }, { STATE_UPDATE, "rights", STATE_RIGHTS, 1 }, { STATE_UPDATE, "release", STATE_RELEASE, 1 }, { STATE_UPDATE, "pushcount", STATE_PUSHCOUNT, 1 }, { STATE_UPDATE, "severity", STATE_SEVERITY, 1 }, { STATE_UPDATE, "summary", STATE_SUMMARY, 1 }, { STATE_UPDATE, "description", STATE_DESCRIPTION, 1 }, { STATE_UPDATE, "solution", STATE_SOLUTION, 1 }, { STATE_UPDATE, "message", STATE_MESSAGE, 1 }, // NI { STATE_UPDATE, "references", STATE_REFERENCES, 0 }, { STATE_UPDATE, "pkglist", STATE_PKGLIST, 0 }, { STATE_UPDATE, "reboot_suggested", STATE_UPDATERECORD_REBOOTSUGGESTED,0 }, { STATE_REFERENCES, "reference", STATE_REFERENCE, 0 }, { STATE_PKGLIST, "collection", STATE_COLLECTION, 0 }, { STATE_COLLECTION, "package", STATE_PACKAGE, 0 }, { STATE_COLLECTION, "name", STATE_NAME, 1 }, { STATE_COLLECTION, "module", STATE_MODULE, 0 }, { STATE_PACKAGE, "filename", STATE_FILENAME, 1 }, { STATE_PACKAGE, "sum", STATE_SUM, 1 }, { STATE_PACKAGE, "reboot_suggested", STATE_REBOOTSUGGESTED, 0 }, { STATE_PACKAGE, "restart_suggested", STATE_RESTARTSUGGESTED, 0 }, { STATE_PACKAGE, "relogin_suggested", STATE_RELOGINSUGGESTED, 0 }, { NUMSTATES, NULL, NUMSTATES, 0 } }; static void cr_start_handler(void *pdata, const xmlChar *element, const xmlChar **attr) { cr_ParserData *pd = pdata; cr_StatesSwitch *sw; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // We are inside of unknown element pd->depth++; return; } pd->depth++; if (!pd->swtab[pd->state]) { // Current element should not have any sub elements return; } // Find current state by its name for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++) if (!strcmp((char *) element, sw->ename)) break; if (sw->from != pd->state) { // No state for current element (unknown element) cr_xml_parser_warning(pd, CR_XML_WARNING_UNKNOWNTAG, "Unknown element \"%s\"", element); return; } // 
Update parser data pd->state = sw->to; pd->docontent = sw->docontent; pd->statedepth = pd->depth; pd->lcontent = 0; pd->content[0] = '\0'; const char *val; // Shortcuts cr_UpdateRecord *rec = pd->updaterecord; cr_UpdateCollection *collection = pd->updatecollection; cr_UpdateCollectionPackage *package = pd->updatecollectionpackage; switch(pd->state) { case STATE_START: break; case STATE_UPDATES: pd->main_tag_found = TRUE; break; case STATE_ID: case STATE_TITLE: case STATE_RIGHTS: case STATE_RELEASE: case STATE_PUSHCOUNT: case STATE_SEVERITY: case STATE_SUMMARY: case STATE_DESCRIPTION: case STATE_SOLUTION: case STATE_NAME: case STATE_FILENAME: case STATE_REFERENCES: case STATE_PKGLIST: default: // All states which don't have attributes and no action is // required for them should be skipped break; case STATE_UPDATE: assert(pd->updateinfo); assert(!pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionmodule); assert(!pd->updatecollectionpackage); rec = cr_updaterecord_new(); cr_updateinfo_apped_record(pd->updateinfo, rec); pd->updaterecord = rec; val = cr_find_attr("from", attr); if (val) rec->from = g_string_chunk_insert(rec->chunk, val); val = cr_find_attr("status", attr); if (val) rec->status = g_string_chunk_insert(rec->chunk, val); val = cr_find_attr("type", attr); if (val) rec->type = g_string_chunk_insert(rec->chunk, val); val = cr_find_attr("version", attr); if (val) rec->version = g_string_chunk_insert(rec->chunk, val); break; case STATE_ISSUED: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionmodule); assert(!pd->updatecollectionpackage); val = cr_find_attr("date", attr); if (val) rec->issued_date = g_string_chunk_insert(rec->chunk, val); break; case STATE_UPDATED: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionmodule); assert(!pd->updatecollectionpackage); val = cr_find_attr("date", attr); if (val) rec->updated_date = g_string_chunk_insert(rec->chunk, val); break; case STATE_REFERENCE: { cr_UpdateReference *ref; assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionmodule); assert(!pd->updatecollectionpackage); ref = cr_updatereference_new(); cr_updaterecord_append_reference(rec, ref); val = cr_find_attr("id", attr); if (val) ref->id = g_string_chunk_insert(ref->chunk, val); val = cr_find_attr("href", attr); if (val) ref->href = g_string_chunk_insert(ref->chunk, val); val = cr_find_attr("type", attr); if (val) ref->type = g_string_chunk_insert(ref->chunk, val); val = cr_find_attr("title", attr); if (val) ref->title = g_string_chunk_insert(ref->chunk, val); break; } case STATE_COLLECTION: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionmodule); assert(!pd->updatecollectionpackage); collection = cr_updatecollection_new(); cr_updaterecord_append_collection(rec, collection); pd->updatecollection = collection; val = cr_find_attr("short", attr); if (val) collection->shortname = g_string_chunk_insert(collection->chunk, val); break; case STATE_MODULE: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(!pd->updatecollectionmodule); assert(!pd->updatecollectionpackage); cr_UpdateCollectionModule *module = cr_updatecollectionmodule_new(); assert(module); if (module) collection->module = module; val = cr_find_attr("name", attr); if (val) module->name = g_string_chunk_insert(module->chunk, val); val = 
cr_find_attr("stream", attr); if (val) module->stream = g_string_chunk_insert(module->chunk, val); val = cr_find_attr("version", attr); if (val){ gchar *endptr; errno = 0; module->version = strtoull(val, &endptr, 10); if ((errno == ERANGE && (module->version == ULLONG_MAX)) || (errno != 0 && module->version == 0)) { perror("strtoull error when parsing module version"); module->version = 0; } if (endptr == val) module->version = 0; } val = cr_find_attr("context", attr); if (val) module->context = g_string_chunk_insert(module->chunk, val); val = cr_find_attr("arch", attr); if (val) module->arch = g_string_chunk_insert(module->chunk, val); break; case STATE_PACKAGE: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(!pd->updatecollectionpackage); package = cr_updatecollectionpackage_new(); assert(package); cr_updatecollection_append_package(collection, package); pd->updatecollectionpackage = package; val = cr_find_attr("name", attr); if (val) package->name = g_string_chunk_insert(package->chunk, val); val = cr_find_attr("version", attr); if (val) package->version = g_string_chunk_insert(package->chunk, val); val = cr_find_attr("release", attr); if (val) package->release = g_string_chunk_insert(package->chunk, val); val = cr_find_attr("epoch", attr); if (val) package->epoch = g_string_chunk_insert(package->chunk, val); val = cr_find_attr("arch", attr); if (val) package->arch = g_string_chunk_insert(package->chunk, val); val = cr_find_attr("src", attr); if (val) package->src = g_string_chunk_insert(package->chunk, val); break; case STATE_SUM: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); val = cr_find_attr("type", attr); if (val) package->sum_type = cr_checksum_type(val); break; case STATE_UPDATERECORD_REBOOTSUGGESTED: assert(pd->updateinfo); assert(pd->updaterecord); rec->reboot_suggested = TRUE; break; case STATE_REBOOTSUGGESTED: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); package->reboot_suggested = TRUE; break; case STATE_RESTARTSUGGESTED: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); package->restart_suggested = TRUE; break; case STATE_RELOGINSUGGESTED: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); package->relogin_suggested = TRUE; break; } } static void cr_end_handler(void *pdata, G_GNUC_UNUSED const xmlChar *element) { cr_ParserData *pd = pdata; unsigned int state = pd->state; if (pd->err) return; // There was an error -> do nothing if (pd->depth != pd->statedepth) { // Back from the unknown state pd->depth--; return; } pd->depth--; pd->statedepth--; pd->state = pd->sbtab[pd->state]; pd->docontent = 0; // Shortcuts char *content = pd->content; cr_UpdateRecord *rec = pd->updaterecord; cr_UpdateCollection *col = pd->updatecollection; cr_UpdateCollectionPackage *package = pd->updatecollectionpackage; switch (state) { case STATE_START: case STATE_UPDATES: case STATE_ISSUED: case STATE_UPDATED: case STATE_REFERENCES: case STATE_REFERENCE: case STATE_MODULE: case STATE_PKGLIST: case STATE_REBOOTSUGGESTED: case STATE_RESTARTSUGGESTED: case STATE_RELOGINSUGGESTED: case STATE_UPDATERECORD_REBOOTSUGGESTED: // All elements with no text data and without need of any // post processing should go here break; case STATE_ID: assert(pd->updateinfo); assert(pd->updaterecord); 
assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->id = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_TITLE: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->title = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_RIGHTS: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->rights = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_RELEASE: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->release = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_PUSHCOUNT: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->pushcount = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_SEVERITY: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->severity = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_SUMMARY: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->summary = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_DESCRIPTION: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->description = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_SOLUTION: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); rec->solution = cr_safe_string_chunk_insert_null(rec->chunk, content); break; case STATE_NAME: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(!pd->updatecollectionpackage); col->name = cr_safe_string_chunk_insert_null(col->chunk, content); break; case STATE_FILENAME: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); package->filename = cr_safe_string_chunk_insert_null(package->chunk, content); break; case STATE_SUM: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); package->sum = cr_safe_string_chunk_insert_null(package->chunk, content); break; case STATE_PACKAGE: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(pd->updatecollectionpackage); pd->updatecollectionpackage = NULL; break; case STATE_COLLECTION: assert(pd->updateinfo); assert(pd->updaterecord); assert(pd->updatecollection); assert(!pd->updatecollectionpackage); pd->updatecollection = NULL; break; case STATE_UPDATE: assert(pd->updateinfo); assert(pd->updaterecord); assert(!pd->updatecollection); assert(!pd->updatecollectionpackage); pd->updaterecord = NULL; break; default: break; } } int cr_xml_parse_updateinfo(const char *path, cr_UpdateInfo *updateinfo, cr_XmlParserWarningCb warningcb, void *warningcb_data, GError **err) { int ret = CRE_OK; cr_ParserData *pd; GError *tmp_err = NULL; assert(path); assert(updateinfo); assert(!err || *err == NULL); // Init xmlSAXHandler sax; memset(&sax, 0, sizeof(sax)); sax.startElement = cr_start_handler; sax.endElement = cr_end_handler; sax.characters = cr_char_handler; pd = cr_xml_parser_data(NUMSTATES); 
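    // Typical use of this entry point, as a rough sketch only (hypothetical
    // caller code, not taken from the library or its tests):
    //
    //     cr_UpdateInfo *ui = cr_updateinfo_new();
    //     GError *error = NULL;
    //     if (cr_xml_parse_updateinfo("repodata/updateinfo.xml", ui,
    //                                 NULL, NULL, &error) != CRE_OK) {
    //         g_printerr("Cannot parse updateinfo: %s\n", error->message);
    //         g_clear_error(&error);
    //     }
    //     // ... walk the parsed update records ...
    //     cr_updateinfo_free(ui);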
xmlParserCtxtPtr parser; parser = xmlCreatePushParserCtxt(&sax, pd, NULL, 0, NULL); pd->parser = parser; pd->state = STATE_START; pd->updateinfo = updateinfo; pd->warningcb = warningcb; pd->warningcb_data = warningcb_data; for (cr_StatesSwitch *sw = stateswitches; sw->from != NUMSTATES; sw++) { if (!pd->swtab[sw->from]) pd->swtab[sw->from] = sw; pd->sbtab[sw->to] = sw->from; } // Parsing ret = cr_xml_parser_generic(parser, pd, path, &tmp_err); if (tmp_err) g_propagate_error(err, tmp_err); // Warning if file was probably a different type than expected if (!pd->main_tag_found && ret == CRE_OK) cr_xml_parser_warning(pd, CR_XML_WARNING_BADMDTYPE, "The file don't contain the expected element " "\"\" - The file probably isn't " "a valid updates.xml"); // Clean up cr_xml_parser_data_free(pd); xmlFreeParserCtxt(parser); return ret; } createrepo_c-0.17.0/tests/000077500000000000000000000000001400672373200154005ustar00rootroot00000000000000createrepo_c-0.17.0/tests/CMakeLists.txt000066400000000000000000000051141400672373200201410ustar00rootroot00000000000000ADD_EXECUTABLE(test_checksum test_checksum.c) TARGET_LINK_LIBRARIES(test_checksum libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_checksum) ADD_EXECUTABLE(test_compression_wrapper test_compression_wrapper.c) TARGET_LINK_LIBRARIES(test_compression_wrapper libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_compression_wrapper) ADD_EXECUTABLE(test_load_metadata test_load_metadata.c) TARGET_LINK_LIBRARIES(test_load_metadata libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_load_metadata) ADD_EXECUTABLE(test_locate_metadata test_locate_metadata.c) TARGET_LINK_LIBRARIES(test_locate_metadata libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_locate_metadata) ADD_EXECUTABLE(test_misc test_misc.c) TARGET_LINK_LIBRARIES(test_misc libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_misc) ADD_EXECUTABLE(test_sqlite test_sqlite.c) TARGET_LINK_LIBRARIES(test_sqlite libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_sqlite) ADD_EXECUTABLE(test_xml_file test_xml_file.c) TARGET_LINK_LIBRARIES(test_xml_file libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_xml_file) ADD_EXECUTABLE(test_xml_parser_filelists test_xml_parser_filelists.c) TARGET_LINK_LIBRARIES(test_xml_parser_filelists libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_xml_parser_filelists) ADD_EXECUTABLE(test_xml_parser_repomd test_xml_parser_repomd.c) TARGET_LINK_LIBRARIES(test_xml_parser_repomd libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_xml_parser_repomd) ADD_EXECUTABLE(test_xml_parser_updateinfo test_xml_parser_updateinfo.c) TARGET_LINK_LIBRARIES(test_xml_parser_updateinfo libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_xml_parser_updateinfo) ADD_EXECUTABLE(test_xml_dump test_xml_dump.c) TARGET_LINK_LIBRARIES(test_xml_dump libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_xml_dump) ADD_EXECUTABLE(test_xml_dump_primary test_xml_dump_primary.c) TARGET_LINK_LIBRARIES(test_xml_dump_primary libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_xml_dump_primary) ADD_EXECUTABLE(test_koji test_koji.c) TARGET_LINK_LIBRARIES(test_koji libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_koji) ADD_EXECUTABLE(test_modifyrepo_shared test_modifyrepo_shared.c) TARGET_LINK_LIBRARIES(test_modifyrepo_shared libcreaterepo_c ${GLIB2_LIBRARIES}) ADD_DEPENDENCIES(tests test_modifyrepo_shared) CONFIGURE_FILE("run_gtester.sh.in" 
"${CMAKE_BINARY_DIR}/tests/run_gtester.sh") ADD_TEST(test_main run_gtester.sh) IF (ENABLE_PYTHON) ADD_SUBDIRECTORY(python) ENDIF (ENABLE_PYTHON) createrepo_c-0.17.0/tests/createrepo000077700000000000000000000000001400672373200204532../src/ustar00rootroot00000000000000createrepo_c-0.17.0/tests/fixtures.h000066400000000000000000000237611400672373200174330ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #ifndef __C_CREATEREPOLIB_TEST_FIXTURES_H__ #define __C_CREATEREPOLIB_TEST_FIXTURES_H__ #define TMPDIR_TEMPLATE "/tmp/cr_testXXXXXX" #define TEST_DATA_PATH "testdata/" #define TEST_COMPRESSED_FILES_PATH TEST_DATA_PATH"compressed_files/" #define TEST_MODIFIED_REPO_FILES_PATH TEST_DATA_PATH"modified_repo_files/" #define TEST_PACKAGES_PATH TEST_DATA_PATH"packages/" #define TEST_REPO_00 TEST_DATA_PATH"repo_00/" #define TEST_REPO_01 TEST_DATA_PATH"repo_01/" #define TEST_REPO_02 TEST_DATA_PATH"repo_02/" #define TEST_REPO_03 TEST_DATA_PATH"repo_03/" #define TEST_REPO_KOJI_01 TEST_DATA_PATH"repo_koji_01/" #define TEST_REPO_KOJI_02 TEST_DATA_PATH"repo_koji_02/" #define TEST_FILES_PATH TEST_DATA_PATH"test_files/" #define TEST_UPDATEINFO_FILES_PATH TEST_DATA_PATH"updateinfo_files/" #define TEST_REPO_WITH_ADDITIONAL_METADATA TEST_DATA_PATH"repo_with_additional_metadata/" #define TEST_REPODATA_SNIPPETS TEST_DATA_PATH"repodata_snippets/" // Repo files #define TEST_REPO_00_REPOMD TEST_REPO_00"repodata/repomd.xml" #define TEST_REPO_00_PRIMARY TEST_REPO_00"repodata/1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz" #define TEST_REPO_00_FILELISTS TEST_REPO_00"repodata/95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f-filelists.xml.gz" #define TEST_REPO_00_OTHER TEST_REPO_00"repodata/ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6-other.xml.gz" #define TEST_REPO_01_REPOMD TEST_REPO_01"repodata/repomd.xml" #define TEST_REPO_01_PRIMARY TEST_REPO_01"repodata/6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e-primary.xml.gz" #define TEST_REPO_01_FILELISTS TEST_REPO_01"repodata/c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c-filelists.xml.gz" #define TEST_REPO_01_OTHER TEST_REPO_01"repodata/b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef-other.xml.gz" #define TEST_REPO_02_REPOMD TEST_REPO_02"repodata/repomd.xml" #define TEST_REPO_02_PRIMARY TEST_REPO_02"repodata/bcde64b04916a2a72fdc257d61bc922c70b3d58e953499180585f7a360ce86cf-primary.xml.gz" #define TEST_REPO_02_FILELISTS TEST_REPO_02"repodata/3b7e6ecd01af9cb674aff6458186911d7081bb5676d5562a21a963afc8a8bcc7-filelists.xml.gz" #define TEST_REPO_02_OTHER TEST_REPO_02"repodata/ab5d3edeea50f9b4ec5ee13e4d25c147e318e3a433dbabc94d3461f58ac28255-other.xml.gz" // REPO_03 is a copy of 
REPO_01 with some module metadata #define TEST_REPO_03_REPOMD TEST_REPO_03"repodata/repomd.xml" #define TEST_REPO_03_PRIMARY TEST_REPO_03"repodata/1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz" #define TEST_REPO_03_FILELISTS TEST_REPO_03"repodata/95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f-filelists.xml.gz" #define TEST_REPO_03_OTHER TEST_REPO_03"repodata/ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6-other.xml.gz" #define TEST_REPO_03_MODULEMD TEST_REPO_03"repodata/a850093e240506c728d6ce26a6fc51d6a7fe10730c67988d13afa7dd82df82d5-modules.yaml.xz" #define TEST_REPO_WITH_ADDITIONAL_METADATA_REPOMD TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/repomd.xml" #define TEST_REPO_WITH_ADDITIONAL_METADATA_PRIMARY_XML_GZ TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/490a2a494a3827b8a356f728ac36bc02fb009b0eaea173c890e727bb54219037-primary.xml.gz" #define TEST_REPO_WITH_ADDITIONAL_METADATA_PRIMARY_SQLITE_BZ2 TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/1e12239bf5cb07ec73c74482c35e80dabe30dbe2fdd57bd9e557d987cbacc8c2-primary.sqlite.bz2" #define TEST_REPO_WITH_ADDITIONAL_METADATA_FILELISTS_XML_GZ TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/ba5a4fdbb20e7b9b70d9a9abd974bcab1065b1e81d711f80e06ad8cae30c4183-filelists.xml.gz" #define TEST_REPO_WITH_ADDITIONAL_METADATA_FILELISTS_SQLITE_BZ2 TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/4f4de7d3254a033b84626f330bc6adb8a3c1a4a20f0ddbe30a5692a041318c81-filelists.sqlite.bz2" #define TEST_REPO_WITH_ADDITIONAL_METADATA_OTHER_XML_GZ TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/fd458a424a3f3e0dadc95b806674b79055c24e73637e47ad5a6e57926aa1b9d1-other.xml.gz" #define TEST_REPO_WITH_ADDITIONAL_METADATA_OTHER_SQLITE_BZ2 TEST_REPO_WITH_ADDITIONAL_METADATA"repodata/8b13cba732c1a02b841f43d6791ca68788d45f376787d9f3ccf68e75f01af499-other.sqlite.bz2" // Modified repo files (MFR) #define TEST_MRF_BAD_TYPE_FIL TEST_MODIFIED_REPO_FILES_PATH"bad_file_type-filelists.xml" #define TEST_MRF_NO_PKGID_FIL TEST_MODIFIED_REPO_FILES_PATH"no_pkgid-filelists.xml" #define TEST_MRF_NO_PKGID_OTH TEST_MODIFIED_REPO_FILES_PATH"no_pkgid-other.xml" #define TEST_MRF_MISSING_TYPE_REPOMD TEST_MODIFIED_REPO_FILES_PATH"missing_type-repomd.xml" #define TEST_MRF_UE_PRI_00 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_00-primary.xml" #define TEST_MRF_UE_PRI_01 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_01-primary.xml" #define TEST_MRF_UE_PRI_02 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_02-primary.xml" #define TEST_MRF_UE_FIL_00 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_00-filelists.xml" #define TEST_MRF_UE_FIL_01 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_01-filelists.xml" #define TEST_MRF_UE_FIL_02 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_02-filelists.xml" #define TEST_MRF_UE_OTH_00 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_00-other.xml" #define TEST_MRF_UE_OTH_01 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_01-other.xml" #define TEST_MRF_UE_OTH_02 TEST_MODIFIED_REPO_FILES_PATH"unknown_element_02-other.xml" // Test files #define TEST_EMPTY_FILE TEST_FILES_PATH"empty_file" #define TEST_TEXT_FILE TEST_FILES_PATH"text_file" #define TEST_TEXT_FILE_SHA256SUM "2f395bdfa2750978965e4781ddf224c89646c7d7a1569b7ebb023b170f7bd8bb" #define TEST_TEXT_FILE_GZ TEST_FILES_PATH"text_file.gz" #define TEST_TEXT_FILE_XZ TEST_FILES_PATH"text_file.xz" #define TEST_SQLITE_FILE TEST_FILES_PATH"sqlite_file.sqlite" #define TEST_BINARY_FILE TEST_FILES_PATH"binary_file" // Repodata snippets #define TEST_FILELISTS_SNIPPET_01 
TEST_REPODATA_SNIPPETS"filelists_snippet_01.xml" #define TEST_FILELISTS_SNIPPET_02 TEST_REPODATA_SNIPPETS"filelists_snippet_02.xml" // Other #define NON_EXIST_FILE "/tmp/foobarfile.which.should.not.exists" // Updateinfo files #define TEST_UPDATEINFO_00 TEST_UPDATEINFO_FILES_PATH"updateinfo_00.xml" #define TEST_UPDATEINFO_01 TEST_UPDATEINFO_FILES_PATH"updateinfo_01.xml" #define TEST_UPDATEINFO_02 TEST_UPDATEINFO_FILES_PATH"updateinfo_02.xml.xz" #define TEST_UPDATEINFO_03 TEST_UPDATEINFO_FILES_PATH"updateinfo_03.xml" #include "createrepo/package.h" cr_Package * get_package() { cr_Package *p; cr_Dependency *dep; cr_PackageFile *file; p = cr_package_new(); p->pkgId = "123456"; p->name = "foo"; p->arch = "x86_64"; p->version = "1.2.3"; p->epoch = "1"; p->release = "2"; p->summary = "foo package"; p->description = "super cool package"; p->url = "http://package.com"; p->time_file = 123456; p->time_build = 234567; p->rpm_license = "GPL"; p->rpm_vendor = NULL; p->rpm_group = NULL; p->rpm_buildhost = NULL; p->rpm_sourcerpm = "foo.src.rpm"; p->rpm_header_start = 20; p->rpm_header_end = 120; p->rpm_packager = NULL; p->size_package = 123; p->size_installed = 20; p->size_archive = 30; p->location_href = "foo.rpm"; p->location_base = "/test/"; p->checksum_type = "sha256"; dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = NULL; dep->pre = FALSE; p->provides = (g_slist_prepend(p->provides, dep)); dep = cr_dependency_new(); dep->name = "foobar_dep"; dep->flags = NULL; dep->pre = FALSE; dep->epoch = "3"; p->requires = (g_slist_prepend(p->requires, dep)); dep = cr_dependency_new(); dep->name = "foobar_pre_dep"; dep->flags = "LE"; dep->epoch = "3"; dep->pre = TRUE; p->requires = g_slist_prepend(p->requires, dep); file = cr_package_file_new(); file->type = ""; file->path = "/bin/"; file->name = "foo"; p->files = g_slist_prepend(p->files, file); file = cr_package_file_new(); file->type = "dir"; file->path = "/var/foo/"; file->name = NULL; p->files = g_slist_prepend(p->files, file); file = cr_package_file_new(); file->type = "dir"; file->path = "/var/foo/"; file->name = "baz"; p->files = g_slist_prepend(p->files, file); return p; } cr_Package * get_empty_package() { cr_Package *p; cr_Dependency *dep; cr_PackageFile *file; p = cr_package_new(); p->name = "foo"; dep = cr_dependency_new(); dep->name = NULL; dep->flags = NULL; dep->pre = FALSE; p->requires = (g_slist_prepend(p->requires, dep)); dep = cr_dependency_new(); dep->name = NULL; dep->flags = NULL; dep->pre = TRUE; p->requires = g_slist_prepend(p->requires, dep); file = cr_package_file_new(); file->type = NULL; file->path = NULL; file->name = NULL; p->files = g_slist_prepend(p->files, file); return p; } #endif createrepo_c-0.17.0/tests/python/000077500000000000000000000000001400672373200167215ustar00rootroot00000000000000createrepo_c-0.17.0/tests/python/CMakeLists.txt000066400000000000000000000000311400672373200214530ustar00rootroot00000000000000ADD_SUBDIRECTORY (tests) createrepo_c-0.17.0/tests/python/tests/000077500000000000000000000000001400672373200200635ustar00rootroot00000000000000createrepo_c-0.17.0/tests/python/tests/CMakeLists.txt000066400000000000000000000015461400672373200226310ustar00rootroot00000000000000# Detect nosetest version suffix EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} -c "import sys; sys.stdout.write('%s.%s' % (sys.version_info[0], sys.version_info[1]))" OUTPUT_VARIABLE PYTHON_MAJOR_DOT_MINOR_VERSION) SET(NOSETEST_VERSION_SUFFIX "-${PYTHON_MAJOR_DOT_MINOR_VERSION}") message("-- nosetests program is 
nosetests${NOSETEST_VERSION_SUFFIX}") execute_process(COMMAND nosetests${NOSETEST_VERSION_SUFFIX} --help OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE NOSE_CHECK_RESULT) IF (NOT NOSE_CHECK_RESULT STREQUAL "0") MESSAGE("Command 'nosetests${NOSETEST_VERSION_SUFFIX}' doesn't exist! Using only 'nosetests' instead") SET(NOSETEST_VERSION_SUFFIX "") ENDIF() CONFIGURE_FILE("run_nosetests.sh.in" "${CMAKE_BINARY_DIR}/tests/python/tests/run_nosetests.sh") ADD_TEST(test_python run_nosetests.sh -s ${CMAKE_CURRENT_SOURCE_DIR}) createrepo_c-0.17.0/tests/python/tests/__init__.py000066400000000000000000000000001400672373200221620ustar00rootroot00000000000000createrepo_c-0.17.0/tests/python/tests/fixtures.py000066400000000000000000000124711400672373200223130ustar00rootroot00000000000000import os.path TEST_DATA_PATH = os.path.normpath(os.path.join(__file__, "../../../testdata")) COMPRESSED_FILES_PATH = os.path.join(TEST_DATA_PATH, "compressed_files") MODIFIED_REPO_FILES_PATH = os.path.join(TEST_DATA_PATH, "modified_repo_files") PACKAGES_PATH = os.path.join(TEST_DATA_PATH, "packages") REPOS_PATH = TEST_DATA_PATH TEST_FILES_PATH = os.path.join(TEST_DATA_PATH, "test_files") REPODATA_SNIPPETS = os.path.join(TEST_DATA_PATH, "repodata_snippets") TEST_UPDATEINFO_FILES_PATH = os.path.join(TEST_DATA_PATH, "updateinfo_files/") # Modified repo files PRIMARY_ERROR_00_PATH = os.path.join(MODIFIED_REPO_FILES_PATH, "error_00-primary.xml") PRIMARY_MULTI_WARN_00_PATH = os.path.join(MODIFIED_REPO_FILES_PATH, "multiple_warnings_00-primary.xml") FILELISTS_ERROR_00_PATH = os.path.join(MODIFIED_REPO_FILES_PATH, "error_00-filelists.xml") FILELISTS_MULTI_WARN_00_PATH = os.path.join(MODIFIED_REPO_FILES_PATH, "multiple_warnings_00-filelists.xml") OTHER_ERROR_00_PATH = os.path.join(MODIFIED_REPO_FILES_PATH, "error_00-other.xml") OTHER_MULTI_WARN_00_PATH = os.path.join(MODIFIED_REPO_FILES_PATH, "multiple_warnings_00-other.xml") # Packages PKG_ARCHER = "Archer-3.4.5-6.x86_64.rpm" PKG_ARCHER_PATH = os.path.join(PACKAGES_PATH, PKG_ARCHER) PKG_BALICEK_ISO88591 = "balicek-iso88591-1.1.1-1.x86_64.rpm" PKG_BALICEK_ISO88591_PATH = os.path.join(PACKAGES_PATH, PKG_BALICEK_ISO88591) PKG_BALICEK_ISO88592 = "balicek-iso88592-1.1.1-1.x86_64.rpm" PKG_BALICEK_ISO88592_PATH = os.path.join(PACKAGES_PATH, PKG_BALICEK_ISO88592) PKG_BALICEK_UTF8 = "balicek-utf8-1.1.1-1.x86_64.rpm" PKG_BALICEK_UTF8_PATH = os.path.join(PACKAGES_PATH, PKG_BALICEK_UTF8) PKG_EMPTY = "empty-0-0.x86_64.rpm" PKG_EMPTY_PATH = os.path.join(PACKAGES_PATH, PKG_EMPTY) PKG_EMPTY_SRC = "empty-0-0.x86_64.rpm" PKG_EMPTY_SRC_PATH = os.path.join(PACKAGES_PATH, PKG_EMPTY_SRC) PKG_FAKE_BASH = "fake_bash-1.1.1-1.x86_64.rpm" PKG_FAKE_BASH_PATH = os.path.join(PACKAGES_PATH, PKG_FAKE_BASH) PKG_SUPER_KERNEL = "super_kernel-6.0.1-2.x86_64.rpm" PKG_SUPER_KERNEL_PATH = os.path.join(PACKAGES_PATH, PKG_SUPER_KERNEL) # Test repositories REPO_00_PATH = os.path.join(REPOS_PATH, "repo_00") REPO_00_REPOMD = os.path.join(REPO_00_PATH, "repodata/repomd.xml") REPO_00_PRIXML = os.path.join(REPO_00_PATH, "repodata/", "1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz") REPO_00_FILXML = os.path.join(REPO_00_PATH, "repodata/", "95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f-filelists.xml.gz") REPO_00_OTHXML = os.path.join(REPO_00_PATH, "repodata/", "ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6-other.xml.gz") REPO_00_PRIZCK = os.path.join(REPO_00_PATH, "repodata/", "e0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595-primary.xml.zck") 
REPO_00_FILZCK = os.path.join(REPO_00_PATH, "repodata/", "2e7db4492173b6c437fd1299dc335e63d09f24cbdadeac5175a61b787c2f7a44-filelists.xml.zck") REPO_00_OTHZCK = os.path.join(REPO_00_PATH, "repodata/", "a939c4765106655c3f7a13fb41d0f239824efa66bcd6c1e6c044a854012bda75-other.xml.zck") REPO_01_PATH = os.path.join(REPOS_PATH, "repo_01") REPO_01_REPOMD = os.path.join(REPO_01_PATH, "repodata/repomd.xml") REPO_01_PRIXML = os.path.join(REPO_01_PATH, "repodata/", "6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e-primary.xml.gz") REPO_01_FILXML = os.path.join(REPO_01_PATH, "repodata/", "c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c-filelists.xml.gz") REPO_01_OTHXML = os.path.join(REPO_01_PATH, "repodata/", "b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef-other.xml.gz") REPO_02_PATH = os.path.join(REPOS_PATH, "repo_02") REPO_02_REPOMD = os.path.join(REPO_02_PATH, "repodata/repomd.xml") REPO_02_PRIXML = os.path.join(REPO_02_PATH, "repodata/", "bcde64b04916a2a72fdc257d61bc922c70b3d58e953499180585f7a360ce86cf-primary.xml.gz") REPO_02_FILXML = os.path.join(REPO_02_PATH, "repodata/", "3b7e6ecd01af9cb674aff6458186911d7081bb5676d5562a21a963afc8a8bcc7-filelists.xml.gz") REPO_02_OTHXML = os.path.join(REPO_02_PATH, "repodata/", "ab5d3edeea50f9b4ec5ee13e4d25c147e318e3a433dbabc94d3461f58ac28255-other.xml.gz") REPO_WITH_ADDITIONAL_METADATA = os.path.join(REPOS_PATH, "repo_with_additional_metadata") # Test files FILE_BINARY = "binary_file" FILE_BINARY_PATH = os.path.join(TEST_FILES_PATH, FILE_BINARY) FILE_TEXT = "text_file" FILE_TEXT = os.path.join(TEST_FILES_PATH, FILE_TEXT) FILE_TEXT_SHA256SUM = "2f395bdfa2750978965e4781ddf224c89646c7d7a1569b7ebb023b170f7bd8bb" FILE_TEXT_GZ = FILE_TEXT+".gz" FILE_EMPTY = "empty_file" FILE_EMPTY = os.path.join(TEST_FILES_PATH, FILE_EMPTY) # Test snippets PRIMARY_SNIPPET_01 = os.path.join(REPODATA_SNIPPETS, "primary_snippet_01.xml") PRIMARY_SNIPPET_02 = os.path.join(REPODATA_SNIPPETS, "primary_snippet_02.xml") FILELISTS_SNIPPET_01 = os.path.join(REPODATA_SNIPPETS, "filelists_snippet_01.xml") FILELISTS_SNIPPET_02 = os.path.join(REPODATA_SNIPPETS, "filelists_snippet_02.xml") OTHER_SNIPPET_01 = os.path.join(REPODATA_SNIPPETS, "other_snippet_01.xml") OTHER_SNIPPET_02 = os.path.join(REPODATA_SNIPPETS, "other_snippet_02.xml") # Test updateinfo files TEST_UPDATEINFO_03 = os.path.join(TEST_UPDATEINFO_FILES_PATH, "updateinfo_03.xml") createrepo_c-0.17.0/tests/python/tests/run_nosetests.sh.in000077500000000000000000000003011400672373200237340ustar00rootroot00000000000000LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/src/: PYTHONPATH=${CMAKE_BINARY_DIR}/src/python/ WITH_LIBMODULEMD=${WITH_LIBMODULEMD} nosetests${NOSETEST_VERSION_SUFFIX} -s -v ${CMAKE_CURRENT_SOURCE_DIR}/ createrepo_c-0.17.0/tests/python/tests/test_checksum.py000066400000000000000000000030501400672373200232740ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseChecksum(unittest.TestCase): def test_checksum_name_str(self): self.assertEqual(cr.checksum_name_str(cr.MD5), "md5") self.assertEqual(cr.checksum_name_str(cr.SHA), "sha") self.assertEqual(cr.checksum_name_str(cr.SHA1), "sha1") self.assertEqual(cr.checksum_name_str(cr.SHA224), "sha224") self.assertEqual(cr.checksum_name_str(cr.SHA256), "sha256") self.assertEqual(cr.checksum_name_str(cr.SHA384), "sha384") self.assertEqual(cr.checksum_name_str(cr.SHA512), "sha512") self.assertEqual(cr.checksum_name_str(65), None) def 
test_checksum_type(self): self.assertEqual(cr.checksum_type("sha256"), cr.SHA256) self.assertEqual(cr.checksum_type("SHA256"), cr.SHA256) self.assertEqual(cr.checksum_type("Sha256"), cr.SHA256) self.assertEqual(cr.checksum_type("sHa256"), cr.SHA256) self.assertEqual(cr.checksum_type("ShA256"), cr.SHA256) self.assertEqual(cr.checksum_type("md5"), cr.MD5) self.assertEqual(cr.checksum_type("sha"), cr.SHA) self.assertEqual(cr.checksum_type("sha1"), cr.SHA1) self.assertEqual(cr.checksum_type("sha224"), cr.SHA224) self.assertEqual(cr.checksum_type("sha256"), cr.SHA256) self.assertEqual(cr.checksum_type("sha384"), cr.SHA384) self.assertEqual(cr.checksum_type("sha512"), cr.SHA512) self.assertEqual(cr.checksum_type("foobar"), cr.UNKNOWN_CHECKSUM) createrepo_c-0.17.0/tests/python/tests/test_compression_wrapper.py000066400000000000000000000062521400672373200256020ustar00rootroot00000000000000import unittest import os.path import createrepo_c as cr from .fixtures import * class TestCaseCompressionWrapper(unittest.TestCase): def test_compression_suffix(self): self.assertEqual(cr.compression_suffix(cr.AUTO_DETECT_COMPRESSION), None) self.assertEqual(cr.compression_suffix(cr.UNKNOWN_COMPRESSION), None) self.assertEqual(cr.compression_suffix(cr.NO_COMPRESSION), None) self.assertEqual(cr.compression_suffix(123), None) self.assertEqual(cr.compression_suffix(cr.GZ), ".gz") self.assertEqual(cr.compression_suffix(cr.BZ2), ".bz2") self.assertEqual(cr.compression_suffix(cr.XZ), ".xz") self.assertEqual(cr.compression_suffix(cr.ZCK), ".zck") def test_detect_compression(self): # no compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.txt") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.NO_COMPRESSION) # gz compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.txt.gz") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.GZ) # bz2 compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.txt.bz2") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.BZ2) # xz compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.txt.xz") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.XZ) # zck compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.txt.zck") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.ZCK) # Bad suffix - no compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.foo0") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.NO_COMPRESSION) # Bad suffix - gz compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.foo1") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.GZ) # Bad suffix - bz2 compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.foo2") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.BZ2) # Bad suffix - xz compression path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.foo3") comtype = cr.detect_compression(path) self.assertEqual(comtype, cr.XZ) # Disabled because magic module doesn't recognize zchunk files yet # Bad suffix - zck compression #path = os.path.join(COMPRESSED_FILES_PATH, "01_plain.foo4") #comtype = cr.detect_compression(path) #self.assertEqual(comtype, cr.ZCK) def test_compression_type(self): self.assertEqual(cr.compression_type(None), cr.UNKNOWN_COMPRESSION) self.assertEqual(cr.compression_type(""), cr.UNKNOWN_COMPRESSION) self.assertEqual(cr.compression_type("gz"), cr.GZ) self.assertEqual(cr.compression_type("bz2"), cr.BZ2) self.assertEqual(cr.compression_type("xz"), 
cr.XZ) self.assertEqual(cr.compression_type("XZ"), cr.XZ) self.assertEqual(cr.compression_type("zck"), cr.ZCK) createrepo_c-0.17.0/tests/python/tests/test_contentstat.py000066400000000000000000000047221400672373200240470ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseContentStat(unittest.TestCase): # TODO: # - Test rename_file() for files with checksum def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") def tearDown(self): shutil.rmtree(self.tmpdir) def test_contentstat(self): pkg = cr.package_from_rpm(PKG_ARCHER_PATH) self.assertTrue(pkg) pkg.time_file = 1 pkg.time_build = 1 cs = cr.ContentStat(cr.SHA256) self.assertEqual(cs.size, 0) self.assertEqual(cs.checksum_type, cr.SHA256) self.assertEqual(cs.checksum, None) path = os.path.join(self.tmpdir, "primary.xml.gz") f = cr.PrimaryXmlFile(path, cr.GZ_COMPRESSION, cs) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_pkg(pkg) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(cs.size, 2668) self.assertEqual(cs.checksum_type, cr.SHA256) self.assertEqual(cs.checksum, "67bc6282915fad80dc11f3d7c3210977a0bde"\ "05a762256d86083c2447d425776") def test_contentstat_ref_in_xmlfile(self): """Test if reference is saved properly""" pkg = cr.package_from_rpm(PKG_ARCHER_PATH) self.assertTrue(pkg) pkg.time_file = 1 pkg.time_build = 1 cs = cr.ContentStat(cr.SHA256) self.assertEqual(cs.size, 0) self.assertEqual(cs.checksum_type, cr.SHA256) self.assertEqual(cs.checksum, None) path = os.path.join(self.tmpdir, "primary.xml.gz") f = cr.PrimaryXmlFile(path, cr.GZ_COMPRESSION, cs) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) del cs f.add_pkg(pkg) f.close() self.assertTrue(os.path.isfile(path)) def test_contentstat_ref_in_crfile(self): """Test if reference is saved properly""" cs = cr.ContentStat(cr.SHA256) self.assertEqual(cs.size, 0) self.assertEqual(cs.checksum_type, cr.SHA256) self.assertEqual(cs.checksum, None) path = os.path.join(self.tmpdir, "foofile.gz") f = cr.CrFile(path, cr.MODE_WRITE, cr.GZ_COMPRESSION, cs) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) del cs f.write("foobar") f.close() self.assertTrue(os.path.isfile(path)) createrepo_c-0.17.0/tests/python/tests/test_crfile.py000066400000000000000000000077211400672373200227470ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseCrFile(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") def tearDown(self): shutil.rmtree(self.tmpdir) def test_crfile_basic_operations(self): f = cr.CrFile(self.tmpdir+"/foo.gz", cr.MODE_WRITE, cr.GZ_COMPRESSION, None) self.assertTrue(f) self.assertTrue(os.path.isfile(self.tmpdir+"/foo.gz")) def test_crfile_operations_on_closed_file(self): # Already closed file path = os.path.join(self.tmpdir, "primary.xml.gz") f = cr.CrFile(path, cr.MODE_WRITE, cr.GZ_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() self.assertRaises(cr.CreaterepoCError, f.write, "foobar") f.close() # No error should be raised del(f) # No error should be raised def test_crfile_error_cases(self): path = os.path.join(self.tmpdir, "foofile") self.assertFalse(os.path.exists(path)) # Bad open mode self.assertRaises(ValueError, cr.CrFile, path, 86, cr.GZ_COMPRESSION, None) self.assertFalse(os.path.exists(path)) # Bad compression type self.assertRaises(ValueError, 
cr.CrFile, path, cr.MODE_READ, 678, None) self.assertFalse(os.path.exists(path)) # Bad contentstat object self.assertRaises(TypeError, cr.XmlFile, path, cr.MODE_READ, cr.GZ_COMPRESSION, "foo") self.assertFalse(os.path.exists(path)) # Non existing path self.assertRaises(IOError, cr.CrFile, "foobar/foo/xxx/cvydmaticxuiowe") def test_crfile_no_compression(self): path = os.path.join(self.tmpdir, "foo") f = cr.CrFile(path, cr.MODE_WRITE, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.write("foobar") f.close() content = open(path).read() self.assertEqual(content, "foobar") def test_crfile_gz_compression(self): path = os.path.join(self.tmpdir, "foo.gz") f = cr.CrFile(path, cr.MODE_WRITE, cr.GZ_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.write("foobar") f.close() import gzip content = gzip.open(path).read().decode('utf-8') self.assertEqual(content, "foobar") def test_crfile_bz2_compression(self): path = os.path.join(self.tmpdir, "foo.bz2") f = cr.CrFile(path, cr.MODE_WRITE, cr.BZ2_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.write("foobar") f.close() import bz2 content = bz2.decompress(open(path, 'rb').read()).decode('utf-8') self.assertEqual(content, "foobar") def test_crfile_xz_compression(self): path = os.path.join(self.tmpdir, "foo.xz") f = cr.CrFile(path, cr.MODE_WRITE, cr.XZ_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.write("foobar") f.close() import subprocess p = subprocess.Popen(["unxz", "--stdout", path], stdout=subprocess.PIPE) content = p.stdout.read().decode('utf-8') self.assertEqual(content, "foobar") def test_crfile_zck_compression(self): if cr.HAS_ZCK == 0: return path = os.path.join(self.tmpdir, "foo.zck") f = cr.CrFile(path, cr.MODE_WRITE, cr.ZCK_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.write("foobar") f.close() import subprocess p = subprocess.Popen(["unzck", "--stdout", path], stdout=subprocess.PIPE) content = p.stdout.read().decode('utf-8') self.assertEqual(content, "foobar") createrepo_c-0.17.0/tests/python/tests/test_load_metadata.py000066400000000000000000000045611400672373200242610ustar00rootroot00000000000000import unittest import createrepo_c as cr from .fixtures import * class TestCaseLoadMetadata(unittest.TestCase): def test_load_metadata_repo00(self): md = cr.Metadata() md.locate_and_load_xml(REPO_00_PATH) self.assertTrue(md) self.assertEqual(md.key, cr.HT_KEY_DEFAULT) self.assertEqual(md.len(), 0) self.assertEqual(md.keys(), []) self.assertFalse(md.has_key("foo")) self.assertFalse(md.has_key("")) self.assertFalse(md.remove("foo")) self.assertFalse(md.get("xxx")) def test_load_metadata_repo01(self): md = cr.Metadata() md.locate_and_load_xml(REPO_01_PATH) self.assertTrue(md) self.assertEqual(md.key, cr.HT_KEY_DEFAULT) self.assertEqual(md.len(), 1) self.assertEqual(md.keys(), ['152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf']) self.assertFalse(md.has_key("foo")) self.assertFalse(md.has_key("")) self.assertFalse(md.remove("foo")) pkg = md.get('152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf') self.assertTrue(pkg) self.assertEqual(pkg.name, "super_kernel") def test_load_metadata_repo02(self): md = cr.Metadata() md.locate_and_load_xml(REPO_02_PATH) self.assertTrue(md) self.assertEqual(md.key, cr.HT_KEY_DEFAULT) self.assertEqual(md.len(), 2) self.assertEqual(md.keys(), ['6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3', 
'90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7']) self.assertFalse(md.has_key("foo")) self.assertFalse(md.has_key("")) self.assertFalse(md.remove("foo")) pkg = md.get('152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf') self.assertEqual(pkg, None) pkg = md.get('90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7') self.assertEqual(pkg.name, "fake_bash") def test_load_metadata_repo02_destructor(self): md = cr.Metadata(use_single_chunk=True) md.locate_and_load_xml(REPO_02_PATH) pkg = md.get('90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7') del(md) # in fact, md should not be destroyed yet, because it is # referenced from pkg! self.assertEqual(pkg.name, "fake_bash") createrepo_c-0.17.0/tests/python/tests/test_locate_metadata.py000066400000000000000000000102131400672373200246000ustar00rootroot00000000000000import unittest import createrepo_c as cr from .fixtures import * def list_has_str_ending_with(l, s): for e in l: if e.endswith(s): return True return False class TestCaseMetadataLocation(unittest.TestCase): def test_metadatalocation(self): ml = cr.MetadataLocation(REPO_00_PATH, 1) self.assertTrue(ml) self.assertTrue(ml["primary"].endswith("/repodata/1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz")) self.assertTrue(ml["filelists"].endswith("/repodata/95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f-filelists.xml.gz")) self.assertTrue(ml["other"].endswith("/repodata/ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6-other.xml.gz")) self.assertTrue(ml["primary_db"] is None) self.assertTrue(ml["filelists_db"] is None) self.assertTrue(ml["other_db"] is None) self.assertTrue(ml["group"] is None) self.assertTrue(ml["group_gz"] is None) self.assertTrue(ml["updateinfo"] is None) self.assertTrue(ml["foobarxyz"] is None) if os.environ.get("WITH_LIBMODULEMD", "ON").upper() != "OFF": ml = cr.MetadataLocation(REPO_WITH_ADDITIONAL_METADATA, 0) self.assertTrue(ml) self.assertTrue(ml["primary"].endswith("/repodata/490a2a494a3827b8a356f728ac36bc02fb009b0eaea173c890e727bb54219037-primary.xml.gz")) self.assertTrue(ml["filelists"].endswith("/repodata/ba5a4fdbb20e7b9b70d9a9abd974bcab1065b1e81d711f80e06ad8cae30c4183-filelists.xml.gz")) self.assertTrue(ml["other"].endswith("/repodata/fd458a424a3f3e0dadc95b806674b79055c24e73637e47ad5a6e57926aa1b9d1-other.xml.gz")) self.assertTrue(ml["primary_db"].endswith("/repodata/1e12239bf5cb07ec73c74482c35e80dabe30dbe2fdd57bd9e557d987cbacc8c2-primary.sqlite.bz2")) self.assertTrue(ml["filelists_db"].endswith("/repodata/4f4de7d3254a033b84626f330bc6adb8a3c1a4a20f0ddbe30a5692a041318c81-filelists.sqlite.bz2")) self.assertTrue(ml["other_db"].endswith("/repodata/8b13cba732c1a02b841f43d6791ca68788d45f376787d9f3ccf68e75f01af499-other.sqlite.bz2")) self.assertTrue(ml["group"].endswith("/repodata/04460bfaf6cb5af6b0925d8c99401a44e5192d287796aed4cced5f7ce881761f-comps.f20.xml")) self.assertTrue(ml["group_gz"].endswith("/repodata/f9d860ddcb64fbdc88a9b71a14ddb9f5670968d5dd3430412565c13d42b6804d-comps.f20.xml.gz")) self.assertTrue(ml["updateinfo"].endswith("/repodata/88514679cb03d8f51e850ad3639c089f899e83407a2380ef9e62873a8eb1db13-updateinfo_01.xml.gz")) additional_metadata = ml["additional_metadata"] self.assertTrue(len(additional_metadata) == 8) self.assertTrue(list_has_str_ending_with(additional_metadata, "4fbad65c641f4f8fb3cec9b1672fcec2357443e1ea6e93541a0bb559c7dc9238-modules.yaml.gz")) self.assertTrue(list_has_str_ending_with(additional_metadata, 
"cb0f4b5df8268f248158e50d66ee1565591bca23ee2dbd84ae9c457962fa3122-modules.yaml.gz.zck")) self.assertTrue(list_has_str_ending_with(additional_metadata, "04460bfaf6cb5af6b0925d8c99401a44e5192d287796aed4cced5f7ce881761f-comps.f20.xml")) self.assertTrue(list_has_str_ending_with(additional_metadata, "2bbdf70c4394e71c2d3905c143d460009d04359de5a90b72b47cdb9dbdcc079d-comps.f20.xml.zck")) self.assertTrue(list_has_str_ending_with(additional_metadata, "2bbdf70c4394e71c2d3905c143d460009d04359de5a90b72b47cdb9dbdcc079d-comps.f20.xml.gz.zck")) self.assertTrue(list_has_str_ending_with(additional_metadata, "f9d860ddcb64fbdc88a9b71a14ddb9f5670968d5dd3430412565c13d42b6804d-comps.f20.xml.gz")) self.assertTrue(list_has_str_ending_with(additional_metadata, "88514679cb03d8f51e850ad3639c089f899e83407a2380ef9e62873a8eb1db13-updateinfo_01.xml.gz")) self.assertTrue(list_has_str_ending_with(additional_metadata, "0219a2f1f9f32af6b7873905269ac1bc27b03e0caf3968c929a49e5a939e8935-updateinfo_01.xml.gz.zck")) self.assertTrue(ml["foobarxyz"] is None) else: with self.assertRaises(Exception): ml = cr.MetadataLocation(REPO_WITH_ADDITIONAL_METADATA, 0) createrepo_c-0.17.0/tests/python/tests/test_misc.py000066400000000000000000000062121400672373200224300ustar00rootroot00000000000000import os.path import tempfile import unittest import shutil import createrepo_c as cr from .fixtures import * class TestCaseMisc(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") self.nofile = os.path.join(self.tmpdir, "this_file_should_not_exists") self.tmpfile = os.path.join(self.tmpdir, "file") self.content = "some\nfoo\ncontent\n" open(self.tmpfile, "w").write(self.content) def tearDown(self): shutil.rmtree(self.tmpdir) def test_compress_file(self): # Non exist file self.assertRaises(IOError, cr.compress_file, self.nofile, None, cr.BZ2) # Compression - use the same name+suffix cr.compress_file(self.tmpfile, None, cr.BZ2) self.assertTrue(os.path.isfile(self.tmpfile+".bz2")) # Compression - new name new_name = os.path.join(self.tmpdir, "foobar.gz") cr.compress_file(self.tmpfile, new_name, cr.GZ) self.assertTrue(os.path.isfile(new_name)) # Compression - with stat stat = cr.ContentStat(cr.SHA256) cr.compress_file(self.tmpfile, None, cr.XZ, stat) self.assertTrue(os.path.isfile(self.tmpfile+".xz")) self.assertEqual(stat.checksum, "e61ebaa6241e335c779194ce7af98c590f1"\ "b26a749f219b997a0d7d5a773063b") self.assertEqual(stat.checksum_type, cr.SHA256) self.assertEqual(stat.size, len(self.content)) # Check directory for unexpected files self.assertEqual(set(os.listdir(self.tmpdir)), set(['file.bz2', 'file.xz', 'file', 'foobar.gz'])) def test_decompress_file(self): # Non exist file self.assertRaises(IOError, cr.decompress_file, self.nofile, None, cr.BZ2) tmpfile_gz_comp = os.path.join(self.tmpdir, "gzipedfile.gz") shutil.copy(FILE_TEXT_GZ, tmpfile_gz_comp) tmpfile_gz_comp_ns = os.path.join(self.tmpdir, "gzipedfile_no_suffix") shutil.copy(FILE_TEXT_GZ, tmpfile_gz_comp_ns) # Decompression - use the same name without suffix dest = os.path.join(self.tmpdir, "gzipedfile") cr.decompress_file(tmpfile_gz_comp, None, cr.GZ) self.assertTrue(os.path.isfile(dest)) # Decompression - use the specific name dest = os.path.join(self.tmpdir, "decompressed.file") cr.decompress_file(tmpfile_gz_comp, dest, cr.GZ) self.assertTrue(os.path.isfile(dest)) # Decompression - bad suffix by default dest = os.path.join(self.tmpdir, "gzipedfile_no_suffix.decompressed") cr.decompress_file(tmpfile_gz_comp_ns, None, cr.GZ) 
self.assertTrue(os.path.isfile(dest)) # Decompression - with stat stat = cr.ContentStat(cr.SHA256) dest = os.path.join(self.tmpdir, "gzipedfile") cr.decompress_file(tmpfile_gz_comp, None, cr.AUTO_DETECT_COMPRESSION, stat) self.assertTrue(os.path.isfile(dest)) self.assertEqual(stat.checksum, FILE_TEXT_SHA256SUM) self.assertEqual(stat.checksum_type, cr.SHA256) self.assertEqual(stat.size, 910) createrepo_c-0.17.0/tests/python/tests/test_package.py000066400000000000000000000243241400672373200230740ustar00rootroot00000000000000import unittest import createrepo_c as cr from .fixtures import * class TestCasePackage(unittest.TestCase): def test_package_empty(self): pkg = cr.package_from_rpm(PKG_EMPTY_PATH) self.assertTrue(pkg) self.assertEqual(pkg.pkgId, "91afc5e3a124eedfc5bc52737940950b42a37c611dccecad4692a4eb317f9810") self.assertEqual(pkg.name, "empty") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "0") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "0") self.assertEqual(pkg.summary, '""') self.assertEqual(pkg.description, None) self.assertEqual(pkg.url, None) #self.assertEqual(pkg.time_file, 1340709886) self.assertEqual(pkg.time_build, 1340696582) self.assertEqual(pkg.rpm_license, "LGPL") self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, "Unspecified") self.assertEqual(pkg.rpm_buildhost, "localhost.localdomain") self.assertEqual(pkg.rpm_sourcerpm, "empty-0-0.src.rpm") self.assertEqual(pkg.rpm_header_start, 280) self.assertEqual(pkg.rpm_header_end, 1285) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 1401) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 124) self.assertEqual(pkg.location_href, None) self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, "sha256") self.assertEqual(pkg.requires, []) self.assertEqual(pkg.provides, [ ('empty', 'EQ', '0', '0', '0', False), ('empty(x86-64)', 'EQ', '0', '0', '0', False) ]) self.assertEqual(pkg.conflicts, []) self.assertEqual(pkg.obsoletes, []) self.assertEqual(pkg.suggests, []) self.assertEqual(pkg.enhances, []) self.assertEqual(pkg.recommends, []) self.assertEqual(pkg.supplements, []) self.assertEqual(pkg.files, []) self.assertEqual(pkg.changelogs, []) self.assertEqual(pkg.nvra(), "empty-0-0.x86_64") self.assertEqual(pkg.nevra(), "empty-0:0-0.x86_64") def test_package_archer(self): pkg = cr.package_from_rpm(PKG_ARCHER_PATH) self.assertTrue(pkg) self.assertEqual(pkg.pkgId, "4e0b775220c67f0f2c1fd2177e626b9c863a098130224ff09778ede25cea9a9e") self.assertEqual(pkg.name, "Archer") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "3.4.5") self.assertEqual(pkg.epoch, "2") self.assertEqual(pkg.release, "6") self.assertEqual(pkg.summary, "Complex package.") self.assertEqual(pkg.description, "Archer package") self.assertEqual(pkg.url, "http://soo_complex_package.eu/") #self.assertEqual(pkg.time_file, 1365416502) self.assertEqual(pkg.time_build, 1365416480) self.assertEqual(pkg.rpm_license, "GPL") self.assertEqual(pkg.rpm_vendor, "ISIS") self.assertEqual(pkg.rpm_group, "Development/Tools") self.assertEqual(pkg.rpm_buildhost, "localhost.localdomain") self.assertEqual(pkg.rpm_sourcerpm, "Archer-3.4.5-6.src.rpm") self.assertEqual(pkg.rpm_header_start, 280) self.assertEqual(pkg.rpm_header_end, 2865) self.assertEqual(pkg.rpm_packager, "Sterling Archer") self.assertEqual(pkg.size_package, 3101) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 544) self.assertEqual(pkg.location_href, None) 
self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, "sha256") self.assertEqual(pkg.requires, [ ('fooa', 'LE', '0', '2', None, False), ('foob', 'GE', '0', '1.0.0', '1', False), ('fooc', 'EQ', '0', '3', None, False), ('food', 'LT', '0', '4', None, False), ('fooe', 'GT', '0', '5', None, False), ('foof', 'EQ', '0', '6', None, True) ]) self.assertEqual(pkg.provides, [ ('bara', 'LE', '0', '22', None, False), ('barb', 'GE', '0', '11.22.33', '44', False), ('barc', 'EQ', '0', '33', None, False), ('bard', 'LT', '0', '44', None, False), ('bare', 'GT', '0', '55', None, False), ('Archer', 'EQ', '2', '3.4.5', '6', False), ('Archer(x86-64)', 'EQ', '2', '3.4.5', '6', False) ]) self.assertEqual(pkg.conflicts, [ ('bba', 'LE', '0', '2222', None, False), ('bbb', 'GE', '0', '1111.2222.3333', '4444', False), ('bbc', 'EQ', '0', '3333', None, False), ('bbd', 'LT', '0', '4444', None, False), ('bbe', 'GT', '0', '5555', None, False) ]) self.assertEqual(pkg.obsoletes, [ ('aaa', 'LE', '0', '222', None, False), ('aab', 'GE', '0', '111.2.3', '4', False), ('aac', 'EQ', '0', '333', None, False), ('aad', 'LT', '0', '444', None, False), ('aae', 'GT', '0', '555', None, False) ]) self.assertEqual(pkg.suggests, []) self.assertEqual(pkg.enhances, []) self.assertEqual(pkg.recommends, []) self.assertEqual(pkg.supplements, []) self.assertEqual(pkg.files, [ ('', '/usr/bin/', 'complex_a'), ('dir', '/usr/share/doc/', 'Archer-3.4.5'), ('', '/usr/share/doc/Archer-3.4.5/', 'README') ]) self.assertEqual(pkg.changelogs, [ ('Tomas Mlcoch - 1.1.1-1', 1334664000, '- First changelog.'), ('Tomas Mlcoch - 2.2.2-2', 1334750400, '- That was totally ninja!'), ('Tomas Mlcoch - 3.3.3-3', 1365422400, '- 3. changelog.') ]) self.assertEqual(pkg.nvra(), "Archer-3.4.5-6.x86_64") self.assertEqual(pkg.nevra(), "Archer-2:3.4.5-6.x86_64") def test_package_setters(self): pkg = cr.Package() self.assertTrue(pkg) pkg.pkgId = "foo" self.assertEqual(pkg.pkgId, "foo") pkg.name = "bar" self.assertEqual(pkg.name, "bar") pkg.arch = "quantum" self.assertEqual(pkg.arch, "quantum") pkg.version = "11" self.assertEqual(pkg.version, "11") pkg.epoch = "22" self.assertEqual(pkg.epoch, "22") pkg.release = "33" self.assertEqual(pkg.release, "33") pkg.summary = "sum" self.assertEqual(pkg.summary, "sum") pkg.description = "desc" self.assertEqual(pkg.description, "desc") pkg.url = "http://foo" self.assertEqual(pkg.url, "http://foo") pkg.time_file = 111 self.assertEqual(pkg.time_file, 111) pkg.time_build = 112 self.assertEqual(pkg.time_build, 112) pkg.rpm_license = "EULA" self.assertEqual(pkg.rpm_license, "EULA") pkg.rpm_vendor = "Me" self.assertEqual(pkg.rpm_vendor, "Me") pkg.rpm_group = "a-team" self.assertEqual(pkg.rpm_group, "a-team") pkg.rpm_buildhost = "hal3000.space" self.assertEqual(pkg.rpm_buildhost, "hal3000.space") pkg.rpm_sourcerpm = "source.src.rpm" self.assertEqual(pkg.rpm_sourcerpm, "source.src.rpm") pkg.rpm_header_start = 1 self.assertEqual(pkg.rpm_header_start, 1) pkg.rpm_header_end = 2 self.assertEqual(pkg.rpm_header_end, 2) pkg.rpm_packager = "Arnold Rimmer" self.assertEqual(pkg.rpm_packager, "Arnold Rimmer") pkg.size_package = 33 self.assertEqual(pkg.size_package, 33) pkg.size_installed = 44 self.assertEqual(pkg.size_installed, 44) pkg.size_archive = 55 self.assertEqual(pkg.size_archive, 55) pkg.location_href = "package/foo.rpm" self.assertEqual(pkg.location_href, "package/foo.rpm") pkg.location_base = "file://dir/" self.assertEqual(pkg.location_base, "file://dir/") pkg.checksum_type = "crc" self.assertEqual(pkg.checksum_type, "crc") 
pkg.requires = [('bar', 'GE', '1', '3.2.1', None, True)] self.assertEqual(pkg.requires, [('bar', 'GE', '1', '3.2.1', None, True)]) pkg.provides = [('foo', None, None, None, None, False)] self.assertEqual(pkg.provides, [('foo', None, None, None, None, False)]) pkg.conflicts = [('foobar', 'LT', '0', '1.0.0', None, False)] self.assertEqual(pkg.conflicts, [('foobar', 'LT', '0', '1.0.0', None, False)]) pkg.obsoletes = [('foobar', 'GE', '0', '1.1.0', None, False)] self.assertEqual(pkg.obsoletes, [('foobar', 'GE', '0', '1.1.0', None, False)]) pkg.suggests = [('foo_sug', 'GE', '0', '1.1.0', None, False)] self.assertEqual(pkg.suggests, [('foo_sug', 'GE', '0', '1.1.0', None, False)]) pkg.enhances = [('foo_enh', 'GE', '0', '1.1.0', None, False)] self.assertEqual(pkg.enhances, [('foo_enh', 'GE', '0', '1.1.0', None, False)]) pkg.recommends = [('foo_rec', 'GE', '0', '1.1.0', None, False)] self.assertEqual(pkg.recommends, [('foo_rec', 'GE', '0', '1.1.0', None, False)]) pkg.supplements = [('foo_sup', 'GE', '0', '1.1.0', None, False)] self.assertEqual(pkg.supplements, [('foo_sup', 'GE', '0', '1.1.0', None, False)]) pkg.files = [(None, '/foo/', 'bar')] self.assertEqual(pkg.files, [(None, '/foo/', 'bar')]) pkg.changelogs = [('me', 123456, 'first commit')] self.assertEqual(pkg.changelogs, [('me', 123456, 'first commit')]) self.assertEqual(pkg.nvra(), "bar-11-33.quantum") self.assertEqual(pkg.nevra(), "bar-22:11-33.quantum") def test_package_copying(self): import copy pkg_a = cr.Package() pkg_a.name = "FooPackage" pkg_b = pkg_a self.assertEqual(id(pkg_a), id(pkg_b)) pkg_c = copy.copy(pkg_a) self.assertFalse(id(pkg_a) == id(pkg_c)) pkg_d = copy.deepcopy(pkg_a) self.assertFalse(id(pkg_a) == id(pkg_d)) self.assertFalse(id(pkg_c) == id(pkg_d)) # Next lines should not fail (if copy really works) del(pkg_a) del(pkg_b) self.assertEqual(pkg_c.name, "FooPackage") del(pkg_c) self.assertEqual(pkg_d.name, "FooPackage") del(pkg_d) createrepo_c-0.17.0/tests/python/tests/test_parsepkg.py000066400000000000000000000046031400672373200233130ustar00rootroot00000000000000import unittest import createrepo_c as cr from .fixtures import * class TestCaseParsepkg(unittest.TestCase): def test_package_from_rpm(self): pkg = cr.package_from_rpm(PKG_ARCHER_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "Archer") pkg = cr.package_from_rpm(PKG_BALICEK_ISO88591_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "balicek-iso88591") pkg = cr.package_from_rpm(PKG_BALICEK_ISO88592_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "balicek-iso88592") pkg = cr.package_from_rpm(PKG_BALICEK_UTF8_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "balicek-utf8") pkg = cr.package_from_rpm(PKG_EMPTY_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "empty") pkg = cr.package_from_rpm(PKG_EMPTY_SRC_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "empty") pkg = cr.package_from_rpm(PKG_FAKE_BASH_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "fake_bash") pkg = cr.package_from_rpm(PKG_SUPER_KERNEL_PATH) self.assertTrue(pkg) self.assertEqual(pkg.name, "super_kernel") # Test error cases # Rpm doesn't exists self.assertRaises(IOError, cr.package_from_rpm, "this_foo_pkg_should_not_exists.rpm") # Path is a directory, not a file self.assertRaises(IOError, cr.package_from_rpm, "./") # File is not a rpm self.assertRaises(IOError, cr.package_from_rpm, FILE_BINARY_PATH) def test_xml_from_rpm(self): xml = cr.xml_from_rpm(PKG_ARCHER_PATH) self.assertTrue(xml) self.assertTrue(len(xml) == 3) self.assertTrue("Archer" in xml[0]) 
self.assertTrue('' in xml[1]) self.assertTrue('' in xml[2]) # Test error cases # Rpm doesn't exists self.assertRaises(IOError, cr.xml_from_rpm, "this_foo_pkg_should_not_exists.rpm") # Path is a directory, not a file self.assertRaises(IOError, cr.xml_from_rpm, "./") # File is not a rpm self.assertRaises(IOError, cr.xml_from_rpm, FILE_BINARY_PATH) createrepo_c-0.17.0/tests/python/tests/test_repomd.py000066400000000000000000000114131400672373200227620ustar00rootroot00000000000000import re import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseRepomd(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") self.FN_00 = "primary.xml.gz" self.FN_01 = "primary.xml" self.path00 = os.path.join(self.tmpdir, self.FN_00) self.path01 = os.path.join(self.tmpdir, self.FN_01) def tearDown(self): shutil.rmtree(self.tmpdir) def xxx_repomdrecord_fill(self): shutil.copyfile(REPO_00_PRIXML, self.path00) self.assertTrue(os.path.exists(self.path00)) rec = cr.RepomdRecord("primary", self.path00) self.assertTrue(rec) rec.fill(cr.SHA256) rec.rename_file() # Filename should contain a (valid) checksum self.assertEqual(os.listdir(self.tmpdir), ['1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz']) def test_repomd(self): shutil.copyfile(REPO_00_PRIXML, self.path00) self.assertTrue(os.path.exists(self.path00)) md = cr.Repomd() self.assertTrue(md) xml = md.xml_dump() # Revision should be current Unix time self.assertTrue(re.search(r"[0-9]+", xml)) self.assertEqual(md.revision, None) md.set_revision("foobar") self.assertEqual(md.revision, "foobar") self.assertEqual(md.repoid, None); md.set_repoid("barid", "sha256") self.assertEqual(md.repoid, "barid") self.assertEqual(md.contenthash, None); md.set_contenthash("fooid", "sha256") self.assertEqual(md.contenthash, "fooid") self.assertEqual(md.distro_tags, []) md.add_distro_tag("tag1") md.add_distro_tag("tag2", "cpeid1") md.add_distro_tag("tag3", cpeid="cpeid2") self.assertEqual(md.distro_tags, [(None, 'tag1'), ('cpeid1', 'tag2'), ('cpeid2', 'tag3')]) self.assertEqual(md.repo_tags, []) md.add_repo_tag("repotag") self.assertEqual(md.repo_tags, ['repotag']) self.assertEqual(md.content_tags, []) md.add_content_tag("contenttag") self.assertEqual(md.content_tags, ['contenttag']) self.assertEqual(md.records, []) xml = md.xml_dump() self.assertEqual(xml, """ foobar barid fooid contenttag repotag tag1 tag2 tag3 """) rec = cr.RepomdRecord("primary", self.path00) rec.fill(cr.SHA256) rec.timestamp = 1 rec.location_base = "http://foo/" md.set_record(rec) self.assertEqual(len(md.records), 1) md.set_record(rec) self.assertEqual(len(md.records), 1) md.repoid = None md.contenthash = None xml = md.xml_dump() self.assertEqual(xml, """ foobar contenttag repotag tag1 tag2 tag3 1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f 1 134 167 """) def test_repomd_with_path_in_constructor_repo01(self): repomd = cr.Repomd(REPO_01_REPOMD) self.assertEqual(repomd.revision, "1334667230") self.assertEqual(repomd.repo_tags, []) self.assertEqual(repomd.distro_tags, []) self.assertEqual(repomd.content_tags, []) self.assertEqual(len(repomd.records), 3) def test_repomd_indexing_and_iteration_repo01(self): repomd = cr.Repomd(REPO_01_REPOMD) types = [] for rec in repomd: types.append(rec.type) self.assertEqual(types, ['filelists', 'other', 'primary']) rec = repomd["primary"] self.assertEqual(rec.type, 
"primary") self.assertRaises(KeyError, repomd.__getitem__, "foobar") self.assertTrue("primary" in repomd) createrepo_c-0.17.0/tests/python/tests/test_repomdrecord.py000066400000000000000000000174031400672373200241660ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseRepomdRecord(unittest.TestCase): # TODO: # - Test rename_file() for files with checksum def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") self.FN_00 = "primary.xml.gz" self.FN_01 = "primary.xml" self.FN_02 = "primary.xml.zck" self.path00 = os.path.join(self.tmpdir, self.FN_00) self.path01 = os.path.join(self.tmpdir, self.FN_01) self.path02 = os.path.join(self.tmpdir, self.FN_02) def tearDown(self): shutil.rmtree(self.tmpdir) def test_repomdrecord_fill(self): shutil.copyfile(REPO_00_PRIXML, self.path00) self.assertTrue(os.path.exists(self.path00)) rec = cr.RepomdRecord("primary", self.path00) self.assertTrue(rec) self.assertEqual(rec.location_real, self.path00) self.assertEqual(rec.location_href, "repodata/primary.xml.gz") self.assertEqual(rec.location_base, None) self.assertEqual(rec.checksum, None) self.assertEqual(rec.checksum_type, None) self.assertEqual(rec.checksum_open, None) self.assertEqual(rec.checksum_open_type, None) self.assertEqual(rec.timestamp, 0) self.assertEqual(rec.size, 0) self.assertEqual(rec.size_open, -1) self.assertEqual(rec.db_ver, 0) rec.fill(cr.SHA256) self.assertEqual(rec.location_real, self.path00) self.assertEqual(rec.location_href, "repodata/primary.xml.gz") self.assertEqual(rec.location_base, None) self.assertEqual(rec.checksum, "1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae") self.assertEqual(rec.checksum_type, "sha256") self.assertEqual(rec.checksum_open, "e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f") self.assertEqual(rec.checksum_open_type, "sha256") self.assertTrue(rec.timestamp > 0) self.assertEqual(rec.size, 134) self.assertEqual(rec.size_open, 167) rec.rename_file() shutil.copyfile(REPO_00_PRIZCK, self.path02) self.assertTrue(os.path.exists(self.path02)) zrc = cr.RepomdRecord("primary_zck", self.path02) self.assertTrue(zrc) self.assertEqual(zrc.location_real, self.path02) self.assertEqual(zrc.location_href, "repodata/primary.xml.zck") self.assertEqual(zrc.location_base, None) self.assertEqual(zrc.checksum, None) self.assertEqual(zrc.checksum_type, None) self.assertEqual(zrc.checksum_open, None) self.assertEqual(zrc.checksum_open_type, None) self.assertEqual(zrc.checksum_header, None) self.assertEqual(zrc.checksum_header_type, None) self.assertEqual(zrc.timestamp, 0) self.assertEqual(zrc.size, 0) self.assertEqual(zrc.size_open, -1) self.assertEqual(zrc.size_header, -1) if cr.HAS_ZCK == 0: filelist = os.listdir(self.tmpdir) filelist.sort() self.assertEqual(filelist, ['1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz', 'primary.xml.zck']) return zrc.fill(cr.SHA256) self.assertEqual(zrc.location_real, self.path02) self.assertEqual(zrc.location_href, "repodata/primary.xml.zck") self.assertEqual(zrc.location_base, None) self.assertEqual(zrc.checksum, "e0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595") self.assertEqual(zrc.checksum_type, "sha256") self.assertEqual(zrc.checksum_open, "e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f") self.assertEqual(zrc.checksum_open_type, "sha256") self.assertEqual(zrc.checksum_header, 
"243baf7c02f5241d46f2e8c237ebc7ea7e257ca993d9cfe1304254c7ba7f6546") self.assertEqual(zrc.checksum_header_type, "sha256") self.assertTrue(zrc.timestamp > 0) self.assertEqual(zrc.size, 269) self.assertEqual(zrc.size_open, 167) self.assertEqual(zrc.size_header, 132) self.assertEqual(zrc.db_ver, 10) zrc.rename_file() # Filename should contain a (valid) checksum filelist = os.listdir(self.tmpdir) filelist.sort() self.assertEqual(filelist, ['1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz', 'e0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595-primary.xml.zck']) def test_repomdrecord_setters(self): shutil.copyfile(REPO_00_PRIXML, self.path00) self.assertTrue(os.path.exists(self.path00)) rec = cr.RepomdRecord("primary", self.path00) self.assertTrue(rec) rec.fill(cr.SHA256) self.assertEqual(rec.type, "primary") self.assertEqual(rec.location_real, self.path00) self.assertEqual(rec.location_href, "repodata/primary.xml.gz") self.assertEqual(rec.checksum, "1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae") self.assertEqual(rec.checksum_type, "sha256") self.assertEqual(rec.checksum_open, "e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f") self.assertEqual(rec.checksum_open_type, "sha256") self.assertTrue(rec.timestamp > 0) self.assertEqual(rec.size, 134) self.assertEqual(rec.size_open, 167) self.assertEqual(rec.db_ver, 10) # Set new values rec.type = "foo" rec.location_href = "repodata/foo.xml.gz" rec.checksum = "foobar11" rec.checksum_type = "foo1" rec.checksum_open = "foobar22" rec.checksum_open_type = "foo2" rec.timestamp = 123 rec.size = 456 rec.size_open = 789 rec.db_ver = 11 # Check self.assertEqual(rec.type, "foo") self.assertEqual(rec.location_real, self.path00) self.assertEqual(rec.location_href, "repodata/foo.xml.gz") self.assertEqual(rec.checksum, "foobar11") self.assertEqual(rec.checksum_type, "foo1") self.assertEqual(rec.checksum_open, "foobar22") self.assertEqual(rec.checksum_open_type, "foo2") self.assertEqual(rec.timestamp, 123) self.assertEqual(rec.size, 456) self.assertEqual(rec.size_open, 789) self.assertEqual(rec.db_ver, 11) def test_repomdrecord_compress_and_fill(self): open(self.path01, "w").write("foobar\ncontent\nhh\n") self.assertTrue(os.path.exists(self.path01)) rec = cr.RepomdRecord("primary", self.path01) self.assertTrue(rec) rec_compressed = rec.compress_and_fill(cr.SHA256, cr.GZ_COMPRESSION) # A new compressed file should be created self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted(['primary.xml.gz', 'primary.xml'])) rec.rename_file() rec_compressed.rename_file() # Filename should contain a (valid) checksum self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted(['10091f8e2e235ae875cb18c91c443891c7f1a599d41f44d518e8af759a6c8109-primary.xml.gz', 'b33fc63178d852333a826385bc15d9b72cb6658be7fb927ec28c4e40b5d426fb-primary.xml'])) def test_repomdrecord_load_contentstat(self): rec = cr.RepomdRecord("primary", None) self.assertTrue(rec) stat = cr.ContentStat(cr.SHA256) stat.checksum = "foobar" stat.checksum_type = cr.SHA256 stat.size = 123 self.assertEqual(rec.checksum_open, None) self.assertEqual(rec.checksum_open_type, None) self.assertEqual(rec.size, 0) rec.load_contentstat(stat); self.assertEqual(rec.checksum_open, "foobar") self.assertEqual(rec.checksum_open_type, "sha256") self.assertEqual(rec.size_open, 123) createrepo_c-0.17.0/tests/python/tests/test_sqlite.py000066400000000000000000000243421400672373200230020ustar00rootroot00000000000000import unittest import shutil import tempfile import 
os.path import sqlite3 import createrepo_c as cr from .fixtures import * class TestCaseSqlite(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") def tearDown(self): shutil.rmtree(self.tmpdir) def test_sqlite_basic_operations(self): db_pri = cr.Sqlite(self.tmpdir+"/primary.db", cr.DB_PRIMARY) self.assertTrue(db_pri) self.assertTrue(os.path.isfile(self.tmpdir+"/primary.db")) db_pri = cr.PrimarySqlite(self.tmpdir+"/primary2.db") self.assertTrue(db_pri) self.assertTrue(os.path.isfile(self.tmpdir+"/primary2.db")) db_fil = cr.Sqlite(self.tmpdir+"/filelists.db", cr.DB_FILELISTS) self.assertTrue(db_fil) self.assertTrue(os.path.isfile(self.tmpdir+"/filelists.db")) db_fil = cr.FilelistsSqlite(self.tmpdir+"/filelists2.db") self.assertTrue(db_fil) self.assertTrue(os.path.isfile(self.tmpdir+"/filelists2.db")) db_oth = cr.Sqlite(self.tmpdir+"/other.db", cr.DB_OTHER) self.assertTrue(db_oth) self.assertTrue(os.path.isfile(self.tmpdir+"/other.db")) db_oth = cr.OtherSqlite(self.tmpdir+"/other2.db") self.assertTrue(db_oth) self.assertTrue(os.path.isfile(self.tmpdir+"/other2.db")) def test_sqlite_error_cases(self): self.assertRaises(cr.CreaterepoCError, cr.Sqlite, self.tmpdir, cr.DB_PRIMARY) self.assertRaises(ValueError, cr.Sqlite, self.tmpdir+"/foo.db", 55) self.assertRaises(TypeError, cr.Sqlite, self.tmpdir+"/foo.db", None) self.assertRaises(TypeError, cr.Sqlite, None, cr.DB_PRIMARY) def test_sqlite_operations_on_closed_db(self): pkg = cr.package_from_rpm(PKG_ARCHER_PATH) path = os.path.join(self.tmpdir, "primary.db") db = cr.Sqlite(path, cr.DB_PRIMARY) self.assertTrue(db) db.close() self.assertRaises(cr.CreaterepoCError, db.add_pkg, pkg) self.assertRaises(cr.CreaterepoCError, db.dbinfo_update, "somechecksum") db.close() # No error should be raised del db # No error should be raised def test_sqlite_primary_schema(self): path = os.path.join(self.tmpdir, "primary.db") cr.PrimarySqlite(path) self.assertTrue(os.path.isfile(path)) con = sqlite3.connect(path) # Check tables self.assertEqual(con.execute("""select name from sqlite_master where type="table";""").fetchall(), [(u'db_info',), (u'packages',), (u'files',), (u'requires',), (u'provides',), (u'conflicts',), (u'obsoletes',), (u'suggests',), (u'enhances',), (u'recommends',), (u'supplements',), ]) # Check indexes self.assertEqual(con.execute("""select name from sqlite_master where type="index";""").fetchall(), [(u'packagename',), (u'packageId',), (u'filenames',), (u'pkgfiles',), (u'pkgrequires',), (u'requiresname',), (u'pkgprovides',), (u'providesname',), (u'pkgconflicts',), (u'pkgobsoletes',), (u'pkgsuggests',), (u'pkgenhances',), (u'pkgrecommends',), (u'pkgsupplements',), ]) # Check triggers self.assertEqual(con.execute("""select name from sqlite_master where type="trigger";""").fetchall(), [(u'removals',)]) def test_sqlite_filelists_schema(self): path = os.path.join(self.tmpdir, "filelists.db") cr.FilelistsSqlite(path) self.assertTrue(os.path.isfile(path)) con = sqlite3.connect(path) # Check tables self.assertEqual(con.execute("""select name from sqlite_master where type="table";""").fetchall(), [(u'db_info',), (u'packages',), (u'filelist',)]) # Check indexes self.assertEqual(con.execute("""select name from sqlite_master where type="index";""").fetchall(), [(u'keyfile',), (u'pkgId',), (u'dirnames',)]) # Check triggers self.assertEqual(con.execute("""select name from sqlite_master where type="trigger";""").fetchall(), [(u'remove_filelist',)]) def test_sqlite_other_schema(self): path = os.path.join(self.tmpdir, 
"other.db") cr.OtherSqlite(path) self.assertTrue(os.path.isfile(path)) con = sqlite3.connect(path) # Check tables self.assertEqual(con.execute("""select name from sqlite_master where type="table";""").fetchall(), [(u'db_info',), (u'packages',), (u'changelog',)]) # Check indexes self.assertEqual(con.execute("""select name from sqlite_master where type="index";""").fetchall(), [(u'keychange',), (u'pkgId',)]) # Check triggers self.assertEqual(con.execute("""select name from sqlite_master where type="trigger";""").fetchall(), [(u'remove_changelogs',)]) def test_sqlite_primary(self): path = os.path.join(self.tmpdir, "primary.db") db = cr.Sqlite(path, cr.DB_PRIMARY) pkg = cr.package_from_rpm(PKG_ARCHER_PATH) db.add_pkg(pkg) self.assertRaises(TypeError, db.add_pkg, None) self.assertRaises(TypeError, db.add_pkg, 123) self.assertRaises(TypeError, db.add_pkg, "foo") db.dbinfo_update("somechecksum") self.assertRaises(TypeError, db.dbinfo_update, pkg) self.assertRaises(TypeError, db.dbinfo_update, None) self.assertRaises(TypeError, db.dbinfo_update, 123) db.close() self.assertTrue(os.path.isfile(path)) con = sqlite3.connect(path) # Check packages table res = con.execute("select * from packages").fetchall() self.assertEqual(res, [(1, u'4e0b775220c67f0f2c1fd2177e626b9c863a098130224ff09778ede25cea9a9e', u'Archer', u'x86_64', u'3.4.5', u'2', u'6', u'Complex package.', u'Archer package', u'http://soo_complex_package.eu/', res[0][10], 1365416480, u'GPL', u'ISIS', u'Development/Tools', u'localhost.localdomain', u'Archer-3.4.5-6.src.rpm', 280, 2865, u'Sterling Archer', 3101, 0, 544, None, None, u'sha256')]) # Check provides table self.assertEqual(con.execute("select * from provides").fetchall(), [(u'bara', u'LE', u'0', u'22', None, 1), (u'barb', u'GE', u'0', u'11.22.33', u'44', 1), (u'barc', u'EQ', u'0', u'33', None, 1), (u'bard', u'LT', u'0', u'44', None, 1), (u'bare', u'GT', u'0', u'55', None, 1), (u'Archer', u'EQ', u'2', u'3.4.5', u'6', 1), (u'Archer(x86-64)', u'EQ', u'2', u'3.4.5', u'6', 1)]) # Check conflicts table self.assertEqual(con.execute("select * from conflicts").fetchall(), [(u'bba', u'LE', u'0', u'2222', None, 1), (u'bbb', u'GE', u'0', u'1111.2222.3333', u'4444', 1), (u'bbc', u'EQ', u'0', u'3333', None, 1), (u'bbd', u'LT', u'0', u'4444', None, 1), (u'bbe', u'GT', u'0', u'5555', None, 1)]) # Check obsoletes table self.assertEqual(con.execute("select * from obsoletes").fetchall(), [(u'aaa', u'LE', u'0', u'222', None, 1), (u'aab', u'GE', u'0', u'111.2.3', u'4', 1), (u'aac', u'EQ', u'0', u'333', None, 1), (u'aad', u'LT', u'0', u'444', None, 1), (u'aae', u'GT', u'0', u'555', None, 1)]) # Check requires table self.assertEqual(con.execute("select * from requires").fetchall(), [(u'fooa', u'LE', u'0', u'2', None, 1, u'FALSE'), (u'foob', u'GE', u'0', u'1.0.0', u'1', 1, u'FALSE'), (u'fooc', u'EQ', u'0', u'3', None, 1, u'FALSE'), (u'food', u'LT', u'0', u'4', None, 1, u'FALSE'), (u'fooe', u'GT', u'0', u'5', None, 1, u'FALSE'), (u'foof', u'EQ', u'0', u'6', None, 1, u'TRUE')]) # Check files table self.assertEqual(con.execute("select * from files").fetchall(), [(u'/usr/bin/complex_a', u'file', 1)]) # Check db_info table self.assertEqual(con.execute("select * from db_info").fetchall(), [(10, u'somechecksum')]) def test_sqlite_filelists(self): path = os.path.join(self.tmpdir, "filelists.db") db = cr.Sqlite(path, cr.DB_FILELISTS) pkg = cr.package_from_rpm(PKG_ARCHER_PATH) db.add_pkg(pkg) db.dbinfo_update("somechecksum2") db.close() self.assertTrue(os.path.isfile(path)) con = sqlite3.connect(path) # Check packages 
table self.assertEqual(con.execute("select * from packages").fetchall(), [(1, u'4e0b775220c67f0f2c1fd2177e626b9c863a098130224ff09778ede25cea9a9e')]) # Check files table self.assertEqual(set(con.execute("select * from filelist").fetchall()), set([(1, u'/usr/share/doc', u'Archer-3.4.5', u'd'), (1, u'/usr/bin', u'complex_a', u'f'), (1, u'/usr/share/doc/Archer-3.4.5', u'README', u'f')])) # Check db_info table self.assertEqual(con.execute("select * from db_info").fetchall(), [(10, u'somechecksum2')]) def test_sqlite_other(self): path = os.path.join(self.tmpdir, "other.db") db = cr.Sqlite(path, cr.DB_FILELISTS) pkg = cr.package_from_rpm(PKG_ARCHER_PATH) db.add_pkg(pkg) db.dbinfo_update("somechecksum3") db.close() self.assertTrue(os.path.isfile(path)) con = sqlite3.connect(path) # Check packages table self.assertEqual(con.execute("select * from packages").fetchall(), [(1, u'4e0b775220c67f0f2c1fd2177e626b9c863a098130224ff09778ede25cea9a9e')]) # Check filelist table self.assertEqual(set(con.execute("select * from filelist").fetchall()), set([(1, u'/usr/share/doc', u'Archer-3.4.5', u'd'), (1, u'/usr/bin', u'complex_a', u'f'), (1, u'/usr/share/doc/Archer-3.4.5', u'README', u'f')])) # Check db_info table self.assertEqual(con.execute("select * from db_info").fetchall(), [(10, u'somechecksum3')]) createrepo_c-0.17.0/tests/python/tests/test_updatecollection.py000066400000000000000000000057631400672373200250450ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseUpdateCollection(unittest.TestCase): def test_updatecollection_setters(self): col = cr.UpdateCollection() self.assertTrue(col) self.assertEqual(col.shortname, None) self.assertEqual(col.name, None) self.assertEqual(col.packages, []) module = cr.UpdateCollectionModule() module.name = "kangaroo" module.stream = "0" module.version = 20180730223407 module.context = "deadbeef" module.arch = "noarch" pkg = cr.UpdateCollectionPackage() pkg.name = "foo" pkg.version = "1.2" pkg.release = "3" pkg.epoch = "0" pkg.arch = "x86" pkg.src = "foo.src.rpm" pkg.filename = "foo.rpm" pkg.sum = "abcdef" pkg.sum_type = cr.SHA1 pkg.reboot_suggested = True col.shortname = "short name" col.name = "long name" col.module = module col.append(pkg) self.assertEqual(col.shortname, "short name") self.assertEqual(col.name, "long name") self.assertEqual(len(col.packages), 1) # Check if the appended module was appended properly module = col.module self.assertEqual(module.name, "kangaroo") self.assertEqual(module.stream, "0") self.assertEqual(module.version, 20180730223407) self.assertEqual(module.context, "deadbeef") self.assertEqual(module.arch, "noarch") # Also check if the appended package was appended properly pkg = col.packages[0] self.assertEqual(pkg.name, "foo") self.assertEqual(pkg.version, "1.2") self.assertEqual(pkg.release, "3") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.arch, "x86") self.assertEqual(pkg.src, "foo.src.rpm") self.assertEqual(pkg.filename, "foo.rpm") self.assertEqual(pkg.sum, "abcdef") self.assertEqual(pkg.sum_type, cr.SHA1) self.assertEqual(pkg.reboot_suggested, True) def test_updatecollection_setters_when_module_going_out_of_scope(self): def create_collection_scope(): col = cr.UpdateCollection() col.name = "name" module = cr.UpdateCollectionModule() module.name = "kangaroo" module.stream = "0" module.version = 20180730223407 module.context = "deadbeef" module.arch = "noarch" col.module = module return col col = create_collection_scope() 
self.assertTrue(col) self.assertEqual(col.name, "name") # Check if the appended module was appended properly module = col.module self.assertEqual(module.name, "kangaroo") self.assertEqual(module.stream, "0") self.assertEqual(module.version, 20180730223407) self.assertEqual(module.context, "deadbeef") self.assertEqual(module.arch, "noarch") createrepo_c-0.17.0/tests/python/tests/test_updatecollectionmodule.py000066400000000000000000000016521400672373200262440ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseUpdateCollectionModule(unittest.TestCase): def test_updatecollectionmodule_setters(self): module = cr.UpdateCollectionModule() self.assertTrue(module) self.assertEqual(module.name, None) self.assertEqual(module.stream, None) self.assertEqual(module.version, 0) self.assertEqual(module.context, None) self.assertEqual(module.arch, None) module.name = "foo" module.stream = "0" module.version = 20180730223407 module.context = "deadbeef" module.arch = "noarch" self.assertEqual(module.name, "foo") self.assertEqual(module.stream, "0") self.assertEqual(module.version, 20180730223407) self.assertEqual(module.context, "deadbeef") self.assertEqual(module.arch, "noarch") createrepo_c-0.17.0/tests/python/tests/test_updatecollectionpackage.py000066400000000000000000000033341400672373200263510ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseUpdateCollectionPackage(unittest.TestCase): def test_updatecollectionpackage_setters(self): pkg = cr.UpdateCollectionPackage() self.assertTrue(pkg) self.assertEqual(pkg.name, None) self.assertEqual(pkg.version, None) self.assertEqual(pkg.release, None) self.assertEqual(pkg.epoch, None) self.assertEqual(pkg.arch, None) self.assertEqual(pkg.src, None) self.assertEqual(pkg.filename, None) self.assertEqual(pkg.sum, None) self.assertEqual(pkg.sum_type, 0) self.assertEqual(pkg.reboot_suggested, 0) self.assertEqual(pkg.restart_suggested, 0) self.assertEqual(pkg.relogin_suggested, 0) pkg.name = "foo" pkg.version = "1.2" pkg.release = "3" pkg.epoch = "0" pkg.arch = "x86" pkg.src = "foo.src.rpm" pkg.filename = "foo.rpm" pkg.sum = "abcdef" pkg.sum_type = cr.SHA1 pkg.reboot_suggested = True pkg.restart_suggested = True pkg.relogin_suggested = True self.assertEqual(pkg.name, "foo") self.assertEqual(pkg.version, "1.2") self.assertEqual(pkg.release, "3") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.arch, "x86") self.assertEqual(pkg.src, "foo.src.rpm") self.assertEqual(pkg.filename, "foo.rpm") self.assertEqual(pkg.sum, "abcdef") self.assertEqual(pkg.sum_type, cr.SHA1) self.assertEqual(pkg.reboot_suggested, True) self.assertEqual(pkg.restart_suggested, True) self.assertEqual(pkg.relogin_suggested, True) createrepo_c-0.17.0/tests/python/tests/test_updateinfo.py000066400000000000000000000313631400672373200236400ustar00rootroot00000000000000from datetime import datetime import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseUpdateInfo(unittest.TestCase): def test_updateinfo_setters(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) ui = cr.UpdateInfo() self.assertTrue(ui) self.assertEqual(ui.updates, []) rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = 
"version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.updated_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.reboot_suggested = True ui.append(rec) self.assertEqual(len(ui.updates), 1) rec = ui.updates[0] self.assertEqual(rec.fromstr, "from") self.assertEqual(rec.status, "status") self.assertEqual(rec.type, "type") self.assertEqual(rec.version, "version") self.assertEqual(rec.id, "id") self.assertEqual(rec.title, "title") self.assertEqual(rec.issued_date, now) self.assertEqual(rec.updated_date, now) self.assertEqual(rec.rights, "rights") self.assertEqual(rec.release, "release") self.assertEqual(rec.pushcount, "pushcount") self.assertEqual(rec.severity, "severity") self.assertEqual(rec.summary, "summary") self.assertEqual(rec.description, "description") self.assertEqual(rec.solution, "solution") self.assertEqual(rec.reboot_suggested, True) self.assertEqual(len(rec.references), 0) self.assertEqual(len(rec.collections), 0) rec = cr.UpdateRecord() rec.issued_date = int(now.timestamp()) ui.append(rec) self.assertEqual(len(ui.updates), 2) rec = ui.updates[1] self.assertEqual(rec.issued_date, int(now.timestamp())) def test_updateinfo_getter(self): ui = cr.UpdateInfo(TEST_UPDATEINFO_03) self.assertTrue(ui) self.assertEqual(len(ui.updates), 6) rec = ui.updates[2] self.assertRaisesRegex(cr.CreaterepoCError, "Unable to parse updateinfo record date: 15mangled2", rec.__getattribute__, "issued_date") def test_updateinfo_xml_dump_01(self): ui = cr.UpdateInfo() xml = ui.xml_dump() self.assertEqual(xml, """\n\n""") def test_updateinfo_xml_dump_02(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) ui = cr.UpdateInfo() xml = ui.xml_dump() rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.updated_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.reboot_suggested = True ui.append(rec) xml = ui.xml_dump() self.assertEqual(xml, """ id title rights release pushcount severity summary description solution True """ % {"now": now.strftime("%Y-%m-%d %H:%M:%S")}) def test_updateinfo_xml_dump_03(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) mod = cr.UpdateCollectionModule() mod.name = "kangaroo" mod.stream = "0" mod.version = 18446744073709551615 mod.context = "deadbeef" mod.arch = "x86" pkg = cr.UpdateCollectionPackage() pkg.name = "foo" pkg.version = "1.2" pkg.release = "3" pkg.epoch = "0" pkg.arch = "x86" pkg.src = "foo.src.rpm" pkg.filename = "foo.rpm" pkg.sum = "abcdef" pkg.sum_type = cr.SHA1 pkg.reboot_suggested = True pkg.restart_suggested = True pkg.relogin_suggested = True col = cr.UpdateCollection() col.shortname = "short name" col.name = "long name" col.module = mod col.append(pkg) ref = cr.UpdateReference() ref.href = "href" ref.id = "id" ref.type = "type" ref.title = "title" rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.updated_date = now rec.rights = "rights" 
rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.append_collection(col) rec.append_reference(ref) ui = cr.UpdateInfo() ui.append(rec) xml = ui.xml_dump() self.assertEqual(xml, """ id title rights release pushcount severity summary description solution long name foo.rpm abcdef True True True """ % {"now": now.strftime("%Y-%m-%d %H:%M:%S")}) def test_updateinfo_xml_dump_04(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) pkg = cr.UpdateCollectionPackage() pkg.name = "foo" pkg.version = "1.2" pkg.release = "3" pkg.epoch = "0" pkg.arch = "x86" pkg.src = "foo.src.rpm" pkg.filename = "foo.rpm" pkg.sum = "abcdef" pkg.sum_type = cr.SHA1 pkg.reboot_suggested = True # Collection without module col = cr.UpdateCollection() col.shortname = "short name" col.name = "long name" col.append(pkg) ref = cr.UpdateReference() ref.href = "href" ref.id = "id" ref.type = "type" ref.title = "title" rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.updated_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.append_collection(col) rec.append_reference(ref) ui = cr.UpdateInfo() ui.append(rec) xml = ui.xml_dump() self.assertEqual(xml, """ id title rights release pushcount severity summary description solution long name foo.rpm abcdef True """ % {"now": now.strftime("%Y-%m-%d %H:%M:%S")}) def test_updateinfo_xml_dump_05(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) # Collection module with unset fields mod = cr.UpdateCollectionModule() mod.version = 18446744073709551615 mod.context = "deadbeef" mod.arch = "x86" pkg = cr.UpdateCollectionPackage() pkg.name = "foo" pkg.version = "1.2" pkg.release = "3" pkg.epoch = "0" pkg.arch = "x86" pkg.src = "foo.src.rpm" pkg.filename = "foo.rpm" pkg.sum = "abcdef" pkg.sum_type = cr.SHA1 pkg.reboot_suggested = True pkg.restart_suggested = True pkg.relogin_suggested = True col = cr.UpdateCollection() col.shortname = "short name" col.name = "long name" col.module = mod col.append(pkg) ref = cr.UpdateReference() ref.href = "href" ref.id = "id" ref.type = "type" ref.title = "title" rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = int(now.timestamp()) rec.updated_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.reboot_suggested = True rec.append_collection(col) rec.append_reference(ref) ui = cr.UpdateInfo() ui.append(rec) xml = ui.xml_dump() self.assertEqual(xml, """ id title rights release pushcount severity summary description solution True long name foo.rpm abcdef True True True """ % {"now": now.strftime("%Y-%m-%d %H:%M:%S"), "now_epoch": now.strftime('%s')}) createrepo_c-0.17.0/tests/python/tests/test_updaterecord.py000066400000000000000000000142531400672373200241620ustar00rootroot00000000000000from datetime import datetime import unittest import shutil 
import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseUpdateRecord(unittest.TestCase): def test_updaterecord_setters(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) rec = cr.UpdateRecord() self.assertTrue(rec) self.assertEqual(rec.fromstr, None) self.assertEqual(rec.status, None) self.assertEqual(rec.type, None) self.assertEqual(rec.version, None) self.assertEqual(rec.id, None) self.assertEqual(rec.title, None) self.assertEqual(rec.issued_date, None) self.assertEqual(rec.updated_date, None) self.assertEqual(rec.rights, None) self.assertEqual(rec.release, None) self.assertEqual(rec.pushcount, None) self.assertEqual(rec.severity, None) self.assertEqual(rec.summary, None) self.assertEqual(rec.description, None) self.assertEqual(rec.reboot_suggested, 0) self.assertEqual(rec.solution, None) self.assertEqual(rec.references, []) self.assertEqual(rec.collections, []) ref = cr.UpdateReference() ref.href = "href" ref.id = "id" ref.type = "type" ref.title = "title" col = cr.UpdateCollection() col.shortname = "short name" col.name = "long name" rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.updated_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.reboot_suggested = True rec.solution = "solution" rec.append_reference(ref) rec.append_collection(col) self.assertEqual(rec.fromstr, "from") self.assertEqual(rec.status, "status") self.assertEqual(rec.type, "type") self.assertEqual(rec.version, "version") self.assertEqual(rec.id, "id") self.assertEqual(rec.title, "title") self.assertEqual(rec.issued_date, now) self.assertEqual(rec.updated_date, now) self.assertEqual(rec.rights, "rights") self.assertEqual(rec.release, "release") self.assertEqual(rec.pushcount, "pushcount") self.assertEqual(rec.severity, "severity") self.assertEqual(rec.summary, "summary") self.assertEqual(rec.reboot_suggested, True) self.assertEqual(rec.description, "description") self.assertEqual(rec.solution, "solution") self.assertEqual(len(rec.references), 1) self.assertEqual(len(rec.collections), 1) ref = rec.references[0] self.assertEqual(ref.href, "href") self.assertEqual(ref.id, "id") self.assertEqual(ref.type, "type") self.assertEqual(ref.title, "title") col = rec.collections[0] self.assertEqual(col.shortname, "short name") self.assertEqual(col.name, "long name") self.assertEqual(len(col.packages), 0) def test_xml_dump_updaterecord(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.updated_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.reboot_suggested = True xml = cr.xml_dump_updaterecord(rec) self.assertEqual(xml, """ id title rights release pushcount severity summary description solution True """ % {"now": now.strftime("%Y-%m-%d %H:%M:%S")}) def test_xml_dump_updaterecord_no_updated_date(self): now = datetime.now() # Microseconds are always 0 in updateinfo now = 
datetime(now.year, now.month, now.day, now.hour, now.minute, now.second, 0) rec = cr.UpdateRecord() rec.fromstr = "from" rec.status = "status" rec.type = "type" rec.version = "version" rec.id = "id" rec.title = "title" rec.issued_date = now rec.rights = "rights" rec.release = "release" rec.pushcount = "pushcount" rec.severity = "severity" rec.summary = "summary" rec.description = "description" rec.solution = "solution" rec.reboot_suggested = True target_xml = \ """ id title rights release pushcount severity summary description solution True """ % {"now": now.strftime("%Y-%m-%d %H:%M:%S")} xml = cr.xml_dump_updaterecord(rec) self.assertEqual(xml, target_xml) # Setting it to None is the same as not setting it at all rec.updated_date = None xml = cr.xml_dump_updaterecord(rec) self.assertEqual(xml, target_xml) createrepo_c-0.17.0/tests/python/tests/test_updatereference.py000066400000000000000000000013061400672373200246350ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseUpdateReference(unittest.TestCase): def test_updatereference_setters(self): ref = cr.UpdateReference() self.assertTrue(ref) self.assertEqual(ref.href, None) self.assertEqual(ref.id, None) self.assertEqual(ref.type, None) self.assertEqual(ref.title, None) ref.href = "href" ref.id = "id" ref.type = "type" ref.title = "title" self.assertEqual(ref.href, "href") self.assertEqual(ref.id, "id") self.assertEqual(ref.type, "type") self.assertEqual(ref.title, "title") createrepo_c-0.17.0/tests/python/tests/test_version.py000066400000000000000000000004751400672373200231670ustar00rootroot00000000000000import unittest import createrepo_c as cr from . import fixtures class TestCaseVersion(unittest.TestCase): def test_version(self): self.assertTrue(isinstance(cr.VERSION_MAJOR, int)); self.assertTrue(isinstance(cr.VERSION_MINOR, int)); self.assertTrue(isinstance(cr.VERSION_PATCH, int)); createrepo_c-0.17.0/tests/python/tests/test_xml_file.py000066400000000000000000000335211400672373200232770ustar00rootroot00000000000000import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseXmlFile(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp(prefix="createrepo_ctest-") def tearDown(self): shutil.rmtree(self.tmpdir) def test_xmlfile_basic_operations(self): pri = cr.XmlFile(self.tmpdir+"/primary.xml.gz", cr.XMLFILE_PRIMARY, cr.GZ_COMPRESSION, None) self.assertTrue(pri) self.assertTrue(os.path.isfile(self.tmpdir+"/primary.xml.gz")) fil = cr.XmlFile(self.tmpdir+"/filelists.xml.gz", cr.XMLFILE_FILELISTS, cr.GZ_COMPRESSION, None) self.assertTrue(fil) self.assertTrue(os.path.isfile(self.tmpdir+"/filelists.xml.gz")) oth = cr.XmlFile(self.tmpdir+"/other.xml.gz", cr.XMLFILE_OTHER, cr.GZ_COMPRESSION, None) self.assertTrue(oth) self.assertTrue(os.path.isfile(self.tmpdir+"/other.xml.gz")) def test_xmlfile_operations_on_closed_file(self): # Already closed file path = os.path.join(self.tmpdir, "primary.xml.gz") pkg = cr.package_from_rpm(PKG_ARCHER_PATH) self.assertTrue(pkg) f = cr.PrimaryXmlFile(path, cr.GZ_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() self.assertRaises(cr.CreaterepoCError, f.set_num_of_pkgs, 1) self.assertRaises(cr.CreaterepoCError, f.add_pkg, pkg) self.assertRaises(cr.CreaterepoCError, f.add_chunk, "text") f.close() # No error should be raised del(f) # No error should be raised def test_xmlfile_error_cases(self): path = 
os.path.join(self.tmpdir, "foofile") self.assertFalse(os.path.exists(path)) # Bad file type self.assertRaises(ValueError, cr.XmlFile, path, 86, cr.GZ_COMPRESSION, None) self.assertFalse(os.path.exists(path)) # Bad compression type self.assertRaises(ValueError, cr.XmlFile, path, cr.XMLFILE_PRIMARY, 678, None) self.assertFalse(os.path.exists(path)) # Bad contentstat object self.assertRaises(TypeError, cr.XmlFile, path, cr.XMLFILE_PRIMARY, cr.GZ_COMPRESSION, "foo") self.assertFalse(os.path.exists(path)) # Non existing path self.assertRaises(cr.CreaterepoCError, cr.PrimaryXmlFile, "foobar/foo/xxx/cvydmaticxuiowe") # Already existing file open(path, "w").write("foobar") self.assertRaises(IOError, cr.PrimaryXmlFile, path) def test_xmlfile_no_compression(self): path = os.path.join(self.tmpdir, "primary.xml") f = cr.PrimaryXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() content = open(path).read() self.assertEqual(content, """ """) def test_xmlfile_gz_compression(self): path = os.path.join(self.tmpdir, "primary.xml.gz") f = cr.PrimaryXmlFile(path, cr.GZ_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() import gzip content = gzip.open(path).read().decode('utf-8') self.assertEqual(content, """ """) def test_xmlfile_bz2_compression(self): path = os.path.join(self.tmpdir, "primary.xml.bz2") f = cr.PrimaryXmlFile(path, cr.BZ2_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() import bz2 content = bz2.decompress(open(path, 'rb').read()).decode('utf-8') self.assertEqual(content, """ """) def test_xmlfile_xz_compression(self): path = os.path.join(self.tmpdir, "primary.xml.xz") f = cr.PrimaryXmlFile(path, cr.XZ_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() import subprocess p = subprocess.Popen(["unxz", "--stdout", path], stdout=subprocess.PIPE) content = p.stdout.read().decode('utf-8') self.assertEqual(content, """ """) def test_xmlfile_zck_compression(self): if cr.HAS_ZCK == 0: return path = os.path.join(self.tmpdir, "primary.xml.zck") f = cr.PrimaryXmlFile(path, cr.ZCK_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.close() import subprocess p = subprocess.Popen(["unzck", "--stdout", path], stdout=subprocess.PIPE) content = p.stdout.read().decode('utf-8') self.assertEqual(content, """ """) def test_xmlfile_set_num_of_pkgs(self): path = os.path.join(self.tmpdir, "primary.xml") f = cr.PrimaryXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.set_num_of_pkgs(22) f.close() content = open(path).read() self.assertEqual(content, """ """) def test_xmlfile_add_pkg(self): pkg = cr.package_from_rpm(PKG_ARCHER_PATH) self.assertTrue(pkg) pkg.time_file = 111 # Primary path = os.path.join(self.tmpdir, "primary.xml") f = cr.PrimaryXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_pkg(pkg) self.assertRaises(TypeError, f.add_pkg, None) self.assertRaises(TypeError, f.add_pkg, 123) self.assertRaises(TypeError, f.add_pkg, "foo") self.assertRaises(TypeError, f.add_pkg, [456]) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(open(path).read(), """ Archer x86_64 4e0b775220c67f0f2c1fd2177e626b9c863a098130224ff09778ede25cea9a9e Complex package. 
Archer package Sterling Archer http://soo_complex_package.eu/ """) # Filelists path = os.path.join(self.tmpdir, "filelists.xml") f = cr.FilelistsXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_pkg(pkg) self.assertRaises(TypeError, f.add_pkg, None) self.assertRaises(TypeError, f.add_pkg, 123) self.assertRaises(TypeError, f.add_pkg, "foo") self.assertRaises(TypeError, f.add_pkg, [456]) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(open(path).read(), """ /usr/bin/complex_a /usr/share/doc/Archer-3.4.5 /usr/share/doc/Archer-3.4.5/README """) # Other path = os.path.join(self.tmpdir, "other.xml") f = cr.OtherXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_pkg(pkg) self.assertRaises(TypeError, f.add_pkg, None) self.assertRaises(TypeError, f.add_pkg, 123) self.assertRaises(TypeError, f.add_pkg, "foo") self.assertRaises(TypeError, f.add_pkg, [456]) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(open(path).read(), """ - First changelog. - That was totally ninja! - 3. changelog. """) def test_xmlfile_add_chunk(self): chunk = " Some XML chunk\n" # Primary path = os.path.join(self.tmpdir, "primary.xml") f = cr.PrimaryXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_chunk(chunk) self.assertRaises(TypeError, f.add_chunk, None) self.assertRaises(TypeError, f.add_chunk, 123) self.assertRaises(TypeError, f.add_chunk, [1]) self.assertRaises(TypeError, f.add_chunk, ["foo"]) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(open(path).read(), """ Some XML chunk """) # Filelists path = os.path.join(self.tmpdir, "filelists.xml") f = cr.FilelistsXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_chunk(chunk) self.assertRaises(TypeError, f.add_chunk, None) self.assertRaises(TypeError, f.add_chunk, 123) self.assertRaises(TypeError, f.add_chunk, [1]) self.assertRaises(TypeError, f.add_chunk, ["foo"]) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(open(path).read(), """ Some XML chunk """) # Other path = os.path.join(self.tmpdir, "other.xml") f = cr.OtherXmlFile(path, cr.NO_COMPRESSION) self.assertTrue(f) self.assertTrue(os.path.isfile(path)) f.add_chunk(chunk) self.assertRaises(TypeError, f.add_chunk, None) self.assertRaises(TypeError, f.add_chunk, 123) self.assertRaises(TypeError, f.add_chunk, [1]) self.assertRaises(TypeError, f.add_chunk, ["foo"]) f.close() self.assertTrue(os.path.isfile(path)) self.assertEqual(open(path).read(), """ Some XML chunk """) createrepo_c-0.17.0/tests/python/tests/test_xml_parser.py000066400000000000000000001060651400672373200236600ustar00rootroot00000000000000import re import unittest import shutil import tempfile import os.path import createrepo_c as cr from .fixtures import * class TestCaseXmlParserPrimary(unittest.TestCase): def test_xml_parser_primary_repo01(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_primary(REPO_01_PRIXML, newpkgcb, pkgcb, warningcb, 1) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 1) self.assertEqual(userdata["warnings"], []) pkg = userdata["pkgs"][0] self.assertEqual(pkg.pkgId, 
"152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf") self.assertEqual(pkg.name, "super_kernel") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "6.0.1") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "2") self.assertEqual(pkg.summary, "Test package") self.assertEqual(pkg.description, "This package has provides, requires, obsoletes, conflicts options.") self.assertEqual(pkg.url, "http://so_super_kernel.com/it_is_awesome/yep_it_really_is") self.assertEqual(pkg.time_file, 1334667003) self.assertEqual(pkg.time_build, 1334667003) self.assertEqual(pkg.rpm_license, "LGPLv2") self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, "Applications/System") self.assertEqual(pkg.rpm_buildhost, "localhost.localdomain") self.assertEqual(pkg.rpm_sourcerpm, "super_kernel-6.0.1-2.src.rpm") self.assertEqual(pkg.rpm_header_start, 280) self.assertEqual(pkg.rpm_header_end, 2637) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 2845) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 404) self.assertEqual(pkg.location_href, "super_kernel-6.0.1-2.x86_64.rpm") self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, "sha256") self.assertEqual(pkg.requires, [('bzip2', 'GE', '0', '1.0.0', None, True), ('expat', None, None, None, None, True), ('glib', 'GE', '0', '2.26.0', None, False), ('zlib', None, None, None, None, False)]) self.assertEqual(pkg.provides, [('not_so_super_kernel', 'LT', '0', '5.8.0', None, False), ('super_kernel', 'EQ', '0', '6.0.0', None, False), ('super_kernel', 'EQ', '0', '6.0.1', '2', False), ('super_kernel(x86-64)', 'EQ', '0', '6.0.1', '2', False)]) self.assertEqual(pkg.conflicts, [('kernel', None, None, None, None, False), ('super_kernel', 'EQ', '0', '5.0.0', None, False), ('super_kernel', 'LT', '0', '4.0.0', None, False)]) self.assertEqual(pkg.obsoletes, [('kernel', None, None, None, None, False), ('super_kernel', 'EQ', '0', '5.9.0', None, False)]) self.assertEqual(pkg.files, [(None, '/usr/bin/', 'super_kernel')]) self.assertEqual(pkg.changelogs, []) def test_xml_parser_primary_snippet01(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) content = open(PRIMARY_SNIPPET_01).read() cr.xml_parse_primary_snippet(content, newpkgcb, pkgcb, warningcb, 1) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 1) self.assertEqual(userdata["warnings"], []) pkg = userdata["pkgs"][0] self.assertEqual(pkg.pkgId, "152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf") self.assertEqual(pkg.name, "super_kernel") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "6.0.1") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "2") self.assertEqual(pkg.summary, "Test package") self.assertEqual(pkg.description, "This package has provides, requires, obsoletes, conflicts options.") self.assertEqual(pkg.url, "http://so_super_kernel.com/it_is_awesome/yep_it_really_is") self.assertEqual(pkg.time_file, 1334667003) self.assertEqual(pkg.time_build, 1334667003) self.assertEqual(pkg.rpm_license, "LGPLv2") self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, "Applications/System") self.assertEqual(pkg.rpm_buildhost, "localhost.localdomain") 
self.assertEqual(pkg.rpm_sourcerpm, "super_kernel-6.0.1-2.src.rpm") self.assertEqual(pkg.rpm_header_start, 280) self.assertEqual(pkg.rpm_header_end, 2637) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 2845) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 404) self.assertEqual(pkg.location_href, "super_kernel-6.0.1-2.x86_64.rpm") self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, "sha256") self.assertEqual(pkg.requires, [('bzip2', 'GE', '0', '1.0.0', None, True), ('expat', None, None, None, None, True), ('glib', 'GE', '0', '2.26.0', None, False), ('zlib', None, None, None, None, False)]) self.assertEqual(pkg.provides, [('not_so_super_kernel', 'LT', '0', '5.8.0', None, False), ('super_kernel', 'EQ', '0', '6.0.0', None, False), ('super_kernel', 'EQ', '0', '6.0.1', '2', False), ('super_kernel(x86-64)', 'EQ', '0', '6.0.1', '2', False)]) self.assertEqual(pkg.conflicts, [('kernel', None, None, None, None, False), ('super_kernel', 'EQ', '0', '5.0.0', None, False), ('super_kernel', 'LT', '0', '4.0.0', None, False)]) self.assertEqual(pkg.obsoletes, [('kernel', None, None, None, None, False), ('super_kernel', 'EQ', '0', '5.9.0', None, False)]) self.assertEqual(pkg.files, [(None, '/usr/bin/', 'super_kernel')]) self.assertEqual(pkg.changelogs, []) def test_xml_parser_primary_repo02(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) content = open(PRIMARY_SNIPPET_02).read() cr.xml_parse_primary_snippet(content, newpkgcb, pkgcb, warningcb, 1) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 2) self.assertEqual(userdata["warnings"], []) def test_xml_parser_primary_snippet02(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_primary(REPO_02_PRIXML, newpkgcb, pkgcb, warningcb, 1) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 2) self.assertEqual(userdata["warnings"], []) def test_xml_parser_primary_repo02_only_pkgcb(self): pkgs = [] def pkgcb(pkg): pkgs.append(pkg) cr.xml_parse_primary(REPO_02_PRIXML, None, pkgcb, None, 1) self.assertEqual([pkg.name for pkg in pkgs], ['fake_bash', 'super_kernel']) def test_xml_parser_primary_repo02_no_cbs(self): self.assertRaises(ValueError, cr.xml_parse_primary, REPO_02_PRIXML, None, None, None, 1) def test_xml_parser_primary_warnings(self): userdata = { "pkgs": [], "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_primary(PRIMARY_MULTI_WARN_00_PATH, newpkgcb, None, warningcb, 1) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["warnings"], [(0, 'Unknown element "fooelement"'), (1, 'Missing attribute "type" of a package element'), (0, 'Unknown element "foo"'), (3, 'Conversion of "foobar" to integer failed'), (2, 'Unknown file type "xxx"'), (0, 'Unknown 
element "bar"')]) def test_xml_parser_primary_error(self): userdata = { "pkgs": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg self.assertRaises(cr.CreaterepoCError, cr.xml_parse_primary, PRIMARY_ERROR_00_PATH, newpkgcb, None, None, 1) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash']) def test_xml_parser_primary_newpkgcb_abort(self): def newpkgcb(pkgId, name, arch): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_primary, REPO_02_PRIXML, newpkgcb, None, None, 1) def test_xml_parser_primary_pkgcb_abort(self): def newpkgcb(pkgId, name, arch): return cr.Package() def pkgcb(pkg): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_primary, REPO_02_PRIXML, newpkgcb, pkgcb, None, 1) def test_xml_parser_primary_warningcb_abort(self): def newpkgcb(pkgId, name, arch): return cr.Package() def warningcb(type, msg): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_primary, PRIMARY_MULTI_WARN_00_PATH, newpkgcb, None, warningcb, 1) class TestCaseXmlParserFilelists(unittest.TestCase): def test_xml_parser_filelists_repo01(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_filelists(REPO_01_FILXML, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 1) self.assertEqual(userdata["warnings"], []) pkg = userdata["pkgs"][0] self.assertEqual(pkg.pkgId, "152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf") self.assertEqual(pkg.name, "super_kernel") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "6.0.1") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "2") self.assertEqual(pkg.summary, None) self.assertEqual(pkg.description, None) self.assertEqual(pkg.url, None) self.assertEqual(pkg.time_file, 0) self.assertEqual(pkg.time_build, 0) self.assertEqual(pkg.rpm_license, None) self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, None) self.assertEqual(pkg.rpm_buildhost, None) self.assertEqual(pkg.rpm_sourcerpm, None) self.assertEqual(pkg.rpm_header_start, 0) self.assertEqual(pkg.rpm_header_end, 0) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 0) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 0) self.assertEqual(pkg.location_href, None) self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, None) self.assertEqual(pkg.requires, []) self.assertEqual(pkg.provides, []) self.assertEqual(pkg.conflicts, []) self.assertEqual(pkg.obsoletes, []) self.assertEqual(pkg.files, [(None, '/usr/bin/', 'super_kernel'), (None, '/usr/share/man/', 'super_kernel.8.gz')]) self.assertEqual(pkg.changelogs, []) def test_xml_parser_filelists_snippet01(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) content = open(FILELISTS_SNIPPET_01).read() cr.xml_parse_filelists_snippet(content, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['super_kernel']) 
self.assertEqual(userdata["pkgcb_calls"], 1) self.assertEqual(userdata["warnings"], []) pkg = userdata["pkgs"][0] self.assertEqual(pkg.pkgId, "152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf") self.assertEqual(pkg.name, "super_kernel") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "6.0.1") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "2") self.assertEqual(pkg.summary, None) self.assertEqual(pkg.description, None) self.assertEqual(pkg.url, None) self.assertEqual(pkg.time_file, 0) self.assertEqual(pkg.time_build, 0) self.assertEqual(pkg.rpm_license, None) self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, None) self.assertEqual(pkg.rpm_buildhost, None) self.assertEqual(pkg.rpm_sourcerpm, None) self.assertEqual(pkg.rpm_header_start, 0) self.assertEqual(pkg.rpm_header_end, 0) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 0) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 0) self.assertEqual(pkg.location_href, None) self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, None) self.assertEqual(pkg.requires, []) self.assertEqual(pkg.provides, []) self.assertEqual(pkg.conflicts, []) self.assertEqual(pkg.obsoletes, []) self.assertEqual(pkg.files, [(None, '/usr/bin/', 'super_kernel'), (None, '/usr/share/man/', 'super_kernel.8.gz')]) self.assertEqual(pkg.changelogs, []) def test_xml_parser_filelists_repo02(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_filelists(REPO_02_FILXML, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 2) self.assertEqual(userdata["warnings"], []) def test_xml_parser_filelists_snippet02(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) content = open(FILELISTS_SNIPPET_02).read() cr.xml_parse_filelists_snippet(content, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 2) self.assertEqual(userdata["warnings"], []) def test_xml_parser_filelists_snippet_huge(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) # generete huge filelists snippet content = """ """ for i in range(145951): content += "/usr/share/icons/Flat-Remix-Yellow/status/symbolic/user-available-symbolic.svg" content += "" cr.xml_parse_filelists_snippet(content, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['flat-remix-icon-theme']) self.assertEqual(userdata["pkgcb_calls"], 1) self.assertEqual(userdata["warnings"], []) def test_xml_parser_filelists_repo02_only_pkgcb(self): pkgs = [] def pkgcb(pkg): pkgs.append(pkg) cr.xml_parse_filelists(REPO_02_FILXML, None, pkgcb, None) self.assertEqual([pkg.name for pkg in pkgs], 
['fake_bash', 'super_kernel']) def test_xml_parser_filelists_repo02_no_cbs(self): self.assertRaises(ValueError, cr.xml_parse_filelists, REPO_02_FILXML, None, None, None) def test_xml_parser_filelists_warnings(self): userdata = { "pkgs": [], "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_filelists(FILELISTS_MULTI_WARN_00_PATH, newpkgcb, None, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["warnings"], [(1, 'Missing attribute "arch" of a package element'), (2, 'Unknown file type "xxx"'), (0, 'Unknown element "bar"')]) def test_xml_parser_filelists_error(self): userdata = { "pkgs": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg self.assertRaises(cr.CreaterepoCError, cr.xml_parse_filelists, FILELISTS_ERROR_00_PATH, newpkgcb, None, None) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], []) def test_xml_parser_filelists_newpkgcb_abort(self): def newpkgcb(pkgId, name, arch): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_filelists, REPO_02_FILXML, newpkgcb, None, None) def test_xml_parser_filelists_pkgcb_abort(self): def newpkgcb(pkgId, name, arch): return cr.Package() def pkgcb(pkg): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_filelists, REPO_02_FILXML, newpkgcb, pkgcb, None) def test_xml_parser_filelists_warningcb_abort(self): def newpkgcb(pkgId, name, arch): return cr.Package() def warningcb(type, msg): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_filelists, FILELISTS_MULTI_WARN_00_PATH, newpkgcb, None, warningcb) class TestCaseXmlParserOther(unittest.TestCase): def test_xml_parser_other_repo01(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_other(REPO_01_OTHXML, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 1) self.assertEqual(userdata["warnings"], []) pkg = userdata["pkgs"][0] self.assertEqual(pkg.pkgId, "152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf") self.assertEqual(pkg.name, "super_kernel") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "6.0.1") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "2") self.assertEqual(pkg.summary, None) self.assertEqual(pkg.description, None) self.assertEqual(pkg.url, None) self.assertEqual(pkg.time_file, 0) self.assertEqual(pkg.time_build, 0) self.assertEqual(pkg.rpm_license, None) self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, None) self.assertEqual(pkg.rpm_buildhost, None) self.assertEqual(pkg.rpm_sourcerpm, None) self.assertEqual(pkg.rpm_header_start, 0) self.assertEqual(pkg.rpm_header_end, 0) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 0) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 0) self.assertEqual(pkg.location_href, None) self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, None) self.assertEqual(pkg.requires, []) self.assertEqual(pkg.provides, []) 
self.assertEqual(pkg.conflicts, []) self.assertEqual(pkg.obsoletes, []) self.assertEqual(pkg.files, []) self.assertEqual(pkg.changelogs, [('Tomas Mlcoch - 6.0.1-1', 1334664000, '- First release'), ('Tomas Mlcoch - 6.0.1-2', 1334664001, '- Second release')]) def test_xml_parser_other_snippet01(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) content = open(OTHER_SNIPPET_01).read() cr.xml_parse_other_snippet(content, newpkgcb, pkgcb, warningcb) self.assertEqual(userdata["warnings"], []) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 1) pkg = userdata["pkgs"][0] self.assertEqual(pkg.pkgId, "152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf") self.assertEqual(pkg.name, "super_kernel") self.assertEqual(pkg.arch, "x86_64") self.assertEqual(pkg.version, "6.0.1") self.assertEqual(pkg.epoch, "0") self.assertEqual(pkg.release, "2") self.assertEqual(pkg.summary, None) self.assertEqual(pkg.description, None) self.assertEqual(pkg.url, None) self.assertEqual(pkg.time_file, 0) self.assertEqual(pkg.time_build, 0) self.assertEqual(pkg.rpm_license, None) self.assertEqual(pkg.rpm_vendor, None) self.assertEqual(pkg.rpm_group, None) self.assertEqual(pkg.rpm_buildhost, None) self.assertEqual(pkg.rpm_sourcerpm, None) self.assertEqual(pkg.rpm_header_start, 0) self.assertEqual(pkg.rpm_header_end, 0) self.assertEqual(pkg.rpm_packager, None) self.assertEqual(pkg.size_package, 0) self.assertEqual(pkg.size_installed, 0) self.assertEqual(pkg.size_archive, 0) self.assertEqual(pkg.location_href, None) self.assertEqual(pkg.location_base, None) self.assertEqual(pkg.checksum_type, None) self.assertEqual(pkg.requires, []) self.assertEqual(pkg.provides, []) self.assertEqual(pkg.conflicts, []) self.assertEqual(pkg.obsoletes, []) self.assertEqual(pkg.files, []) self.assertEqual(pkg.changelogs, [('Tomas Mlcoch - 6.0.1-1', 1334664000, '- First release'), ('Tomas Mlcoch - 6.0.1-2', 1334664001, '- Second release')]) def test_xml_parser_other_repo02(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) content = open(OTHER_SNIPPET_02).read() cr.xml_parse_other_snippet(content, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 2) self.assertEqual(userdata["warnings"], []) def test_xml_parser_other_snippet02(self): userdata = { "pkgs": [], "pkgcb_calls": 0, "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def pkgcb(pkg): userdata["pkgcb_calls"] += 1 def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_other(REPO_02_OTHXML, newpkgcb, pkgcb, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], ['fake_bash', 'super_kernel']) self.assertEqual(userdata["pkgcb_calls"], 2) self.assertEqual(userdata["warnings"], []) def test_xml_parser_other_repo02_only_pkgcb(self): pkgs = [] def pkgcb(pkg): pkgs.append(pkg) cr.xml_parse_other(REPO_02_OTHXML, None, pkgcb, None) self.assertEqual([pkg.name for 
pkg in pkgs], ['fake_bash', 'super_kernel']) def test_xml_parser_other_repo02_no_cbs(self): self.assertRaises(ValueError, cr.xml_parse_other, REPO_02_OTHXML, None, None, None) def test_xml_parser_other_warnings(self): userdata = { "pkgs": [], "warnings": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg def warningcb(warn_type, msg): userdata["warnings"].append((warn_type, msg)) cr.xml_parse_other(OTHER_MULTI_WARN_00_PATH, newpkgcb, None, warningcb) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], [None, 'super_kernel']) self.assertEqual(userdata["warnings"], [(1, 'Missing attribute "name" of a package element'), (0, 'Unknown element "bar"'), (3, 'Conversion of "xxx" to integer failed')]) def test_xml_parser_other_error(self): userdata = { "pkgs": [] } def newpkgcb(pkgId, name, arch): pkg = cr.Package() userdata["pkgs"].append(pkg) return pkg self.assertRaises(cr.CreaterepoCError, cr.xml_parse_other, OTHER_ERROR_00_PATH, newpkgcb, None, None) self.assertEqual([pkg.name for pkg in userdata["pkgs"]], []) def test_xml_parser_other_newpkgcb_abort(self): def newpkgcb(pkgId, name, arch): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_other, REPO_02_OTHXML, newpkgcb, None, None) def test_xml_parser_other_pkgcb_abort(self): def newpkgcb(pkgId, name, arch): return cr.Package() def pkgcb(pkg): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_other, REPO_02_OTHXML, newpkgcb, pkgcb, None) def test_xml_parser_other_warningcb_abort(self): def newpkgcb(pkgId, name, arch): return cr.Package() def warningcb(type, msg): raise Error("Foo error") self.assertRaises(cr.CreaterepoCError, cr.xml_parse_other, OTHER_MULTI_WARN_00_PATH, newpkgcb, None, warningcb) class TestCaseXmlParserRepomd(unittest.TestCase): def test_xml_parser_repomd_bad_repomd_object(self): self.assertRaises(TypeError, cr.xml_parse_repomd, REPO_01_REPOMD, "foo", None) def test_xml_parser_repomd_repo01(self): warnings = [] def warningcb(warn_type, msg): warnings.append((warn_type, msg)) repomd = cr.Repomd() cr.xml_parse_repomd(REPO_01_REPOMD, repomd, warningcb) self.assertEqual(warnings, []) self.assertEqual(repomd.revision, "1334667230") self.assertEqual(repomd.repo_tags, []) self.assertEqual(repomd.distro_tags, []) self.assertEqual(repomd.content_tags, []) self.assertEqual(len(repomd.records), 3) self.assertEqual(repomd.records[0].type, "filelists") self.assertEqual(repomd.records[0].location_real, None) self.assertEqual(repomd.records[0].location_href, "repodata/c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c-filelists.xml.gz") self.assertEqual(repomd.records[0].checksum, "c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c") self.assertEqual(repomd.records[0].checksum_type, "sha256") self.assertEqual(repomd.records[0].checksum_open, "85bc611be5d81ac8da2fe01e98ef741d243d1518fcc46ada70660020803fbf09") self.assertEqual(repomd.records[0].checksum_open_type, "sha256") self.assertEqual(repomd.records[0].timestamp, 1334667230) self.assertEqual(repomd.records[0].size, 273) self.assertEqual(repomd.records[0].size_open, 389) self.assertEqual(repomd.records[0].db_ver, 0) self.assertEqual(repomd.records[1].type, "other") self.assertEqual(repomd.records[1].location_real, None) self.assertEqual(repomd.records[1].location_href, "repodata/b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef-other.xml.gz") self.assertEqual(repomd.records[1].checksum, 
"b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef") self.assertEqual(repomd.records[1].checksum_type, "sha256") self.assertEqual(repomd.records[1].checksum_open, "da6096c924349af0c326224a33be0cdb26897fbe3d25477ac217261652449445") self.assertEqual(repomd.records[1].checksum_open_type, "sha256") self.assertEqual(repomd.records[1].timestamp, 1334667230) self.assertEqual(repomd.records[1].size, 332) self.assertEqual(repomd.records[1].size_open, 530) self.assertEqual(repomd.records[1].db_ver, 0) self.assertEqual(repomd.records[2].type, "primary") self.assertEqual(repomd.records[2].location_real, None) self.assertEqual(repomd.records[2].location_href, "repodata/6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e-primary.xml.gz") self.assertEqual(repomd.records[2].checksum, "6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e") self.assertEqual(repomd.records[2].checksum_type, "sha256") self.assertEqual(repomd.records[2].checksum_open, "0fc6cadf97d515e87491d24dc9712d8ddaf2226a21ae7f131ff42d71a877c496") self.assertEqual(repomd.records[2].checksum_open_type, "sha256") self.assertEqual(repomd.records[2].timestamp, 1334667230) self.assertEqual(repomd.records[2].size, 782) self.assertEqual(repomd.records[2].size_open, 2085) self.assertEqual(repomd.records[2].db_ver, 0) def test_xml_parser_repomd_repo01_nowarningcb(self): repomd = cr.Repomd() cr.xml_parse_repomd(REPO_01_REPOMD, repomd) self.assertEqual(repomd.revision, "1334667230") self.assertEqual(repomd.repo_tags, []) self.assertEqual(repomd.distro_tags, []) self.assertEqual(repomd.content_tags, []) self.assertEqual(len(repomd.records), 3) createrepo_c-0.17.0/tests/run_gtester.sh.in000077500000000000000000000013421400672373200207050ustar00rootroot00000000000000#!/bin/bash BINDIR="${CMAKE_BINARY_DIR}/tests/" RET=0 # Next line is a hack # Builds for epel6 doesn't have rpath setted and # tests fails with a "libcreaterepo_c.so.0: cannot open shared # object file: No such file or directory" error message. export "LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/src/:" function runtest { gtester --verbose --keep-going "$1" if [ $? -ne 0 ]; then RET=$(($RET+1)) ; fi } # Go to source dir (so test would be able to use testdata/ in this dir) cd ${CMAKE_CURRENT_SOURCE_DIR} # Iterate over compiled tests for i in "$BINDIR"/test_*; do # Run only executable regular files with "test_" prefix if [ -f $i -a -x $i ]; then runtest "$i" fi done echo "Number of fails: $RET" exit $RET createrepo_c-0.17.0/tests/test_checksum.c000066400000000000000000000123511400672373200204070ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
 */

#include
#include
#include
#include
#include "fixtures.h"
#include "createrepo/checksum.h"

static void test_cr_checksum_file(void)
{
    char *checksum;
    GError *tmp_err = NULL;

    checksum = cr_checksum_file(TEST_EMPTY_FILE, CR_CHECKSUM_MD5, NULL);
    g_assert_cmpstr(checksum, ==, "d41d8cd98f00b204e9800998ecf8427e");
    g_free(checksum);
    checksum = cr_checksum_file(TEST_EMPTY_FILE, CR_CHECKSUM_SHA1, NULL);
    g_assert_cmpstr(checksum, ==, "da39a3ee5e6b4b0d3255bfef95601890afd80709");
    g_free(checksum);
    checksum = cr_checksum_file(TEST_EMPTY_FILE, CR_CHECKSUM_SHA256, NULL);
    g_assert_cmpstr(checksum, ==, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649"
                                  "b934ca495991b7852b855");
    g_free(checksum);
    checksum = cr_checksum_file(TEST_EMPTY_FILE, CR_CHECKSUM_SHA512, NULL);
    g_assert_cmpstr(checksum, ==, "cf83e1357eefb8bdf1542850d66d8007d620e4050b5"
                                  "715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd4741"
                                  "7a81a538327af927da3e");
    g_free(checksum);

    checksum = cr_checksum_file(TEST_TEXT_FILE, CR_CHECKSUM_MD5, &tmp_err);
    g_assert_cmpstr(checksum, ==, "d6d4da5c15f8fe7570ce6ab6b3503916");
    g_assert(!tmp_err);
    g_free(checksum);
    checksum = cr_checksum_file(TEST_TEXT_FILE, CR_CHECKSUM_SHA1, &tmp_err);
    g_assert_cmpstr(checksum, ==, "da048ee8fabfbef1b3d6d3f5a4be20029eecec77");
    g_assert(!tmp_err);
    g_free(checksum);
    checksum = cr_checksum_file(TEST_TEXT_FILE, CR_CHECKSUM_SHA256, &tmp_err);
    g_assert_cmpstr(checksum, ==, "2f395bdfa2750978965e4781ddf224c89646c7d7a15"
                                  "69b7ebb023b170f7bd8bb");
    g_assert(!tmp_err);
    g_free(checksum);
    checksum = cr_checksum_file(TEST_TEXT_FILE, CR_CHECKSUM_SHA512, &tmp_err);
    g_assert_cmpstr(checksum, ==, "6ef7c2fd003614033aab59a65164c897fd150cfa855"
                                  "1f2dd66828cc7a4d16afc3a35890f342eeaa424c1270fa8bbb4b792875b9deb34"
                                  "cd78ab9ded1c360de45c");
    g_assert(!tmp_err);
    g_free(checksum);

    checksum = cr_checksum_file(TEST_BINARY_FILE, CR_CHECKSUM_MD5, NULL);
    g_assert_cmpstr(checksum, ==, "4f8b033d7a402927a20c9328fc0e0f46");
    g_free(checksum);
    checksum = cr_checksum_file(TEST_BINARY_FILE, CR_CHECKSUM_SHA1, NULL);
    g_assert_cmpstr(checksum, ==, "3539fb660a41846352ac4fa9076d168a3c77070b");
    g_free(checksum);
    checksum = cr_checksum_file(TEST_BINARY_FILE, CR_CHECKSUM_SHA256, NULL);
    g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770"
                                  "b48f1a7282b015d6d883e");
    g_free(checksum);
    checksum = cr_checksum_file(TEST_BINARY_FILE, CR_CHECKSUM_SHA512, NULL);
    g_assert_cmpstr(checksum, ==, "339877a8ce6cdb2df62f3f76c005cac4f50144197bd"
                                  "095cec21056d6ddde570fe5b16e3f1cd077ece799d5dd23dc6c9c1afed018384d"
                                  "840bd97233c320e60dfa");
    g_free(checksum);

    // Corner cases
    checksum = cr_checksum_file(TEST_BINARY_FILE, 244, &tmp_err);
    g_assert(!checksum);
    g_assert(tmp_err);
    g_error_free(tmp_err);
    tmp_err = NULL;
    checksum = cr_checksum_file(NON_EXIST_FILE, CR_CHECKSUM_MD5, &tmp_err);
    g_assert(!checksum);
    g_assert(tmp_err);
    g_error_free(tmp_err);
    tmp_err = NULL;
}

static void test_cr_checksum_name_str(void)
{
    const char *checksum_name;

    checksum_name = cr_checksum_name_str(CR_CHECKSUM_MD5);
    g_assert_cmpstr(checksum_name, ==, "md5");
    checksum_name = cr_checksum_name_str(CR_CHECKSUM_SHA);
    g_assert_cmpstr(checksum_name, ==, "sha");
    checksum_name = cr_checksum_name_str(CR_CHECKSUM_SHA1);
    g_assert_cmpstr(checksum_name, ==, "sha1");
    checksum_name = cr_checksum_name_str(CR_CHECKSUM_SHA224);
    g_assert_cmpstr(checksum_name, ==, "sha224");
    checksum_name = cr_checksum_name_str(CR_CHECKSUM_SHA256);
    g_assert_cmpstr(checksum_name, ==, "sha256");
    checksum_name = cr_checksum_name_str(CR_CHECKSUM_SHA384);
g_assert_cmpstr(checksum_name, ==, "sha384"); checksum_name = cr_checksum_name_str(CR_CHECKSUM_SHA512); g_assert_cmpstr(checksum_name, ==, "sha512"); checksum_name = cr_checksum_name_str(244); g_assert_cmpstr(checksum_name, ==, NULL); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/checksum/test_cr_checksum_file", test_cr_checksum_file); g_test_add_func("/checksum/test_cr_checksum_name_str", test_cr_checksum_name_str); return g_test_run(); } createrepo_c-0.17.0/tests/test_compression_wrapper.c000066400000000000000000000642571400672373200227220ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/misc.h" #include "createrepo/compression_wrapper.h" #define TMP_FILE_PATTERN "test_XXXXXX.txt" #define COMPRESSED_BUFFER_LEN 512 #define FILE_COMPRESSED_0_CONTENT "" #define FILE_COMPRESSED_0_CONTENT_LEN 0 #define FILE_COMPRESSED_0_PLAIN TEST_COMPRESSED_FILES_PATH"/00_plain.txt" #define FILE_COMPRESSED_0_GZ TEST_COMPRESSED_FILES_PATH"/00_plain.txt.gz" #define FILE_COMPRESSED_0_BZ2 TEST_COMPRESSED_FILES_PATH"/00_plain.txt.bz2" #define FILE_COMPRESSED_0_XZ TEST_COMPRESSED_FILES_PATH"/00_plain.txt.xz" #define FILE_COMPRESSED_0_PLAIN_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/00_plain.foo0" #define FILE_COMPRESSED_0_GZ_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/00_plain.foo1" #define FILE_COMPRESSED_0_BZ2_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/00_plain.foo2" #define FILE_COMPRESSED_0_XZ_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/00_plain.foo3" #define FILE_COMPRESSED_1_CONTENT "foobar foobar foobar foobar test test\nfolkjsaflkjsadokf\n" #define FILE_COMPRESSED_1_CONTENT_LEN 56 #define FILE_COMPRESSED_1_PLAIN TEST_COMPRESSED_FILES_PATH"/01_plain.txt" #define FILE_COMPRESSED_1_GZ TEST_COMPRESSED_FILES_PATH"/01_plain.txt.gz" #define FILE_COMPRESSED_1_BZ2 TEST_COMPRESSED_FILES_PATH"/01_plain.txt.bz2" #define FILE_COMPRESSED_1_XZ TEST_COMPRESSED_FILES_PATH"/01_plain.txt.xz" #define FILE_COMPRESSED_1_ZCK TEST_COMPRESSED_FILES_PATH"/01_plain.txt.zck" #define FILE_COMPRESSED_1_PLAIN_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/01_plain.foo0" #define FILE_COMPRESSED_1_GZ_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/01_plain.foo1" #define FILE_COMPRESSED_1_BZ2_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/01_plain.foo2" #define FILE_COMPRESSED_1_XZ_BAD_SUFFIX TEST_COMPRESSED_FILES_PATH"/01_plain.foo3" static void test_cr_contentstat(void) { cr_ContentStat *cs = NULL; GError *tmp_err = NULL; cs = cr_contentstat_new(CR_CHECKSUM_UNKNOWN, NULL); g_assert(cs); g_assert(!cs->checksum); cr_contentstat_free(cs, NULL); cs = NULL; cs = cr_contentstat_new(CR_CHECKSUM_UNKNOWN, &tmp_err); g_assert(cs); g_assert(!tmp_err); 
    g_assert(!cs->checksum);
    cr_contentstat_free(cs, &tmp_err);
    g_assert(!tmp_err);
    cs = NULL;
}

static void test_cr_compression_suffix(void)
{
    const char *suffix;

    suffix = cr_compression_suffix(CR_CW_AUTO_DETECT_COMPRESSION);
    g_assert(!suffix);
    suffix = cr_compression_suffix(CR_CW_UNKNOWN_COMPRESSION);
    g_assert(!suffix);
    suffix = cr_compression_suffix(CR_CW_NO_COMPRESSION);
    g_assert(!suffix);
    suffix = cr_compression_suffix(CR_CW_GZ_COMPRESSION);
    g_assert_cmpstr(suffix, ==, ".gz");
    suffix = cr_compression_suffix(CR_CW_BZ2_COMPRESSION);
    g_assert_cmpstr(suffix, ==, ".bz2");
    suffix = cr_compression_suffix(CR_CW_XZ_COMPRESSION);
    g_assert_cmpstr(suffix, ==, ".xz");
}

static void test_cr_compression_type(void)
{
    cr_CompressionType type;

    type = cr_compression_type(NULL);
    g_assert_cmpint(type, ==, CR_CW_UNKNOWN_COMPRESSION);
    type = cr_compression_type("");
    g_assert_cmpint(type, ==, CR_CW_UNKNOWN_COMPRESSION);
    type = cr_compression_type("foo");
    g_assert_cmpint(type, ==, CR_CW_UNKNOWN_COMPRESSION);
    type = cr_compression_type("gz");
    g_assert_cmpint(type, ==, CR_CW_GZ_COMPRESSION);
    type = cr_compression_type("gzip");
    g_assert_cmpint(type, ==, CR_CW_GZ_COMPRESSION);
    type = cr_compression_type("GZ");
    g_assert_cmpint(type, ==, CR_CW_GZ_COMPRESSION);
    type = cr_compression_type("Gz");
    g_assert_cmpint(type, ==, CR_CW_GZ_COMPRESSION);
    type = cr_compression_type("bz2");
    g_assert_cmpint(type, ==, CR_CW_BZ2_COMPRESSION);
    type = cr_compression_type("bzip2");
    g_assert_cmpint(type, ==, CR_CW_BZ2_COMPRESSION);
    type = cr_compression_type("xz");
    g_assert_cmpint(type, ==, CR_CW_XZ_COMPRESSION);
}

static void test_cr_detect_compression(void)
{
    cr_CompressionType ret;
    GError *tmp_err = NULL;

    // Plain
    ret = cr_detect_compression(FILE_COMPRESSED_0_PLAIN, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_NO_COMPRESSION);
    g_assert(!tmp_err);
    ret = cr_detect_compression(FILE_COMPRESSED_1_PLAIN, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_NO_COMPRESSION);
    g_assert(!tmp_err);

    // Gz
    ret = cr_detect_compression(FILE_COMPRESSED_0_GZ, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_GZ_COMPRESSION);
    g_assert(!tmp_err);
    ret = cr_detect_compression(FILE_COMPRESSED_1_GZ, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_GZ_COMPRESSION);
    g_assert(!tmp_err);

    // Bz2
    ret = cr_detect_compression(FILE_COMPRESSED_0_BZ2, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_BZ2_COMPRESSION);
    g_assert(!tmp_err);
    ret = cr_detect_compression(FILE_COMPRESSED_1_BZ2, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_BZ2_COMPRESSION);
    g_assert(!tmp_err);

    // Xz
    ret = cr_detect_compression(FILE_COMPRESSED_0_XZ, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_XZ_COMPRESSION);
    g_assert(!tmp_err);
    ret = cr_detect_compression(FILE_COMPRESSED_1_XZ, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_XZ_COMPRESSION);
    g_assert(!tmp_err);
}

static void test_cr_detect_compression_bad_suffix(void)
{
    cr_CompressionType ret;
    GError *tmp_err = NULL;

    // Plain
    ret = cr_detect_compression(FILE_COMPRESSED_0_PLAIN_BAD_SUFFIX, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_NO_COMPRESSION);
    g_assert(!tmp_err);
    ret = cr_detect_compression(FILE_COMPRESSED_1_PLAIN_BAD_SUFFIX, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_NO_COMPRESSION);
    g_assert(!tmp_err);

    // Gz
    ret = cr_detect_compression(FILE_COMPRESSED_0_GZ_BAD_SUFFIX, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_GZ_COMPRESSION);
    g_assert(!tmp_err);
    ret = cr_detect_compression(FILE_COMPRESSED_1_GZ_BAD_SUFFIX, &tmp_err);
    g_assert_cmpint(ret, ==, CR_CW_GZ_COMPRESSION);
    g_assert(!tmp_err);

    // Bz2
    ret = cr_detect_compression(FILE_COMPRESSED_0_BZ2_BAD_SUFFIX, &tmp_err);
    g_assert_cmpint(ret, ==,
CR_CW_BZ2_COMPRESSION); g_assert(!tmp_err); ret = cr_detect_compression(FILE_COMPRESSED_1_BZ2_BAD_SUFFIX, &tmp_err); g_assert_cmpint(ret, ==, CR_CW_BZ2_COMPRESSION); g_assert(!tmp_err); // Xz ret = cr_detect_compression(FILE_COMPRESSED_0_XZ_BAD_SUFFIX, &tmp_err); g_assert_cmpint(ret, ==, CR_CW_XZ_COMPRESSION); g_assert(!tmp_err); ret = cr_detect_compression(FILE_COMPRESSED_1_XZ_BAD_SUFFIX, &tmp_err); g_assert_cmpint(ret, ==, CR_CW_XZ_COMPRESSION); g_assert(!tmp_err); } void test_helper_cw_input(const char *filename, cr_CompressionType ctype, const char *content, int len) { int ret; CR_FILE *file; char buffer[COMPRESSED_BUFFER_LEN+1]; GError *tmp_err = NULL; file = cr_open(filename, CR_CW_MODE_READ, ctype, &tmp_err); g_assert(file); g_assert(!tmp_err); ret = cr_read(file, buffer, COMPRESSED_BUFFER_LEN, &tmp_err); g_assert_cmpint(ret, ==, len); g_assert(!tmp_err); buffer[ret] = '\0'; g_assert_cmpstr(buffer, ==, content); ret = cr_close(file, &tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(!tmp_err); } static void test_cr_read_with_autodetection(void) { // Plain test_helper_cw_input(FILE_COMPRESSED_0_PLAIN, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_input(FILE_COMPRESSED_1_PLAIN, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); // Gz test_helper_cw_input(FILE_COMPRESSED_0_GZ, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_input(FILE_COMPRESSED_1_GZ, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); // Bzip2 test_helper_cw_input(FILE_COMPRESSED_0_BZ2, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_input(FILE_COMPRESSED_1_BZ2, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); // Xz test_helper_cw_input(FILE_COMPRESSED_0_XZ, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_input(FILE_COMPRESSED_1_XZ, CR_CW_AUTO_DETECT_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); } typedef struct { gchar *tmp_filename; } Outputtest; static void outputtest_setup(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data) { int fd; fd = g_file_open_tmp(TMP_FILE_PATTERN, &(outputtest->tmp_filename), NULL); close(fd); } static void outputtest_teardown(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data) { if (outputtest->tmp_filename) { remove(outputtest->tmp_filename); g_free(outputtest->tmp_filename); } } #define OUTPUT_TYPE_WRITE 0 #define OUTPUT_TYPE_PUTS 1 #define OUTPUT_TYPE_PRINTF 2 void test_helper_cw_output(int type, const char *filename, cr_CompressionType ctype, const char *content, int len) { int ret; CR_FILE *file; GError *tmp_err = NULL; file = cr_open(filename, CR_CW_MODE_WRITE, ctype, &tmp_err); g_assert(file); g_assert(!tmp_err); switch(type) { case OUTPUT_TYPE_WRITE: ret = cr_write(file, content, len, &tmp_err); g_assert_cmpint(ret, ==, len); g_assert(!tmp_err); break; case OUTPUT_TYPE_PUTS: ret = cr_puts(file, content, &tmp_err); g_assert_cmpint(ret, ==, len); g_assert(!tmp_err); break; case OUTPUT_TYPE_PRINTF: ret = cr_printf(&tmp_err, file, "%s", content); g_assert_cmpint(ret, ==, len); g_assert(!tmp_err); break; default: break; } ret = cr_close(file, &tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(!tmp_err); // Read and compare test_helper_cw_input(filename, ctype, content, len); } static void 
outputtest_cw_output(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data) { // Plain test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_NO_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_NO_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_NO_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_NO_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_NO_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_NO_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); // Gz test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_GZ_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_GZ_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_GZ_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_GZ_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_GZ_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_GZ_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); // Bz2 test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_BZ2_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_BZ2_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_BZ2_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_BZ2_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_BZ2_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_BZ2_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); // Xz test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_XZ_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_WRITE, outputtest->tmp_filename, CR_CW_XZ_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_XZ_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PUTS, outputtest->tmp_filename, CR_CW_XZ_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_XZ_COMPRESSION, FILE_COMPRESSED_0_CONTENT, FILE_COMPRESSED_0_CONTENT_LEN); 
test_helper_cw_output(OUTPUT_TYPE_PRINTF, outputtest->tmp_filename, CR_CW_XZ_COMPRESSION, FILE_COMPRESSED_1_CONTENT, FILE_COMPRESSED_1_CONTENT_LEN); } static void test_cr_error_handling(void) { GError *tmp_err = NULL; cr_CompressionType type; CR_FILE *f; type = cr_detect_compression("/filename/that/should/not/exists", &tmp_err); g_assert_cmpint(type, ==, CR_CW_UNKNOWN_COMPRESSION); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_NOFILE); g_error_free(tmp_err); tmp_err = NULL; type = cr_detect_compression("/", &tmp_err); g_assert_cmpint(type, ==, CR_CW_UNKNOWN_COMPRESSION); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_NOFILE); g_error_free(tmp_err); tmp_err = NULL; f = cr_open("/", CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, &tmp_err); g_assert(!f); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_NOFILE); g_error_free(tmp_err); tmp_err = NULL; // Opening dir for writing f = cr_open("/", CR_CW_MODE_WRITE, CR_CW_NO_COMPRESSION, &tmp_err); g_assert(!f); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_IO); g_error_free(tmp_err); tmp_err = NULL; f = cr_open("/", CR_CW_MODE_WRITE, CR_CW_GZ_COMPRESSION, &tmp_err); g_assert(!f); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_GZ); g_error_free(tmp_err); tmp_err = NULL; f = cr_open("/", CR_CW_MODE_WRITE, CR_CW_BZ2_COMPRESSION, &tmp_err); g_assert(!f); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_IO); g_error_free(tmp_err); tmp_err = NULL; f = cr_open("/", CR_CW_MODE_WRITE, CR_CW_XZ_COMPRESSION, &tmp_err); g_assert(!f); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_XZ); g_error_free(tmp_err); tmp_err = NULL; // Opening plain text file as compressed char buf[256]; int ret; // gzread can read compressed as well as uncompressed, so this test // is useful. 
f = cr_open(FILE_COMPRESSED_1_PLAIN, CR_CW_MODE_READ, CR_CW_GZ_COMPRESSION, &tmp_err); g_assert(f); ret = cr_read(f, buf, 256, &tmp_err); g_assert_cmpint(ret, ==, FILE_COMPRESSED_1_CONTENT_LEN); g_assert(!tmp_err); ret = cr_close(f, &tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(!tmp_err); f = cr_open(FILE_COMPRESSED_1_PLAIN, CR_CW_MODE_READ, CR_CW_BZ2_COMPRESSION, &tmp_err); g_assert(f); ret = cr_read(f, buf, 256, &tmp_err); g_assert_cmpint(ret, ==, -1); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_BZ2); g_error_free(tmp_err); tmp_err = NULL; ret = cr_close(f, &tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(!tmp_err); f = cr_open(FILE_COMPRESSED_1_PLAIN, CR_CW_MODE_READ, CR_CW_XZ_COMPRESSION, &tmp_err); g_assert(f); ret = cr_read(f, buf, 256, &tmp_err); g_assert_cmpint(ret, ==, -1); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_XZ); g_error_free(tmp_err); tmp_err = NULL; ret = cr_close(f, &tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(!tmp_err); } static void test_contentstating_singlewrite(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data) { CR_FILE *f; int ret; cr_ContentStat *stat; GError *tmp_err = NULL; const char *content = "sdlkjowykjnhsadyhfsoaf\nasoiuyseahlndsf\n"; const int content_len = 39; const char *content_sha256 = "c9d112f052ab86270bfb484817a513d6ce188133ddc0" "7c0fc1ac32018b6da6c7"; // No compression stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); f = cr_sopen(outputtest->tmp_filename, CR_CW_MODE_WRITE, CR_CW_NO_COMPRESSION, stat, &tmp_err); g_assert(f); g_assert(!tmp_err); ret = cr_write(f, content, content_len, &tmp_err); g_assert_cmpint(ret, ==, content_len); g_assert(!tmp_err); cr_close(f, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(stat->size, ==, content_len); g_assert_cmpstr(stat->checksum, ==, content_sha256); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); // Gz compression stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); f = cr_sopen(outputtest->tmp_filename, CR_CW_MODE_WRITE, CR_CW_GZ_COMPRESSION, stat, &tmp_err); g_assert(f); g_assert(!tmp_err); ret = cr_write(f, content, content_len, &tmp_err); g_assert_cmpint(ret, ==, content_len); g_assert(!tmp_err); cr_close(f, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(stat->size, ==, content_len); g_assert_cmpstr(stat->checksum, ==, content_sha256); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); // Bz2 compression stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); f = cr_sopen(outputtest->tmp_filename, CR_CW_MODE_WRITE, CR_CW_BZ2_COMPRESSION, stat, &tmp_err); g_assert(f); g_assert(!tmp_err); ret = cr_write(f, content, content_len, &tmp_err); g_assert_cmpint(ret, ==, content_len); g_assert(!tmp_err); cr_close(f, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(stat->size, ==, content_len); g_assert_cmpstr(stat->checksum, ==, content_sha256); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); // Xz compression stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); f = cr_sopen(outputtest->tmp_filename, CR_CW_MODE_WRITE, CR_CW_XZ_COMPRESSION, stat, &tmp_err); g_assert(f); g_assert(!tmp_err); ret = cr_write(f, content, content_len, &tmp_err); g_assert_cmpint(ret, ==, content_len); g_assert(!tmp_err); cr_close(f, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(stat->size, ==, content_len); g_assert_cmpstr(stat->checksum, ==, content_sha256); cr_contentstat_free(stat, 
&tmp_err); g_assert(!tmp_err); } static void test_contentstating_multiwrite(Outputtest *outputtest, G_GNUC_UNUSED gconstpointer test_data) { CR_FILE *f; int ret; cr_ContentStat *stat; GError *tmp_err = NULL; const char *content = "sdlkjowykjnhsadyhfsoaf\nasoiuyseahlndsf\n"; const int content_len = 39; const char *content_sha256 = "c9d112f052ab86270bfb484817a513d6ce188133ddc0" "7c0fc1ac32018b6da6c7"; // Gz compression stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); f = cr_sopen(outputtest->tmp_filename, CR_CW_MODE_WRITE, CR_CW_GZ_COMPRESSION, stat, &tmp_err); g_assert(f); g_assert(!tmp_err); ret = cr_write(f, content, 10, &tmp_err); g_assert_cmpint(ret, ==, 10); g_assert(!tmp_err); ret = cr_write(f, content+10, 29, &tmp_err); g_assert_cmpint(ret, ==, 29); g_assert(!tmp_err); cr_close(f, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(stat->size, ==, content_len); g_assert_cmpstr(stat->checksum, ==, content_sha256); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); } static void test_cr_get_zchunk_with_index(void) { gchar *output; CR_FILE *f; GError *tmp_err = NULL; f = cr_sopen(FILE_COMPRESSED_1_ZCK, CR_CW_MODE_READ, CR_CW_ZCK_COMPRESSION, NULL, &tmp_err); #ifdef WITH_ZCHUNK g_assert(f); g_assert(!tmp_err); // First zchunk is for dictionary g_assert_cmpint(cr_get_zchunk_with_index(f, 0, &output, &tmp_err), ==, 0); g_assert(!tmp_err); g_assert_cmpint(cr_get_zchunk_with_index(f, 1, &output, &tmp_err), ==, 56); g_assert(g_str_has_prefix(output, "foobar foobar foobar")); g_free(output); g_assert(!tmp_err); // There are no additional zchunks g_assert_cmpint(cr_get_zchunk_with_index(f, 2, &output, &tmp_err), ==, 0); g_assert(!tmp_err); g_assert_cmpint(cr_get_zchunk_with_index(f, 3, &output, &tmp_err), ==, 0); g_assert(!tmp_err); cr_close(f, &tmp_err); g_assert(!tmp_err); #else g_assert(!f); g_assert(tmp_err); g_assert_cmpint(tmp_err->code, ==, CRE_IO); #endif // WITH_ZCHUNK } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/compression_wrapper/test_cr_contentstat", test_cr_contentstat); g_test_add_func("/compression_wrapper/test_cr_compression_suffix", test_cr_compression_suffix); g_test_add_func("/compression_wrapper/test_cr_detect_compression", test_cr_detect_compression); g_test_add_func("/compression_wrapper/test_cr_compression_type", test_cr_compression_type); g_test_add_func("/compression_wrapper/test_cr_detect_compression_bad_suffix", test_cr_detect_compression_bad_suffix); g_test_add_func("/compression_wrapper/test_cr_read_with_autodetection", test_cr_read_with_autodetection); g_test_add("/compression_wrapper/outputtest_cw_output", Outputtest, NULL, outputtest_setup, outputtest_cw_output, outputtest_teardown); g_test_add_func("/compression_wrapper/test_cr_error_handling", test_cr_error_handling); g_test_add("/compression_wrapper/test_contentstating_singlewrite", Outputtest, NULL, outputtest_setup, test_contentstating_singlewrite, outputtest_teardown); g_test_add("/compression_wrapper/test_contentstating_multiwrite", Outputtest, NULL, outputtest_setup, test_contentstating_multiwrite, outputtest_teardown); g_test_add_func("/compression_wrapper/test_cr_get_zchunk_with_index", test_cr_get_zchunk_with_index); return g_test_run(); } createrepo_c-0.17.0/tests/test_koji.c000066400000000000000000000335651400672373200175530ustar00rootroot00000000000000/* * Copyright (C) 2018 Red Hat, Inc. 
 *
 * Licensed under the GNU Lesser General Public License Version 2.1
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <glib.h>
#include "fixtures.h"
#include "createrepo/koji.h"
#include "createrepo/load_metadata.h"

// Tests

static void
test_koji_stuff_00(void)
{
    gchar *template = g_strdup(TMPDIR_TEMPLATE);
    gchar *tmp = g_strconcat(mkdtemp(template), "/", NULL);
    struct KojiMergedReposStuff *koji_stuff = NULL;
    struct CmdOptions o = {.koji=1, .koji_simple=1, .blocked=NULL, .tmp_out_repo=tmp};
    GSList *local_repos = NULL;
    struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) TEST_REPO_00, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);
    loc = cr_locate_metadata((gchar *) TEST_REPO_01, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);
    loc = cr_locate_metadata((gchar *) TEST_REPO_02, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);
    loc = cr_locate_metadata((gchar *) TEST_REPO_KOJI_01, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);

    int ret = koji_stuff_prepare(&koji_stuff, &o, local_repos);

    g_assert_cmpint(ret, ==, 0);
    //we have only 3 uniq srpm names
    g_assert_cmpint(g_hash_table_size(koji_stuff->include_srpms), ==, 3);
    g_assert(g_hash_table_contains(koji_stuff->include_srpms, "dwm"));
    g_assert(g_hash_table_contains(koji_stuff->include_srpms, "fake_bash"));
    g_assert(g_hash_table_contains(koji_stuff->include_srpms, "super_kernel"));
    g_assert(!koji_stuff->blocked_srpms);
    g_assert(koji_stuff->simple);
    gchar *origins_file_path = g_strconcat(tmp, "pkgorigins.gz", NULL);
    g_assert(g_file_test(origins_file_path, G_FILE_TEST_EXISTS));
    g_assert_cmpint(g_hash_table_size(koji_stuff->seen_rpms), ==, 0);

    koji_stuff_destroy(&koji_stuff);
    g_free(tmp);
    g_slist_free_full(local_repos, (GDestroyNotify) cr_metadatalocation_free);
    g_free(origins_file_path);
    g_free(template);
}

static void
test_koji_stuff_01(void)
{
    gchar *template = g_strdup(TMPDIR_TEMPLATE);
    gchar *tmp = g_strconcat(mkdtemp(template), "/", NULL);
    gchar *blocked_files_path = g_strconcat(tmp, "blocked.txt", NULL);
    FILE *blocked = fopen(blocked_files_path, "w");
    fprintf(blocked, "super_kernel\nfake_kernel\nfake_bash");
    fclose(blocked);
    struct KojiMergedReposStuff *koji_stuff = NULL;
    struct CmdOptions o = {.koji=0, .blocked=blocked_files_path, .tmp_out_repo=tmp};
    GSList *local_repos = NULL;
    struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) TEST_REPO_00, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);
    loc = cr_locate_metadata((gchar *) TEST_REPO_01, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);
    loc = cr_locate_metadata((gchar *) TEST_REPO_02, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);
    loc = cr_locate_metadata((gchar *) TEST_REPO_KOJI_01, TRUE, NULL);
    local_repos = g_slist_prepend(local_repos, loc);

    int ret = koji_stuff_prepare(&koji_stuff, &o, local_repos);
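    /* With a block list written out above, preparation must still succeed and
     * the three listed source RPM names must end up in blocked_srpms. */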
g_assert_cmpint(ret, ==, 0); //we have only 3 uniq srpm names g_assert_cmpint(g_hash_table_size(koji_stuff->include_srpms), ==, 3); g_assert(g_hash_table_contains(koji_stuff->include_srpms, "dwm")); g_assert(g_hash_table_contains(koji_stuff->include_srpms, "fake_bash")); g_assert(g_hash_table_contains(koji_stuff->include_srpms, "super_kernel")); g_assert(koji_stuff->blocked_srpms); g_assert_cmpint(g_hash_table_size(koji_stuff->blocked_srpms), ==, 3); g_assert(g_hash_table_contains(koji_stuff->blocked_srpms, "super_kernel")); g_assert(g_hash_table_contains(koji_stuff->blocked_srpms, "fake_kernel")); g_assert(g_hash_table_contains(koji_stuff->blocked_srpms, "fake_bash")); g_assert_cmpint(g_hash_table_size(koji_stuff->seen_rpms), ==, 0); gchar *origins_file_path = g_strconcat(tmp, "pkgorigins.gz", NULL); g_assert(g_file_test(origins_file_path, G_FILE_TEST_EXISTS)); g_assert(!koji_stuff->simple); koji_stuff_destroy(&koji_stuff); g_slist_free_full(local_repos, (GDestroyNotify) cr_metadatalocation_free); g_free(origins_file_path); g_free(blocked_files_path); g_free(tmp); g_free(template); } static void test_koji_stuff_02_get_newest_srpm_from_one_repo(void) { gchar *template = g_strdup(TMPDIR_TEMPLATE); gchar *tmp = g_strconcat(mkdtemp(template), "/", NULL); struct KojiMergedReposStuff *koji_stuff = NULL; struct CmdOptions o = {.koji=0, .blocked=NULL, .tmp_out_repo=tmp}; GSList *local_repos = NULL; struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) TEST_REPO_KOJI_01, TRUE, NULL); local_repos = g_slist_prepend(local_repos, loc); int ret = koji_stuff_prepare(&koji_stuff, &o, local_repos); g_assert_cmpint(ret, ==, 0); g_assert_cmpint(g_hash_table_size(koji_stuff->include_srpms), ==, 1); g_assert(g_hash_table_contains(koji_stuff->include_srpms, "dwm")); struct srpm_val *value = g_hash_table_lookup(koji_stuff->include_srpms, "dwm"); g_assert_cmpstr(value->sourcerpm, ==, "dwm-6.1-7.fc28.src.rpm"); koji_stuff_destroy(&koji_stuff); g_slist_free_full(local_repos, (GDestroyNotify) cr_metadatalocation_free); g_free(tmp); g_free(template); } static void test_koji_stuff_03_get_srpm_from_first_repo_even_if_its_older(void) { gchar *template = g_strdup(TMPDIR_TEMPLATE); gchar *tmp = g_strconcat(mkdtemp(template), "/", NULL); struct KojiMergedReposStuff *koji_stuff = NULL; struct CmdOptions o = {.koji=0, .blocked=NULL, .tmp_out_repo=tmp}; GSList *local_repos = NULL; struct cr_MetadataLocation *loc = cr_locate_metadata((gchar *) TEST_REPO_KOJI_01, TRUE, NULL); local_repos = g_slist_prepend(local_repos, loc); loc = cr_locate_metadata((gchar *) TEST_REPO_KOJI_02, TRUE, NULL); local_repos = g_slist_prepend(local_repos, loc); int ret = koji_stuff_prepare(&koji_stuff, &o, local_repos); g_assert_cmpint(ret, ==, 0); g_assert_cmpint(g_hash_table_size(koji_stuff->include_srpms), ==, 1); g_assert(g_hash_table_contains(koji_stuff->include_srpms, "dwm")); struct srpm_val *value = g_hash_table_lookup(koji_stuff->include_srpms, "dwm"); g_assert_cmpstr(value->sourcerpm, ==, "dwm-5.8.2-2.src.rpm"); koji_stuff_destroy(&koji_stuff); g_slist_free_full(local_repos, (GDestroyNotify) cr_metadatalocation_free); g_free(tmp); g_free(template); } struct KojiMergedReposStuff * create_empty_koji_stuff_for_test(gboolean simple) { struct KojiMergedReposStuff *koji_stuff; koji_stuff = g_malloc0(sizeof(struct KojiMergedReposStuff)); koji_stuff->include_srpms = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, cr_srpm_val_destroy); koji_stuff->seen_rpms = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); 
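    /* Unlike koji_stuff_prepare(), this test helper always allocates blocked_srpms
     * so the blocked-package code path can be exercised directly. */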
koji_stuff->blocked_srpms = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); koji_stuff->simple = simple; return koji_stuff; } struct srpm_val * create_srpm_val(int repoid, gchar* rpm_source) { struct srpm_val *srpm_value_new; srpm_value_new = g_malloc0(sizeof(struct srpm_val)); srpm_value_new->repo_id = repoid; srpm_value_new->sourcerpm = g_strdup(rpm_source); return srpm_value_new; } void koji_stuff_destroy_after_test(struct KojiMergedReposStuff **koji_stuff_ptr) { struct KojiMergedReposStuff *koji_stuff; if (!koji_stuff_ptr || !*koji_stuff_ptr) return; koji_stuff = *koji_stuff_ptr; if (koji_stuff->blocked_srpms) g_hash_table_destroy(koji_stuff->blocked_srpms); g_hash_table_destroy(koji_stuff->include_srpms); g_hash_table_destroy(koji_stuff->seen_rpms); cr_close(koji_stuff->pkgorigins, NULL); g_free(koji_stuff); } static void test_koji_allowed_pkg_not_included(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(0); g_hash_table_replace(koji_stuff->include_srpms, g_strdup_printf("dwm"), create_srpm_val(0, "dwm-5.8.2-2.src.rpm")); g_assert(!koji_allowed(pkg, koji_stuff)); koji_stuff_destroy_after_test(&koji_stuff); cr_package_free(pkg); } static void test_koji_allowed_pkg_included(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(0); g_hash_table_replace(koji_stuff->include_srpms, g_strdup_printf("foo"), create_srpm_val(0, "foo.src.rpm")); g_assert(koji_allowed(pkg, koji_stuff)); g_assert_cmpint(g_hash_table_size(koji_stuff->seen_rpms), ==, 1); g_assert(g_hash_table_contains(koji_stuff->seen_rpms, cr_package_nvra(pkg))); koji_stuff_destroy(&koji_stuff); cr_package_free(pkg); } static void test_koji_allowed_pkg_blocked(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(0); g_hash_table_replace(koji_stuff->include_srpms, g_strdup_printf("foo"), create_srpm_val(0, "foo.src.rpm")); g_hash_table_replace(koji_stuff->blocked_srpms, g_strdup_printf("foo"), NULL); g_assert(!koji_allowed(pkg, koji_stuff)); koji_stuff_destroy(&koji_stuff); cr_package_free(pkg); } static void test_koji_allowed_pkg_already_seen(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(0); g_hash_table_replace(koji_stuff->include_srpms, g_strdup_printf("foo"), create_srpm_val(0, "foo.src.rpm")); g_assert(koji_allowed(pkg, koji_stuff)); g_assert(!koji_allowed(pkg, koji_stuff)); koji_stuff_destroy(&koji_stuff); cr_package_free(pkg); } static void test_koji_allowed_simple_ignores_include(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(1); g_hash_table_replace(koji_stuff->include_srpms, g_strdup_printf("foo22"), create_srpm_val(0, "foo22.src.rpm")); g_assert(koji_allowed(pkg, koji_stuff)); koji_stuff_destroy(&koji_stuff); cr_package_free(pkg); } static void test_koji_allowed_simple_ignores_seen(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(1); g_hash_table_replace(koji_stuff->include_srpms, g_strdup_printf("foo22"), create_srpm_val(0, "foo22.src.rpm")); //we can add the same package more than once g_assert(koji_allowed(pkg, koji_stuff)); g_assert(koji_allowed(pkg, koji_stuff)); g_assert(koji_allowed(pkg, koji_stuff)); koji_stuff_destroy(&koji_stuff); cr_package_free(pkg); } static void 
test_koji_allowed_simple_respects_blocked(void) { cr_Package *pkg = get_package(); struct KojiMergedReposStuff *koji_stuff = create_empty_koji_stuff_for_test(1); g_hash_table_replace(koji_stuff->blocked_srpms, g_strdup_printf("foo"), NULL); g_assert(!koji_allowed(pkg, koji_stuff)); koji_stuff_destroy(&koji_stuff); cr_package_free(pkg); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/mergerepo_c/test_koji_stuff_00", test_koji_stuff_00); g_test_add_func("/mergerepo_c/test_koji_stuff_01", test_koji_stuff_01); g_test_add_func("/mergerepo_c/test_koji_stuff_02_get_newest_srpm_from_one_repo", test_koji_stuff_02_get_newest_srpm_from_one_repo); g_test_add_func("/mergerepo_c/test_koji_stuff_03_get_srpm_from_first_repo_even_if_its_older", test_koji_stuff_03_get_srpm_from_first_repo_even_if_its_older); g_test_add_func("/mergerepo_c/test_koji_allowed_pkg_not_included", test_koji_allowed_pkg_not_included); g_test_add_func("/mergerepo_c/test_koji_allowed_pkg_included", test_koji_allowed_pkg_included); g_test_add_func("/mergerepo_c/test_koji_allowed_pkg_blocked", test_koji_allowed_pkg_blocked); g_test_add_func("/mergerepo_c/test_koji_allowed_pkg_already_seen", test_koji_allowed_pkg_already_seen); g_test_add_func("/mergerepo_c/test_koji_allowed_simple_ignores_include", test_koji_allowed_simple_ignores_include); g_test_add_func("/mergerepo_c/test_koji_allowed_simple_ignores_seen", test_koji_allowed_simple_ignores_seen); g_test_add_func("/mergerepo_c/test_koji_allowed_simple_respects_blocked", test_koji_allowed_simple_respects_blocked); return g_test_run(); } createrepo_c-0.17.0/tests/test_load_metadata.c000066400000000000000000000204151400672373200213640ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/package.h" #include "createrepo/misc.h" #include "createrepo/load_metadata.h" #include "createrepo/locate_metadata.h" #include "createrepo/metadata_internal.h" #define REPO_SIZE_00 0 static const char *REPO_HASH_KEYS_00[] = {}; static const char *REPO_NAME_KEYS_00[] = {}; static const char *REPO_FILENAME_KEYS_00[] = {}; #define REPO_SIZE_01 1 static const char *REPO_HASH_KEYS_01[] = {"152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf"}; static const char *REPO_NAME_KEYS_01[] = {"super_kernel"}; static const char *REPO_FILENAME_KEYS_01[] = {"super_kernel-6.0.1-2.x86_64.rpm"}; #define REPO_SIZE_02 2 static const char *REPO_HASH_KEYS_02[] = {"6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3", "90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7"}; static const char *REPO_NAME_KEYS_02[] = {"super_kernel", "fake_bash"}; static const char *REPO_FILENAME_KEYS_02[] = {"super_kernel-6.0.1-2.x86_64.rpm", "fake_bash-1.1.1-1.x86_64.rpm"}; #define REPO_SIZE_03 0 static const char *REPO_HASH_KEYS_03[] = {}; static const char *REPO_NAME_KEYS_03[] = {}; static const char *REPO_FILENAME_KEYS_03[] = {}; static void test_cr_metadata_new(void) { guint len; cr_Metadata *metadata = NULL; // Get new metadata object metadata = cr_metadata_new(CR_HT_KEY_DEFAULT, 0, NULL); g_assert(metadata); // Check if it is empty len = g_hash_table_size(cr_metadata_hashtable(metadata)); g_assert_cmpint(len, ==, 0); cr_metadata_free(metadata); } void test_helper_check_keys(const char *repopath, cr_HashTableKey key, guint repo_size, const char *keys[]) { int ret; guint i; guint size; gpointer value; cr_Metadata *metadata; metadata = cr_metadata_new(key, 0, NULL); g_assert(metadata); g_assert(cr_metadata_hashtable(metadata)); ret = cr_metadata_locate_and_load_xml(metadata, repopath, NULL); g_assert_cmpint(ret, ==, CRE_OK); size = g_hash_table_size(cr_metadata_hashtable(metadata)); g_assert_cmpuint(size, ==, repo_size); for (i=0; i < repo_size; i++) { value = g_hash_table_lookup(cr_metadata_hashtable(metadata), (gconstpointer) keys[i]); if (!value) g_critical("Key \"%s\" not present!", keys[i]); } cr_metadata_free(metadata); } static void test_cr_metadata_locate_and_load_xml(void) { test_helper_check_keys(TEST_REPO_00, CR_HT_KEY_HASH, REPO_SIZE_00, REPO_HASH_KEYS_00); test_helper_check_keys(TEST_REPO_00, CR_HT_KEY_NAME, REPO_SIZE_00, REPO_NAME_KEYS_00); test_helper_check_keys(TEST_REPO_00, CR_HT_KEY_FILENAME, REPO_SIZE_00, REPO_FILENAME_KEYS_00); test_helper_check_keys(TEST_REPO_01, CR_HT_KEY_HASH, REPO_SIZE_01, REPO_HASH_KEYS_01); test_helper_check_keys(TEST_REPO_01, CR_HT_KEY_NAME, REPO_SIZE_01, REPO_NAME_KEYS_01); test_helper_check_keys(TEST_REPO_01, CR_HT_KEY_FILENAME, REPO_SIZE_01, REPO_FILENAME_KEYS_01); test_helper_check_keys(TEST_REPO_02, CR_HT_KEY_HASH, REPO_SIZE_02, REPO_HASH_KEYS_02); test_helper_check_keys(TEST_REPO_02, CR_HT_KEY_NAME, REPO_SIZE_02, REPO_NAME_KEYS_02); test_helper_check_keys(TEST_REPO_02, CR_HT_KEY_FILENAME, REPO_SIZE_02, REPO_FILENAME_KEYS_02); #ifdef WITH_LIBMODULEMD test_helper_check_keys(TEST_REPO_03, CR_HT_KEY_HASH, REPO_SIZE_03, REPO_HASH_KEYS_03); test_helper_check_keys(TEST_REPO_03, CR_HT_KEY_NAME, REPO_SIZE_03, REPO_NAME_KEYS_03); test_helper_check_keys(TEST_REPO_03, CR_HT_KEY_FILENAME, REPO_SIZE_03, REPO_FILENAME_KEYS_03); #else /* If we don't have libmodulemd support, this should fail to locate and * return CRE_MODULEMD */ struct cr_MetadataLocation 
*ml; g_autoptr (GError) err = NULL; ml = cr_locate_metadata(TEST_REPO_03, TRUE, &err); g_assert_error (err, CREATEREPO_C_ERROR, CRE_MODULEMD); #endif /* WITH_LIBMODULEMD */ } static void test_cr_metadata_locate_and_load_xml_detailed(void) { int ret; guint size; cr_Package *pkg; cr_Metadata *metadata; metadata = cr_metadata_new(CR_HT_KEY_NAME, 0, NULL); g_assert(metadata); ret = cr_metadata_locate_and_load_xml(metadata, TEST_REPO_01, NULL); g_assert_cmpint(ret, ==, CRE_OK); size = g_hash_table_size(cr_metadata_hashtable(metadata)); g_assert_cmpuint(size, ==, REPO_SIZE_01); pkg = (cr_Package *) g_hash_table_lookup(cr_metadata_hashtable(metadata), "super_kernel"); g_assert(pkg); g_assert_cmpstr(pkg->pkgId, ==, "152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf"); g_assert_cmpstr(pkg->name, ==, "super_kernel"); g_assert_cmpstr(pkg->arch, ==, "x86_64"); g_assert_cmpstr(pkg->version, ==, "6.0.1"); g_assert_cmpstr(pkg->epoch, ==, "0"); g_assert_cmpstr(pkg->release, ==, "2"); g_assert_cmpstr(pkg->summary, ==, "Test package"); g_assert_cmpstr(pkg->description, ==, "This package has provides, requires, obsoletes, conflicts options."); g_assert_cmpstr(pkg->url, ==, "http://so_super_kernel.com/it_is_awesome/yep_it_really_is"); g_assert_cmpint(pkg->time_file, ==, 1334667003); g_assert_cmpint(pkg->time_build, ==, 1334667003); g_assert_cmpstr(pkg->rpm_license, ==, "LGPLv2"); g_assert_cmpstr(pkg->rpm_vendor, ==, NULL); g_assert_cmpstr(pkg->rpm_group, ==, "Applications/System"); g_assert_cmpstr(pkg->rpm_buildhost, ==, "localhost.localdomain"); g_assert_cmpstr(pkg->rpm_sourcerpm, ==, "super_kernel-6.0.1-2.src.rpm"); g_assert_cmpint(pkg->rpm_header_start, ==, 280); g_assert_cmpint(pkg->rpm_header_end, ==, 2637); g_assert_cmpstr(pkg->rpm_packager, ==, NULL); g_assert_cmpint(pkg->size_package, ==, 2845); g_assert_cmpint(pkg->size_installed, ==, 0); g_assert_cmpint(pkg->size_archive, ==, 404); g_assert_cmpstr(pkg->location_href, ==, "super_kernel-6.0.1-2.x86_64.rpm"); g_assert(!pkg->location_base); g_assert_cmpstr(pkg->checksum_type, ==, "sha256"); cr_metadata_free(metadata); } #ifdef WITH_LIBMODULEMD static void test_cr_metadata_locate_and_load_modulemd(void) { int ret; guint size; cr_Metadata *metadata; metadata = cr_metadata_new(CR_HT_KEY_NAME, 0, NULL); g_assert(metadata); ret = cr_metadata_locate_and_load_xml(metadata, TEST_REPO_03, NULL); g_assert_cmpint(ret, ==, CRE_OK); size = g_hash_table_size(cr_metadata_hashtable(metadata)); g_assert_cmpuint(size, ==, REPO_SIZE_03); /* If loaded successfully, the index should contain a module named * testmodule */ g_assert_nonnull (modulemd_module_index_get_module ( cr_metadata_modulemd(metadata), "testmodule")); } #endif /* WITH_LIBMODULEMD */ int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/load_metadata/test_cr_metadata_new", test_cr_metadata_new); g_test_add_func("/load_metadata/test_cr_metadata_locate_and_load_xml", test_cr_metadata_locate_and_load_xml); g_test_add_func("/load_metadata/test_cr_metadata_locate_and_load_xml_detailed", test_cr_metadata_locate_and_load_xml_detailed); #ifdef WITH_LIBMODULEMD g_test_add_func("/load_metadata/test_cr_metadata_locate_and_load_modulemd", test_cr_metadata_locate_and_load_modulemd); #endif /* WITH_LIBMODULEMD */ return g_test_run(); } createrepo_c-0.17.0/tests/test_locate_metadata.c000066400000000000000000000236371400672373200217250ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This 
program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <glib.h>
#include <stdlib.h>
#include <stdio.h>
#include "fixtures.h"
#include "createrepo/error.h"
#include "createrepo/package.h"
#include "createrepo/misc.h"
#include "createrepo/locate_metadata.h"

static void
test_cr_cmp_metadatum_type(void)
{
    //compare equal with not allocated strings
    cr_Metadatum *m = g_malloc0(sizeof(cr_Metadatum));
    m->name = "/some/name/somewhere";
    m->type = "type";
    int out = cr_cmp_metadatum_type(m, "type");
    g_assert_cmpint(out, ==, 0);

    //compare equal with allocated strings
    m->name = g_strdup_printf("group");
    m->type = g_strdup_printf("group");
    gchar *type = g_strdup_printf("group");
    out = cr_cmp_metadatum_type(m, type);
    g_assert_cmpint(out, ==, 0);
    cr_metadatum_free(m);
    g_free(type);

    //compare bigger with allocated strings
    m = g_malloc0(sizeof(cr_Metadatum));
    m->name = g_strdup_printf("name");
    m->type = g_strdup_printf("group");
    type = g_strdup_printf("grou");
    out = cr_cmp_metadatum_type(m, type);
    g_assert_cmpint(out, >, 0);
    cr_metadatum_free(m);
    g_free(type);

    //compare smaller with allocated strings
    m = g_malloc0(sizeof(cr_Metadatum));
    m->name = g_strdup_printf("name");
    m->type = g_strdup_printf("group");
    type = g_strdup_printf("groupppppp");
    out = cr_cmp_metadatum_type(m, type);
    g_assert_cmpint(out, <, 0);
    cr_metadatum_free(m);
    g_free(type);
}

static void
test_cr_cmp_repomd_record_type(void)
{
    cr_RepomdRecord *r;
    int out;
    gchar *type;

    //compare equal with not allocated strings
    r = g_malloc0(sizeof(cr_RepomdRecord));
    r->location_real = "/some/name/somewhere";
    r->type = "type";
    out = cr_cmp_repomd_record_type(r, "type");
    g_assert_cmpint(out, ==, 0);
    g_free(r);

    //compare equal with allocated strings
    r = cr_repomd_record_new("group", "/some/path/somewhere");
    type = g_strdup_printf("group");
    out = cr_cmp_repomd_record_type(r, type);
    g_assert_cmpint(out, ==, 0);
    cr_repomd_record_free(r);
    g_free(type);

    //compare bigger with allocated strings
    r = cr_repomd_record_new("group", "/some/path/somewhere");
    type = g_strdup_printf("grou");
    out = cr_cmp_repomd_record_type(r, type);
    g_assert_cmpint(out, >, 0);
    cr_repomd_record_free(r);
    g_free(type);

    //compare smaller with allocated strings
    r = cr_repomd_record_new("group", "/some/path/somewhere");
    type = g_strdup_printf("groupppppp");
    out = cr_cmp_repomd_record_type(r, type);
    g_assert_cmpint(out, <, 0);
    cr_repomd_record_free(r);
    g_free(type);
}

static void
test_cr_copy_metadatum(void)
{
    //empty tmp_repo path
    char *tmp_dir, *tmp_repo, *out, *new_name;
    tmp_dir = g_strdup(TMPDIR_TEMPLATE);
    g_assert(mkdtemp(tmp_dir));
    tmp_repo = g_strconcat(tmp_dir, "/", NULL);
    g_assert_cmpint(g_mkdir_with_parents(tmp_repo, 0777), ==, 0);
    GError *err = NULL;
    new_name = cr_copy_metadatum(TEST_REPO_00_PRIMARY, tmp_repo, &err);
    out = g_strconcat(tmp_repo, "1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz", NULL);
    g_assert_cmpstr(new_name, ==, out);
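    /* The copied metadatum must actually exist at its new, checksum-prefixed location. */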
g_assert_true(g_file_test(new_name, G_FILE_TEST_EXISTS)); cr_remove_dir(tmp_repo, NULL); g_free(new_name); g_free(out); g_free(tmp_repo); //tmp_repo is a folder tmp_repo = g_strconcat(tmp_dir, "/folder/", NULL); err = NULL; g_assert_cmpint(g_mkdir_with_parents(tmp_repo, 0777), ==, 0); g_assert_true(g_file_test(tmp_repo, G_FILE_TEST_EXISTS)); new_name = cr_copy_metadatum(TEST_REPO_00_PRIMARY, tmp_repo, &err); out = g_strconcat(tmp_repo, "1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz", NULL); g_assert_cmpstr(new_name, ==, out); g_assert_true(g_file_test(new_name, G_FILE_TEST_EXISTS)); cr_remove_dir(tmp_repo, NULL); g_free(new_name); g_free(out); g_free(tmp_dir); g_free(tmp_repo); } static void test_cr_insert_additional_metadatum(void) { //add to not allocated GSList GSList *d = NULL; cr_Metadatum *m; d = cr_insert_additional_metadatum("./test_path.xml", "group", d); g_assert_true(d); g_assert_cmpstr(((cr_Metadatum *) d->data)->type, ==, "group"); g_assert_cmpstr(((cr_Metadatum *) d->data)->name, ==, "./test_path.xml"); g_slist_free_full(d, (GDestroyNotify) cr_metadatum_free); d = NULL; //replace one in list of one m = g_malloc0(sizeof(cr_Metadatum)); m->name = g_strdup_printf("name"); m->type = g_strdup_printf("group"); d = g_slist_prepend(d, m); g_assert_cmpstr(((cr_Metadatum *) d->data)->type, ==, "group"); g_assert_cmpstr(((cr_Metadatum *) d->data)->name, ==, "name"); d = cr_insert_additional_metadatum("./test_path.xml", "group", d); g_assert_true(d); g_assert_cmpstr(((cr_Metadatum *) d->data)->type, ==, "group"); g_assert_cmpstr(((cr_Metadatum *) d->data)->name, ==, "./test_path.xml"); g_assert_cmpint(g_slist_length(d), ==, 1); g_slist_free_full(d, (GDestroyNotify) cr_metadatum_free); d = NULL; //add new one to list of one m = g_malloc0(sizeof(cr_Metadatum)); m->name = g_strdup_printf("name"); m->type = g_strdup_printf("primary"); d = g_slist_prepend(d, m); d = cr_insert_additional_metadatum("./test_path.xml", "group", d); g_assert_true(d); g_assert_cmpstr(((cr_Metadatum *) d->data)->type, ==, "group"); g_assert_cmpstr(((cr_Metadatum *) d->data)->name, ==, "./test_path.xml"); g_assert_cmpint(g_slist_length(d), ==, 2); m = g_slist_nth_data(d, 1); g_assert_cmpstr(m->type, ==, "primary"); g_assert_cmpstr(m->name, ==, "name"); g_slist_free_full(d, (GDestroyNotify) cr_metadatum_free); d = NULL; } static void test_cr_parse_repomd(void) { struct cr_MetadataLocation *ret = NULL; ret = cr_parse_repomd(TEST_REPO_00_REPOMD, TEST_REPO_00, 1); g_assert_cmpint(0, ==, g_slist_length(ret->additional_metadata)); g_assert_cmpstr(TEST_REPO_00_REPOMD, ==, ret->repomd); g_assert_cmpstr(TEST_REPO_00, ==, ret->local_path); g_assert_cmpint(0, ==, ret->tmp); g_assert_cmpstr(TEST_REPO_00_PRIMARY, ==, ret->pri_xml_href); g_assert_cmpstr(TEST_REPO_00_OTHER, ==, ret->oth_xml_href); g_assert_cmpstr(TEST_REPO_00_FILELISTS, ==, ret->fil_xml_href); } static void test_cr_parse_repomd_with_additional_metadata(void) { struct cr_MetadataLocation *ret = NULL; ret = cr_parse_repomd(TEST_REPO_WITH_ADDITIONAL_METADATA_REPOMD, TEST_REPO_WITH_ADDITIONAL_METADATA, 0); g_assert_cmpint(8, ==, g_slist_length(ret->additional_metadata)); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_REPOMD, ==, ret->repomd); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA, ==, ret->local_path); g_assert_cmpint(0, ==, ret->tmp); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_PRIMARY_XML_GZ, ==, ret->pri_xml_href); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_OTHER_XML_GZ, ==, ret->oth_xml_href); 
g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_FILELISTS_XML_GZ, ==, ret->fil_xml_href); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_PRIMARY_SQLITE_BZ2, ==, ret->pri_sqlite_href); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_OTHER_SQLITE_BZ2, ==, ret->oth_sqlite_href); g_assert_cmpstr(TEST_REPO_WITH_ADDITIONAL_METADATA_FILELISTS_SQLITE_BZ2, ==, ret->fil_sqlite_href); cr_Metadatum *metadatum = g_slist_find_custom(ret->additional_metadata, "group", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "group_zck", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "group_gz", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "group_gz_zck", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "modules", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "modules_zck", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "updateinfo", cr_cmp_metadatum_type)->data; g_assert(metadatum); metadatum = g_slist_find_custom(ret->additional_metadata, "updateinfo_zck", cr_cmp_metadatum_type)->data; g_assert(metadatum); g_slist_free_full(ret->additional_metadata, (GDestroyNotify) cr_metadatum_free); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/locate_metadata/test_cr_cmp_metadatum_type", test_cr_cmp_metadatum_type); g_test_add_func("/locate_metadata/test_cr_cmp_repomd_record_type", test_cr_cmp_repomd_record_type); g_test_add_func("/locate_metadata/test_cr_copy_metadatum", test_cr_copy_metadatum); g_test_add_func("/locate_metadata/test_cr_insert_additional_metadatum", test_cr_insert_additional_metadatum); g_test_add_func("/locate_metadata/test_cr_parse_repomd", test_cr_parse_repomd); g_test_add_func("/locate_metadata/test_cr_parse_repomd_with_additional_metadata", test_cr_parse_repomd_with_additional_metadata); return g_test_run(); } createrepo_c-0.17.0/tests/test_misc.c000066400000000000000000001244221400672373200175430ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
 */

#include <glib.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "fixtures.h"
#include "createrepo/checksum.h"
#include "createrepo/misc.h"
#include "createrepo/error.h"

#define PACKAGE_01 TEST_PACKAGES_PATH"super_kernel-6.0.1-2.x86_64.rpm"
#define PACKAGE_01_HEADER_START 280
#define PACKAGE_01_HEADER_END 2637

#define PACKAGE_02 TEST_PACKAGES_PATH"fake_bash-1.1.1-1.x86_64.rpm"
#define PACKAGE_02_HEADER_START 280
#define PACKAGE_02_HEADER_END 2057

#define VALID_URL_01 "http://google.com/index.html"
#define URL_FILENAME_01 "index.html"
#define INVALID_URL "htp://foo.bar"

static void
test_cr_str_to_evr(void)
{
    cr_EVR *evr;

    // V
    evr = cr_str_to_evr("5.0.0", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "5.0.0");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr("6.1", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "6.1");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr("7", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "7");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    // VR
    evr = cr_str_to_evr("5.0.0-2", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "5.0.0");
    g_assert_cmpstr(evr->release, ==, "2");
    cr_evr_free(evr);

    evr = cr_str_to_evr("6.1-3", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "6.1");
    g_assert_cmpstr(evr->release, ==, "3");
    cr_evr_free(evr);

    evr = cr_str_to_evr("7-4", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "7");
    g_assert_cmpstr(evr->release, ==, "4");
    cr_evr_free(evr);

    // EV
    evr = cr_str_to_evr("1:5.0.0", NULL);
    g_assert_cmpstr(evr->epoch, ==, "1");
    g_assert_cmpstr(evr->version, ==, "5.0.0");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr("2:6.1", NULL);
    g_assert_cmpstr(evr->epoch, ==, "2");
    g_assert_cmpstr(evr->version, ==, "6.1");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr("3:7", NULL);
    g_assert_cmpstr(evr->epoch, ==, "3");
    g_assert_cmpstr(evr->version, ==, "7");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    //cr_EVR
    evr = cr_str_to_evr("1:5.0.0-11", NULL);
    g_assert_cmpstr(evr->epoch, ==, "1");
    g_assert_cmpstr(evr->version, ==, "5.0.0");
    g_assert_cmpstr(evr->release, ==, "11");
    cr_evr_free(evr);

    evr = cr_str_to_evr("2:6.1-22", NULL);
    g_assert_cmpstr(evr->epoch, ==, "2");
    g_assert_cmpstr(evr->version, ==, "6.1");
    g_assert_cmpstr(evr->release, ==, "22");
    cr_evr_free(evr);

    evr = cr_str_to_evr("3:7-33", NULL);
    g_assert_cmpstr(evr->epoch, ==, "3");
    g_assert_cmpstr(evr->version, ==, "7");
    g_assert_cmpstr(evr->release, ==, "33");
    cr_evr_free(evr);

    // Bad strings
    evr = cr_str_to_evr(":", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr(":-", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
    g_assert_cmpstr(evr->version, ==, "");
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    // Really bad values
    evr = cr_str_to_evr(NULL, NULL);
    g_assert_cmpstr(evr->epoch, ==, NULL);
    g_assert_cmpstr(evr->version, ==, NULL);
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr("", NULL);
    g_assert_cmpstr(evr->epoch, ==, NULL);
    g_assert_cmpstr(evr->version, ==, NULL);
    g_assert_cmpstr(evr->release, ==, NULL);
    cr_evr_free(evr);

    evr = cr_str_to_evr("-", NULL);
    g_assert_cmpstr(evr->epoch, ==, "0");
g_assert_cmpstr(evr->version, ==, ""); g_assert_cmpstr(evr->release, ==, NULL); cr_evr_free(evr); evr = cr_str_to_evr("-:", NULL); g_assert_cmpstr(evr->epoch, ==, NULL); g_assert_cmpstr(evr->version, ==, ""); g_assert_cmpstr(evr->release, ==, NULL); cr_evr_free(evr); evr = cr_str_to_evr("foo:bar", NULL); g_assert_cmpstr(evr->epoch, ==, NULL); g_assert_cmpstr(evr->version, ==, "bar"); g_assert_cmpstr(evr->release, ==, NULL); cr_evr_free(evr); } static void test_cr_str_to_evr_with_chunk(void) { cr_EVR *evr; GStringChunk *chunk; chunk = g_string_chunk_new(512); // V evr = cr_str_to_evr("5.0.0", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, "5.0.0"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("6.1", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, "6.1"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("7", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, "7"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); // VR evr = cr_str_to_evr("5.0.0-2", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, "5.0.0"); g_assert_cmpstr(evr->release, ==, "2"); g_free(evr); evr = cr_str_to_evr("6.1-3", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, "6.1"); g_assert_cmpstr(evr->release, ==, "3"); g_free(evr); evr = cr_str_to_evr("7-4", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, "7"); g_assert_cmpstr(evr->release, ==, "4"); g_free(evr); // EV evr = cr_str_to_evr("1:5.0.0", chunk); g_assert_cmpstr(evr->epoch, ==, "1"); g_assert_cmpstr(evr->version, ==, "5.0.0"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("2:6.1", chunk); g_assert_cmpstr(evr->epoch, ==, "2"); g_assert_cmpstr(evr->version, ==, "6.1"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("3:7", chunk); g_assert_cmpstr(evr->epoch, ==, "3"); g_assert_cmpstr(evr->version, ==, "7"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); //cr_EVR evr = cr_str_to_evr("1:5.0.0-11", chunk); g_assert_cmpstr(evr->epoch, ==, "1"); g_assert_cmpstr(evr->version, ==, "5.0.0"); g_assert_cmpstr(evr->release, ==, "11"); g_free(evr); evr = cr_str_to_evr("2:6.1-22", chunk); g_assert_cmpstr(evr->epoch, ==, "2"); g_assert_cmpstr(evr->version, ==, "6.1"); g_assert_cmpstr(evr->release, ==, "22"); g_free(evr); evr = cr_str_to_evr("3:7-33", chunk); g_assert_cmpstr(evr->epoch, ==, "3"); g_assert_cmpstr(evr->version, ==, "7"); g_assert_cmpstr(evr->release, ==, "33"); g_free(evr); // Bad strings evr = cr_str_to_evr(":", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, ""); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr(":-", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, ""); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); // Really bad values evr = cr_str_to_evr(NULL, chunk); g_assert_cmpstr(evr->epoch, ==, NULL); g_assert_cmpstr(evr->version, ==, NULL); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("", chunk); g_assert_cmpstr(evr->epoch, ==, NULL); g_assert_cmpstr(evr->version, ==, NULL); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("-", chunk); g_assert_cmpstr(evr->epoch, ==, "0"); g_assert_cmpstr(evr->version, ==, ""); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("-:", chunk); 
g_assert_cmpstr(evr->epoch, ==, NULL); g_assert_cmpstr(evr->version, ==, ""); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); evr = cr_str_to_evr("foo:bar", chunk); g_assert_cmpstr(evr->epoch, ==, NULL); g_assert_cmpstr(evr->version, ==, "bar"); g_assert_cmpstr(evr->release, ==, NULL); g_free(evr); g_string_chunk_free(chunk); } static void test_cr_is_primary(void) { g_assert(cr_is_primary("/etc/foobar")); g_assert(cr_is_primary("/etc/")); g_assert(!cr_is_primary("/foo/etc/foobar")); g_assert(!cr_is_primary("/tmp/etc/")); g_assert(cr_is_primary("/sbin/foobar")); g_assert(cr_is_primary("/bin/bash")); g_assert(cr_is_primary("/usr/sbin/foobar")); g_assert(cr_is_primary("/usr/bin/foobar")); g_assert(cr_is_primary("/usr/share/locale/bin/LC_MESSAGES")); // Sad, but we have to reflect yum behavior g_assert(cr_is_primary("/usr/share/man/bin/man0p")); // my heart is bleeding g_assert(!cr_is_primary("/foo/bindir")); g_assert(!cr_is_primary("/foo/sbindir")); g_assert(cr_is_primary("/usr/lib/sendmail")); g_assert(!cr_is_primary("/tmp/usr/lib/sendmail")); g_assert(!cr_is_primary("")); } static void test_cr_get_header_byte_range(void) { struct cr_HeaderRangeStruct hdr_range; GError *tmp_err = NULL; hdr_range = cr_get_header_byte_range(PACKAGE_01, NULL); g_assert_cmpuint(hdr_range.start, ==, PACKAGE_01_HEADER_START); g_assert_cmpuint(hdr_range.end, ==, PACKAGE_01_HEADER_END); hdr_range = cr_get_header_byte_range(PACKAGE_02, &tmp_err); g_assert(!tmp_err); g_assert_cmpuint(hdr_range.start, ==, PACKAGE_02_HEADER_START); g_assert_cmpuint(hdr_range.end, ==, PACKAGE_02_HEADER_END); hdr_range = cr_get_header_byte_range(NON_EXIST_FILE, &tmp_err); g_assert(tmp_err); g_error_free(tmp_err); tmp_err = NULL; g_assert_cmpuint(hdr_range.start, ==, 0); g_assert_cmpuint(hdr_range.end, ==, 0); } static void test_cr_get_filename(void) { char *filename; filename = cr_get_filename("/fooo/bar/file"); g_assert_cmpstr(filename, ==, "file"); filename = cr_get_filename("///fooo///bar///file"); g_assert_cmpstr(filename, ==, "file"); filename = cr_get_filename("/file"); g_assert_cmpstr(filename, ==, "file"); filename = cr_get_filename("///file"); g_assert_cmpstr(filename, ==, "file"); filename = cr_get_filename("file"); g_assert_cmpstr(filename, ==, "file"); filename = cr_get_filename("./file"); g_assert_cmpstr(filename, ==, "file"); filename = cr_get_filename(""); g_assert_cmpstr(filename, ==, ""); filename = cr_get_filename(NULL); g_assert_cmpstr(filename, ==, NULL); } static int read_file(char *f, cr_CompressionType compression, char* buffer, int amount) { int ret = CRE_OK; GError *tmp_err = NULL; CR_FILE *orig = NULL; orig = cr_open(f, CR_CW_MODE_READ, compression, &tmp_err); if (!orig) { ret = tmp_err->code; return ret; } cr_read(orig, buffer, amount, &tmp_err); if (orig) cr_close(orig, NULL); return ret; } #define DST_FILE "b" typedef struct { gchar *tmp_dir; gchar *dst_file; } Copyfiletest; static void copyfiletest_setup(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { copyfiletest->tmp_dir = g_strdup(TMPDIR_TEMPLATE); mkdtemp(copyfiletest->tmp_dir); copyfiletest->dst_file = g_strconcat(copyfiletest->tmp_dir, "/", DST_FILE, NULL); } static void copyfiletest_teardown(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { remove(copyfiletest->dst_file); rmdir(copyfiletest->tmp_dir); g_free(copyfiletest->tmp_dir); g_free(copyfiletest->dst_file); } static void copyfiletest_test_empty_file(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { gboolean ret; char *checksum; 
GError *tmp_err = NULL; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_copy_file(TEST_EMPTY_FILE, copyfiletest->dst_file, &tmp_err); g_assert(ret); g_assert(!tmp_err); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); g_free(checksum); } static void copyfiletest_test_text_file(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { gboolean ret; char *checksum; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_copy_file(TEST_TEXT_FILE, copyfiletest->dst_file, NULL); g_assert(ret); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "2f395bdfa2750978965e4781ddf224c89646c7d7a1569b7ebb023b170f7bd8bb"); g_free(checksum); } static void copyfiletest_test_binary_file(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { gboolean ret; char *checksum; GError *tmp_err = NULL; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_copy_file(TEST_BINARY_FILE, copyfiletest->dst_file, &tmp_err); g_assert(!tmp_err); g_assert(ret); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770b48f1a7282b015d6d883e"); g_free(checksum); } static void copyfiletest_test_rewrite(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { gboolean ret; char *checksum; GError *tmp_err = NULL; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_copy_file(TEST_BINARY_FILE, copyfiletest->dst_file, NULL); g_assert(ret); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770b48f1a7282b015d6d883e"); g_free(checksum); ret = cr_copy_file(TEST_TEXT_FILE, copyfiletest->dst_file, &tmp_err); g_assert(!tmp_err); g_assert(ret); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "2f395bdfa2750978965e4781ddf224c89646c7d7a1569b7ebb023b170f7bd8bb"); g_free(checksum); } static void copyfiletest_test_corner_cases(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { gboolean ret; GError *tmp_err = NULL; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); // Without GError ret = cr_copy_file(NON_EXIST_FILE, copyfiletest->dst_file, NULL); g_assert(!ret); g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); // With GError ret = cr_copy_file(NON_EXIST_FILE, copyfiletest->dst_file, &tmp_err); g_assert(tmp_err); g_error_free(tmp_err); g_assert(!ret); g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); } static void compressfile_test_text_file(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; char *checksum; GError *tmp_err = NULL; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_compress_file(TEST_TEXT_FILE, copyfiletest->dst_file, CR_CW_GZ_COMPRESSION, NULL, FALSE, &tmp_err); g_assert(!tmp_err); 
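/*
 * Editor's note (sketch only): besides hashing the compressed result, the
 * content could also be verified with the read_file() helper defined
 * above, the way the gz/xz round-trip tests below do.  Assuming
 * TEST_TEXT_FILE starts with the "Lorem ipsum" sample used elsewhere in
 * this file (and noting that this read_file() does not NUL-terminate the
 * buffer, so it should be zeroed first):
 *
 *   char buf[30] = { 0 };
 *   read_file(copyfiletest->dst_file, CR_CW_GZ_COMPRESSION, buf, 29);
 *   // g_assert(g_strrstr(buf, "Lorem ipsum"));
 */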
g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "8909fde88a5747d800fd2562b0f22945f014aa7df64" "cf1c15c7933ae54b72ab6"); g_free(checksum); } static void compressfile_with_stat_test_text_file(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; char *checksum; cr_ContentStat *stat; GError *tmp_err = NULL; stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_compress_file_with_stat(TEST_TEXT_FILE, copyfiletest->dst_file, CR_CW_GZ_COMPRESSION, stat, NULL, FALSE, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(TEST_TEXT_FILE, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(stat->checksum, ==, checksum); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); } static void compressfile_with_stat_test_gz_file_gz_output(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; char *checksum; cr_ContentStat *stat; GError *tmp_err = NULL; stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); char * dst_full_name = g_strconcat(copyfiletest->dst_file, ".gz", NULL); g_assert(!g_file_test(dst_full_name, G_FILE_TEST_EXISTS)); ret = cr_compress_file_with_stat(TEST_TEXT_FILE_GZ, dst_full_name, CR_CW_GZ_COMPRESSION, stat, NULL, FALSE, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(dst_full_name, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(TEST_TEXT_FILE, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(stat->checksum, ==, checksum); //assert content is readable after decompression and recompression char buf[30]; read_file(dst_full_name, CR_CW_GZ_COMPRESSION, buf, 30); g_assert(g_strrstr(buf, "Lorem ipsum dolor sit amet")); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); free(dst_full_name); } static void compressfile_test_gz_file_xz_output(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; GError *tmp_err = NULL; char * dst_full_name = g_strconcat(copyfiletest->dst_file, ".xz", NULL); g_assert(!g_file_test(dst_full_name, G_FILE_TEST_EXISTS)); ret = cr_compress_file(TEST_TEXT_FILE_GZ, dst_full_name, CR_CW_XZ_COMPRESSION, NULL, FALSE, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(dst_full_name, G_FILE_TEST_IS_REGULAR)); //assert content is readable after decompression and recompression char buf[30]; read_file(dst_full_name, CR_CW_XZ_COMPRESSION, buf, 30); g_assert(g_strrstr(buf, "Lorem ipsum dolor sit amet")); g_assert(!tmp_err); free(dst_full_name); } static void compressfile_test_xz_file_gz_output(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; GError *tmp_err = NULL; char * dst_full_name = g_strconcat(copyfiletest->dst_file, ".gz", NULL); g_assert(!g_file_test(dst_full_name, G_FILE_TEST_EXISTS)); ret = cr_compress_file(TEST_TEXT_FILE_XZ, dst_full_name, CR_CW_GZ_COMPRESSION, NULL, FALSE, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(dst_full_name, G_FILE_TEST_IS_REGULAR)); //assert content is readable after decompression and recompression char buf[30]; read_file(dst_full_name, CR_CW_GZ_COMPRESSION, buf, 30); g_assert(g_strrstr(buf, "Lorem ipsum dolor 
sit amet")); g_assert(!tmp_err); free(dst_full_name); } static void compressfile_test_sqlite_file_gz_output(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; GError *tmp_err = NULL; char * dst_full_name = g_strconcat(copyfiletest->dst_file, ".gz", NULL); g_assert(!g_file_test(dst_full_name, G_FILE_TEST_EXISTS)); ret = cr_compress_file(TEST_SQLITE_FILE, dst_full_name, CR_CW_GZ_COMPRESSION, NULL, FALSE, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(dst_full_name, G_FILE_TEST_EXISTS)); g_assert(!tmp_err); } static void decompressfile_with_stat_test_text_file(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { int ret; cr_ContentStat *stat; GError *tmp_err = NULL; stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &tmp_err); g_assert(stat); g_assert(!tmp_err); g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_decompress_file_with_stat(TEST_TEXT_FILE_GZ, copyfiletest->dst_file, CR_CW_GZ_COMPRESSION, stat, &tmp_err); g_assert(!tmp_err); g_assert_cmpint(ret, ==, CRE_OK); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); g_assert_cmpstr(stat->checksum, ==, TEST_TEXT_FILE_SHA256SUM); cr_contentstat_free(stat, &tmp_err); g_assert(!tmp_err); } static void test_cr_better_copy_file_local(Copyfiletest *copyfiletest, G_GNUC_UNUSED gconstpointer test_data) { gboolean ret; char *checksum; GError *tmp_err = NULL; g_assert(!g_file_test(copyfiletest->dst_file, G_FILE_TEST_EXISTS)); ret = cr_better_copy_file(TEST_BINARY_FILE, copyfiletest->dst_file, &tmp_err); g_assert(!tmp_err); g_assert(ret); g_assert(g_file_test(copyfiletest->dst_file, G_FILE_TEST_IS_REGULAR)); checksum = cr_checksum_file(copyfiletest->dst_file, CR_CHECKSUM_SHA256, NULL); g_assert_cmpstr(checksum, ==, "bf68e32ad78cea8287be0f35b74fa3fecd0eaa91770b48f1a7282b015d6d883e"); g_free(checksum); } static void test_cr_remove_dir(void) { char *tmp_dir; char *subdir01, *subdir02, *subsubdir011, *subsubsubdir0111; gchar *tmp_file_1, *tmp_file_2, *tmp_file_3; tmp_dir = g_strdup(TMPDIR_TEMPLATE); g_assert(mkdtemp(tmp_dir)); subdir01 = g_strconcat(tmp_dir, "/subdir01", NULL); subdir02 = g_strconcat(tmp_dir, "/subdir02", NULL); subsubdir011 = g_strconcat(subdir01, "/subsubdir011", NULL); subsubsubdir0111 = g_strconcat(subsubdir011, "/subsubsubdir0111", NULL); g_assert_cmpint(g_mkdir_with_parents(subdir02, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH), ==, 0); g_assert_cmpint(g_mkdir_with_parents(subsubsubdir0111, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH), ==, 0); tmp_file_1 = g_strconcat(subsubsubdir0111, "/file_0111", NULL); tmp_file_2 = g_strconcat(subsubdir011, "/file_011", NULL); tmp_file_3 = g_strconcat(subdir02, "/file_02", NULL); g_assert(g_file_test(tmp_dir, G_FILE_TEST_EXISTS)); g_assert(!g_file_test(tmp_file_1, G_FILE_TEST_EXISTS)); g_assert(!g_file_test(tmp_file_2, G_FILE_TEST_EXISTS)); g_assert(!g_file_test(tmp_file_3, G_FILE_TEST_EXISTS)); FILE *f; f = fopen(tmp_file_1, "w"); fputs("foo\n", f); fclose(f); f = fopen(tmp_file_2, "w"); fputs("bar\n", f); fclose(f); f = fopen(tmp_file_3, "w"); fputs("foobar\n", f); fclose(f); g_assert(g_file_test(tmp_file_1, G_FILE_TEST_EXISTS)); g_assert(g_file_test(tmp_file_2, G_FILE_TEST_EXISTS)); g_assert(g_file_test(tmp_file_3, G_FILE_TEST_EXISTS)); cr_remove_dir(tmp_dir, NULL); g_assert(!g_file_test(tmp_file_1, G_FILE_TEST_EXISTS)); g_assert(!g_file_test(tmp_file_2, G_FILE_TEST_EXISTS)); g_assert(!g_file_test(tmp_file_3, G_FILE_TEST_EXISTS)); g_assert(!g_file_test(tmp_dir, 
G_FILE_TEST_EXISTS)); g_free(tmp_dir); g_free(subdir01); g_free(subdir02); g_free(subsubdir011); g_free(subsubsubdir0111); g_free(tmp_file_1); g_free(tmp_file_2); g_free(tmp_file_3); } static void test_cr_normalize_dir_path(void) { char *normalized; normalized = cr_normalize_dir_path("/////////"); g_assert_cmpstr(normalized, ==, "/"); g_free(normalized); normalized = cr_normalize_dir_path("///foo///bar///"); g_assert_cmpstr(normalized, ==, "///foo///bar/"); g_free(normalized); normalized = cr_normalize_dir_path("bar"); g_assert_cmpstr(normalized, ==, "bar/"); g_free(normalized); normalized = cr_normalize_dir_path(".////////////bar"); g_assert_cmpstr(normalized, ==, ".////////////bar/"); g_free(normalized); normalized = cr_normalize_dir_path("////////////bar"); g_assert_cmpstr(normalized, ==, "////////////bar/"); g_free(normalized); normalized = cr_normalize_dir_path("bar//////"); g_assert_cmpstr(normalized, ==, "bar/"); g_free(normalized); normalized = cr_normalize_dir_path(""); g_assert_cmpstr(normalized, ==, "./"); g_free(normalized); normalized = cr_normalize_dir_path(NULL); g_assert_cmpstr(normalized, ==, NULL); g_free(normalized); } static void test_cr_str_to_version(void) { struct cr_Version ver; ver = cr_str_to_version(NULL); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version(""); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version("abcd"); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "abcd"); g_free(ver.suffix); ver = cr_str_to_version("0.0.0"); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version("9"); g_assert_cmpint(ver.major, ==, 9); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version("3beta"); g_assert_cmpint(ver.major, ==, 3); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "beta"); g_free(ver.suffix); ver = cr_str_to_version("5.2gamma"); g_assert_cmpint(ver.major, ==, 5); g_assert_cmpint(ver.minor, ==, 2); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "gamma"); g_free(ver.suffix); ver = cr_str_to_version("0.0.0b"); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "b"); g_free(ver.suffix); ver = cr_str_to_version("2.3.4"); g_assert_cmpint(ver.major, ==, 2); g_assert_cmpint(ver.minor, ==, 3); g_assert_cmpint(ver.patch, ==, 4); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version("11.33.123"); g_assert_cmpint(ver.major, ==, 11); g_assert_cmpint(ver.minor, ==, 33); g_assert_cmpint(ver.patch, ==, 123); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version("1234567.0987654.45678"); g_assert_cmpint(ver.major, ==, 1234567); g_assert_cmpint(ver.minor, ==, 987654); g_assert_cmpint(ver.patch, ==, 45678); g_assert_cmpstr(ver.suffix, ==, NULL); ver = cr_str_to_version("1.0.2i"); g_assert_cmpint(ver.major, ==, 1); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 2); g_assert_cmpstr(ver.suffix, ==, "i"); g_free(ver.suffix); ver = cr_str_to_version("1..3"); g_assert_cmpint(ver.major, 
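/*
 * Editor's note: cr_str_to_version() fills missing numeric components
 * with 0 and routes any non-numeric tail into ->suffix, which is the only
 * heap-allocated member (hence g_free(ver.suffix) is called after the
 * suffix cases only).  Hedged sketch, same pattern as the cases around:
 *
 *   struct cr_Version v = cr_str_to_version("2.5rc1");
 *   // expected by analogy: major 2, minor 5, patch 0, suffix "rc1"
 *   g_free(v.suffix);
 */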
==, 1); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 3); g_assert_cmpstr(ver.suffix, ==, NULL); g_free(ver.suffix); ver = cr_str_to_version("..alpha"); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "alpha"); g_free(ver.suffix); ver = cr_str_to_version("alpha"); g_assert_cmpint(ver.major, ==, 0); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "alpha"); g_free(ver.suffix); ver = cr_str_to_version("1-2-3"); g_assert_cmpint(ver.major, ==, 1); g_assert_cmpint(ver.minor, ==, 0); g_assert_cmpint(ver.patch, ==, 0); g_assert_cmpstr(ver.suffix, ==, "-2-3"); g_free(ver.suffix); } static void test_cr_cmp_version_str(void) { int ret; ret = cr_cmp_version_str(NULL, NULL); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("", ""); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str(NULL, ""); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("", NULL); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("3", "3"); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("1", "2"); g_assert_cmpint(ret, ==, 2); ret = cr_cmp_version_str("99", "8"); g_assert_cmpint(ret, ==, 1); ret = cr_cmp_version_str("5.4.3", "5.4.3"); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("5.3.2", "5.3.1"); g_assert_cmpint(ret, ==, 1); ret = cr_cmp_version_str("5.3.5", "5.3.6"); g_assert_cmpint(ret, ==, 2); ret = cr_cmp_version_str("6.3.2a", "6.3.2b"); g_assert_cmpint(ret, ==, 2); ret = cr_cmp_version_str("6.3.2azb", "6.3.2abc"); g_assert_cmpint(ret, ==, 1); ret = cr_cmp_version_str("1.2beta", "1.2beta"); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("n", "n"); g_assert_cmpint(ret, ==, 0); ret = cr_cmp_version_str("c", "b"); g_assert_cmpint(ret, ==, 1); ret = cr_cmp_version_str("c", "f"); g_assert_cmpint(ret, ==, 2); ret = cr_cmp_version_str("2.1", "2.1.3"); g_assert_cmpint(ret, ==, 2); } static void test_cr_split_rpm_filename(void) { cr_NEVRA *res; res = cr_split_rpm_filename(NULL); g_assert(!res); res = cr_split_rpm_filename("foo-1.0-1.i386"); g_assert(res); g_assert_cmpstr(res->name, ==, "foo"); g_assert_cmpstr(res->version, ==, "1.0"); g_assert_cmpstr(res->release, ==, "1"); g_assert(!res->epoch); g_assert_cmpstr(res->arch, ==, "i386"); cr_nevra_free(res); res = cr_split_rpm_filename("1:bar-9-123a.ia64.rpm"); g_assert(res); g_assert_cmpstr(res->name, ==, "bar"); g_assert_cmpstr(res->version, ==, "9"); g_assert_cmpstr(res->release, ==, "123a"); g_assert_cmpstr(res->epoch, ==, "1"); g_assert_cmpstr(res->arch, ==, "ia64"); cr_nevra_free(res); res = cr_split_rpm_filename("bar-2:9-123a.ia64.rpm"); g_assert(res); g_assert_cmpstr(res->name, ==, "bar"); g_assert_cmpstr(res->version, ==, "9"); g_assert_cmpstr(res->release, ==, "123a"); g_assert_cmpstr(res->epoch, ==, "2"); g_assert_cmpstr(res->arch, ==, "ia64"); cr_nevra_free(res); res = cr_split_rpm_filename("bar-9-123a:3.ia64.rpm"); g_assert(res); g_assert_cmpstr(res->name, ==, "bar"); g_assert_cmpstr(res->version, ==, "9"); g_assert_cmpstr(res->release, ==, "123a"); g_assert_cmpstr(res->epoch, ==, "3"); g_assert_cmpstr(res->arch, ==, "ia64"); cr_nevra_free(res); res = cr_split_rpm_filename("bar-9-123a.ia64.rpm:4"); g_assert(res); g_assert_cmpstr(res->name, ==, "bar"); g_assert_cmpstr(res->version, ==, "9"); g_assert_cmpstr(res->release, ==, "123a"); g_assert_cmpstr(res->epoch, ==, "4"); g_assert_cmpstr(res->arch, ==, "ia64"); cr_nevra_free(res); res = cr_split_rpm_filename("bar-9-123a.ia64:5"); 
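/*
 * Editor's note: the cases above and below show how tolerant
 * cr_split_rpm_filename() is about the epoch position -- prefixed to the
 * name, embedded before the version, or appended after the release, the
 * arch, or the ".rpm" suffix -- and the trailing ".rpm" is dropped either
 * way.  Hedged example of the common layout (values inferred by analogy
 * with the asserted cases, not re-asserted here):
 *
 *   cr_NEVRA *n = cr_split_rpm_filename("foo-1:2.0-3.el8.x86_64.rpm");
 *   // expected: name "foo", epoch "1", version "2.0",
 *   //           release "3.el8", arch "x86_64"
 *   cr_nevra_free(n);
 */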
g_assert(res); g_assert_cmpstr(res->name, ==, "bar"); g_assert_cmpstr(res->version, ==, "9"); g_assert_cmpstr(res->release, ==, "123a"); g_assert_cmpstr(res->epoch, ==, "5"); g_assert_cmpstr(res->arch, ==, "ia64"); cr_nevra_free(res); res = cr_split_rpm_filename("b"); g_assert(res); g_assert_cmpstr(res->name, ==, "b"); g_assert(!res->version); g_assert(!res->release); g_assert(!res->epoch); g_assert(!res->arch); cr_nevra_free(res); } static void test_cr_str_to_nevr(void) { cr_NEVR *res; res = cr_str_to_nevr(NULL); g_assert(!res); res = cr_str_to_nevr("createrepo-0.9.9-22.fc20"); g_assert(res); g_assert_cmpstr(res->name, ==, "createrepo"); g_assert_cmpstr(res->version, ==, "0.9.9"); g_assert_cmpstr(res->release, ==, "22.fc20"); g_assert(!res->epoch); cr_nevr_free(res); res = cr_str_to_nevr("bar-4:9-123a"); g_assert(res); g_assert_cmpstr(res->name, ==, "bar"); g_assert_cmpstr(res->version, ==, "9"); g_assert_cmpstr(res->release, ==, "123a"); g_assert_cmpstr(res->epoch, ==, "4"); cr_nevr_free(res); res = cr_str_to_nevr("3:foo-2-el.6"); g_assert(res); g_assert_cmpstr(res->name, ==, "foo"); g_assert_cmpstr(res->version, ==, "2"); g_assert_cmpstr(res->release, ==, "el.6"); g_assert_cmpstr(res->epoch, ==, "3"); cr_nevr_free(res); res = cr_str_to_nevr("foo-2-el.6:3"); g_assert(res); g_assert_cmpstr(res->name, ==, "foo"); g_assert_cmpstr(res->version, ==, "2"); g_assert_cmpstr(res->release, ==, "el.6"); g_assert_cmpstr(res->epoch, ==, "3"); cr_nevr_free(res); res = cr_str_to_nevr("b-1-2"); g_assert(res); g_assert_cmpstr(res->name, ==, "b"); g_assert_cmpstr(res->version, ==, "1"); g_assert_cmpstr(res->release, ==, "2"); g_assert(!res->epoch); cr_nevr_free(res); res = cr_str_to_nevr("b"); g_assert(res); g_assert_cmpstr(res->name, ==, "b"); g_assert(!res->version); g_assert(!res->release); g_assert(!res->epoch); cr_nevr_free(res); } static void test_cr_str_to_nevra(void) { cr_NEVRA *res; res = cr_str_to_nevra(NULL); g_assert(!res); res = cr_str_to_nevra("crypto-utils-2.4.1-52.fc20.x86_64"); g_assert(res); g_assert_cmpstr(res->name, ==, "crypto-utils"); g_assert_cmpstr(res->version, ==, "2.4.1"); g_assert_cmpstr(res->release, ==, "52.fc20"); g_assert(!res->epoch); g_assert_cmpstr(res->arch, ==, "x86_64"); cr_nevra_free(res); res = cr_str_to_nevra("crypto-utils-1:2.4.1-52.fc20.x86_64"); g_assert(res); g_assert_cmpstr(res->name, ==, "crypto-utils"); g_assert_cmpstr(res->version, ==, "2.4.1"); g_assert_cmpstr(res->release, ==, "52.fc20"); g_assert_cmpstr(res->epoch, ==, "1"); g_assert_cmpstr(res->arch, ==, "x86_64"); cr_nevra_free(res); res = cr_str_to_nevra("2:crypto-utils-2.4.1-52.fc20.x86_64"); g_assert(res); g_assert_cmpstr(res->name, ==, "crypto-utils"); g_assert_cmpstr(res->version, ==, "2.4.1"); g_assert_cmpstr(res->release, ==, "52.fc20"); g_assert_cmpstr(res->epoch, ==, "2"); g_assert_cmpstr(res->arch, ==, "x86_64"); cr_nevra_free(res); res = cr_str_to_nevra("crypto-utils-2.4.1-52.fc20:3.x86_64"); g_assert(res); g_assert_cmpstr(res->name, ==, "crypto-utils"); g_assert_cmpstr(res->version, ==, "2.4.1"); g_assert_cmpstr(res->release, ==, "52.fc20"); g_assert_cmpstr(res->epoch, ==, "3"); g_assert_cmpstr(res->arch, ==, "x86_64"); cr_nevra_free(res); res = cr_str_to_nevra("crypto-utils-2.4.1-52.fc20.x86_64:4"); g_assert(res); g_assert_cmpstr(res->name, ==, "crypto-utils"); g_assert_cmpstr(res->version, ==, "2.4.1"); g_assert_cmpstr(res->release, ==, "52.fc20"); g_assert_cmpstr(res->epoch, ==, "4"); g_assert_cmpstr(res->arch, ==, "x86_64"); cr_nevra_free(res); res = cr_str_to_nevra("a"); g_assert(res); 
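/*
 * Editor's note: cr_str_to_nevra() behaves like cr_str_to_nevr() plus an
 * arch split on the last dot, and the degenerate "a" case below shows that
 * a string with no separators lands entirely in ->name with all other
 * fields NULL.  Illustrative pairing (hypothetical package string):
 *
 *   cr_NEVR  *nevr  = cr_str_to_nevr("pkg-1:2.0-3.fc33");
 *   cr_NEVRA *nevra = cr_str_to_nevra("pkg-1:2.0-3.fc33.noarch");
 *   // nevr:  name/epoch/version/release only
 *   // nevra: the same fields plus arch == "noarch"
 *   cr_nevr_free(nevr);
 *   cr_nevra_free(nevra);
 */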
g_assert_cmpstr(res->name, ==, "a"); g_assert(!res->version); g_assert(!res->release); g_assert(!res->epoch); g_assert(!res->arch); cr_nevra_free(res); } static void test_cr_cmp_evr(void) { int res; res = cr_cmp_evr(NULL, "2", "1", "0", "2", "1"); g_assert_cmpint(res, ==, 0); res = cr_cmp_evr(NULL, "2", "2", "0", "2", "1"); g_assert_cmpint(res, ==, 1); res = cr_cmp_evr("0", "2", "2", "1", "2", "1"); g_assert_cmpint(res, ==, -1); res = cr_cmp_evr(NULL, "22", "2", "0", "2", "2"); g_assert_cmpint(res, ==, 1); res = cr_cmp_evr(NULL, "13", "2", "0", "2", "2"); g_assert_cmpint(res, ==, 1); res = cr_cmp_evr(NULL, "55", "2", NULL, "55", "2"); g_assert_cmpint(res, ==, 0); res = cr_cmp_evr(NULL, "0", "2a", "0", "0", "2b"); g_assert_cmpint(res, ==, -1); res = cr_cmp_evr(NULL, "0", "2", "0", NULL, "3"); g_assert_cmpint(res, ==, 1); } static void test_cr_cut_dirs(void) { char *res; res = cr_cut_dirs(NULL, 1); g_assert_cmpstr(res, ==, NULL); res = cr_cut_dirs("", 1); g_assert_cmpstr(res, ==, ""); res = cr_cut_dirs("foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("/foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("//foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("///foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("bar/foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("/bar/foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("bar//foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("//bar//foo.rpm", 1); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("///a///b/foo.rpm", 1); g_assert_cmpstr(res, ==, "b/foo.rpm"); res = cr_cut_dirs("a/b/c/foo.rpm", 1); g_assert_cmpstr(res, ==, "b/c/foo.rpm"); res = cr_cut_dirs("a/b/c/foo.rpm", 2); g_assert_cmpstr(res, ==, "c/foo.rpm"); res = cr_cut_dirs("a/b/c/foo.rpm", 3); g_assert_cmpstr(res, ==, "foo.rpm"); res = cr_cut_dirs("a///b///c///foo.rpm", 3); g_assert_cmpstr(res, ==, "foo.rpm"); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/misc/test_cr_str_to_evr", test_cr_str_to_evr); g_test_add_func("/misc/test_cr_str_to_evr_with_chunk", test_cr_str_to_evr_with_chunk); g_test_add_func("/misc/test_cr_is_primary", test_cr_is_primary); g_test_add_func("/misc/test_cr_get_header_byte_range", test_cr_get_header_byte_range); g_test_add_func("/misc/test_cr_get_filename", test_cr_get_filename); g_test_add("/misc/copyfiletest_test_empty_file", Copyfiletest, NULL, copyfiletest_setup, copyfiletest_test_empty_file, copyfiletest_teardown); g_test_add("/misc/copyfiletest_test_text_file", Copyfiletest, NULL, copyfiletest_setup, copyfiletest_test_text_file, copyfiletest_teardown); g_test_add("/misc/copyfiletest_test_binary_file", Copyfiletest, NULL, copyfiletest_setup, copyfiletest_test_binary_file, copyfiletest_teardown); g_test_add("/misc/copyfiletest_test_rewrite", Copyfiletest, NULL, copyfiletest_setup, copyfiletest_test_rewrite, copyfiletest_teardown); g_test_add("/misc/copyfiletest_test_corner_cases", Copyfiletest, NULL, copyfiletest_setup, copyfiletest_test_corner_cases, copyfiletest_teardown); g_test_add("/misc/compressfile_test_text_file", Copyfiletest, NULL, copyfiletest_setup, compressfile_test_text_file, copyfiletest_teardown); g_test_add("/misc/compressfile_with_stat_test_text_file", Copyfiletest, NULL, copyfiletest_setup, compressfile_with_stat_test_text_file, copyfiletest_teardown); g_test_add("/misc/compressfile_with_stat_test_gz_file_gz_output", Copyfiletest, NULL, copyfiletest_setup, 
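/*
 * Editor's note: fixture-based cases are registered with the g_test_add()
 * macro (test path, fixture type, user data, setup, test body, teardown),
 * while the stateless checks use g_test_add_func().  Sketch of adding one
 * more fixture case, assuming a hypothetical copyfiletest_test_example()
 * with the same signature as the tests registered here:
 *
 *   g_test_add("/misc/copyfiletest_test_example", Copyfiletest, NULL,
 *              copyfiletest_setup, copyfiletest_test_example,
 *              copyfiletest_teardown);
 */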
compressfile_with_stat_test_gz_file_gz_output, copyfiletest_teardown); g_test_add("/misc/compressfile_test_gz_file_xz_output", Copyfiletest, NULL, copyfiletest_setup, compressfile_test_gz_file_xz_output, copyfiletest_teardown); g_test_add("/misc/compressfile_test_xz_file_gz_output", Copyfiletest, NULL, copyfiletest_setup, compressfile_test_xz_file_gz_output, copyfiletest_teardown); g_test_add("/misc/compressfile_test_sqlite_file_gz_output", Copyfiletest, NULL, copyfiletest_setup, compressfile_test_sqlite_file_gz_output, copyfiletest_teardown); g_test_add("/misc/decompressfile_with_stat_test_text_file", Copyfiletest, NULL, copyfiletest_setup, decompressfile_with_stat_test_text_file, copyfiletest_teardown); g_test_add("/misc/test_cr_better_copy_file_local", Copyfiletest, NULL, copyfiletest_setup, test_cr_better_copy_file_local, copyfiletest_teardown); g_test_add_func("/misc/test_cr_normalize_dir_path", test_cr_normalize_dir_path); g_test_add_func("/misc/test_cr_remove_dir", test_cr_remove_dir); g_test_add_func("/misc/test_cr_str_to_version", test_cr_str_to_version); g_test_add_func("/misc/test_cr_cmp_version_str", test_cr_cmp_version_str); g_test_add_func("/misc/test_cr_split_rpm_filename", test_cr_split_rpm_filename); g_test_add_func("/misc/test_cr_str_to_nevr", test_cr_str_to_nevr); g_test_add_func("/misc/test_cr_str_to_nevra", test_cr_str_to_nevra); g_test_add_func("/misc/test_cr_cmp_evr", test_cr_cmp_evr); g_test_add_func("/misc/test_cr_cut_dirs", test_cr_cut_dirs); return g_test_run(); } createrepo_c-0.17.0/tests/test_modifyrepo_shared.c000066400000000000000000000110431400672373200223050ustar00rootroot00000000000000/* * Copyright (C) 2018 Red Hat, Inc. * * Licensed under the GNU Lesser General Public License Version 2.1 * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include "fixtures.h" #include "createrepo/misc.h" #include "createrepo/modifyrepo_shared.h" static void copy_repo_TEST_REPO_00(const gchar *target_path, const gchar *tmp){ g_assert(!g_mkdir_with_parents(target_path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)); gchar *md = g_strconcat(tmp, "/", TEST_REPO_00_REPOMD, NULL); gchar *prim = g_strconcat(tmp, "/", TEST_REPO_00_PRIMARY, NULL); gchar *file = g_strconcat(tmp, "/", TEST_REPO_00_FILELISTS, NULL); gchar *oth = g_strconcat(tmp, "/", TEST_REPO_00_OTHER, NULL); g_assert(cr_copy_file(TEST_REPO_00_REPOMD, md, NULL)); g_assert(cr_copy_file(TEST_REPO_00_PRIMARY, prim, NULL)); g_assert(cr_copy_file(TEST_REPO_00_FILELISTS, file, NULL)); g_assert(cr_copy_file(TEST_REPO_00_OTHER, oth, NULL)); g_free(md); g_free(prim); g_free(file); g_free(oth); } static void test_cr_remove_compression_suffix_with_none(void) { GError **err = NULL; gchar *out = cr_remove_compression_suffix_if_present(TEST_TEXT_FILE, err); g_assert_cmpstr(out, ==, "testdata/test_files/text_file"); g_free(out); } static void test_cr_remove_compression_suffix(void) { GError **err = NULL; gchar *out = cr_remove_compression_suffix_if_present(TEST_TEXT_FILE_GZ, err); g_assert_cmpstr(out, ==, "testdata/test_files/text_file"); g_free(out); out = cr_remove_compression_suffix_if_present(TEST_TEXT_FILE_XZ, err); g_assert_cmpstr(out, ==, "testdata/test_files/text_file"); g_free(out); out = cr_remove_compression_suffix_if_present(TEST_SQLITE_FILE, err); g_assert_cmpstr(out, ==, "testdata/test_files/sqlite_file.sqlite"); g_free(out); } static void test_cr_write_file(void) { char *tmp_dir; tmp_dir = g_strdup(TMPDIR_TEMPLATE); g_assert(mkdtemp(tmp_dir)); gchar *repopath = g_strconcat(tmp_dir, "/", TEST_REPO_00, "repodata", NULL); copy_repo_TEST_REPO_00(repopath, tmp_dir); cr_ModifyRepoTask *task = cr_modifyrepotask_new(); task->path = TEST_TEXT_FILE; task->compress = 1; GError **err = NULL; cr_write_file(repopath, task, CR_CW_GZ_COMPRESSION, err); //bz1639287 file should not be named text_file.gz.gz gchar *dst = g_strconcat(repopath, "/", "text_file.gz" , NULL); g_assert(g_file_test(dst, G_FILE_TEST_EXISTS)); cr_modifyrepotask_free(task); g_free(repopath); g_free(tmp_dir); g_free(dst); } static void test_cr_write_file_with_gz_file(void) { char *tmp_dir; tmp_dir = g_strdup(TMPDIR_TEMPLATE); g_assert(mkdtemp(tmp_dir)); gchar *repopath = g_strconcat(tmp_dir, "/", TEST_REPO_00, "repodata", NULL); copy_repo_TEST_REPO_00(repopath, tmp_dir); cr_ModifyRepoTask *task = cr_modifyrepotask_new(); task->path = TEST_TEXT_FILE_GZ; task->compress = 1; GError **err = NULL; char * out = cr_write_file(repopath, task, CR_CW_GZ_COMPRESSION, err); //bz1639287 file should not be named text_file.gz.gz gchar *dst = g_strconcat(repopath, "/", "text_file.gz" , NULL); g_assert_cmpstr(out, ==, dst); g_assert(g_file_test(dst, G_FILE_TEST_EXISTS)); cr_modifyrepotask_free(task); g_free(repopath); g_free(tmp_dir); g_free(dst); g_free(out); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/modifyrepo_shared/test_cr_remove_compression_suffix", test_cr_remove_compression_suffix); g_test_add_func("/modifyrepo_shared/test_cr_remove_compression_suffix_with_none", test_cr_remove_compression_suffix_with_none); g_test_add_func("/modifyrepo_shared/test_cr_write_file", 
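/*
 * Editor's note: the two cr_write_file() cases above encode the bz1639287
 * fix exercised by cr_remove_compression_suffix_if_present() -- an input
 * that is already gz-compressed must not gain a second suffix.  Expected
 * naming, taken from the asserts above:
 *
 *   text_file    + CR_CW_GZ_COMPRESSION -> <repodata>/text_file.gz
 *   text_file.gz + CR_CW_GZ_COMPRESSION -> <repodata>/text_file.gz
 *                                          (not text_file.gz.gz)
 */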
test_cr_write_file); g_test_add_func("/modifyrepo_shared/test_cr_write_file_with_gz_file", test_cr_write_file_with_gz_file); return g_test_run(); } createrepo_c-0.17.0/tests/test_sqlite.c000066400000000000000000000142351400672373200201110ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2012 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include #include #include #include "fixtures.h" #include "createrepo/misc.h" #include "createrepo/package.h" #include "createrepo/sqlite.h" #include "createrepo/parsepkg.h" #include "createrepo/constants.h" #define TMP_DIR_PATTERN "/tmp/createrepo_test_XXXXXX" #define TMP_PRIMARY_NAME "primary.sqlite" #define TMP_FILELISTS_NAME "filelists.sqlite" #define TMP_OTHER_NAME "other.sqlite" #define EMPTY_PKG TEST_PACKAGES_PATH"empty-0-0.x86_64.rpm" #define EMPTY_PKG_SRC TEST_PACKAGES_PATH"empty-0-0.src.rpm" typedef struct { gchar *tmp_dir; } TestData; static void testdata_setup(TestData *testdata, G_GNUC_UNUSED gconstpointer test_data) { testdata->tmp_dir = g_strdup(TMP_DIR_PATTERN); mkdtemp(testdata->tmp_dir); } static void testdata_teardown(TestData *testdata, G_GNUC_UNUSED gconstpointer test_data) { cr_remove_dir(testdata->tmp_dir, NULL); g_free(testdata->tmp_dir); } static void test_cr_open_db(TestData *testdata, G_GNUC_UNUSED gconstpointer test_data) { GError *err = NULL; gchar *path = NULL; cr_SqliteDb *db; // Create new db path = g_strconcat(testdata->tmp_dir, "/", TMP_PRIMARY_NAME, NULL); db = cr_db_open_primary(path, &err); g_assert(db); g_assert(!err); g_assert(g_file_test(path, G_FILE_TEST_EXISTS)); g_free(path); cr_db_close(db, &err); g_assert(!err); path = g_strconcat(testdata->tmp_dir, "/", TMP_FILELISTS_NAME, NULL); db = cr_db_open_filelists(path, &err); g_assert(db); g_assert(!err); g_assert(g_file_test(path, G_FILE_TEST_EXISTS)); g_free(path); cr_db_close(db, &err); g_assert(!err); path = g_strconcat(testdata->tmp_dir, "/", TMP_OTHER_NAME, NULL); db = cr_db_open_other(path, &err); g_assert(db); g_assert(!err); g_assert(g_file_test(path, G_FILE_TEST_EXISTS)); g_free(path); cr_db_close(db, &err); g_assert(!err); } static void test_cr_db_add_primary_pkg(TestData *testdata, G_GNUC_UNUSED gconstpointer test_data) { GError *err = NULL; gchar *path; cr_SqliteDb *db; cr_Package *pkg; GTimer *timer = g_timer_new(); // Create new db path = g_strconcat(testdata->tmp_dir, "/", TMP_PRIMARY_NAME, NULL); db = cr_db_open_primary(path, &err); g_assert(db); g_assert(!err); g_assert(g_file_test(path, G_FILE_TEST_EXISTS)); // Load package pkg = get_package(); // Add package cr_db_add_pkg(db, pkg, &err); g_assert(!err); cr_db_close(db, &err); // Cleanup g_timer_stop(timer); g_timer_destroy(timer); g_free(path); g_assert(!err); } static void test_cr_db_dbinfo_update(TestData *testdata, G_GNUC_UNUSED gconstpointer 
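/*
 * Editor's note: the sqlite tests in this file all follow the same flow --
 * cr_db_open_primary()/_filelists()/_other() to create the database,
 * cr_db_add_pkg() per package, cr_db_dbinfo_update() to record a checksum
 * string, and cr_db_close().  Minimal sketch of that flow, assuming a
 * writable "path" and a populated cr_Package *pkg as in the tests:
 *
 *   cr_SqliteDb *sdb = cr_db_open_primary(path, &err);
 *   cr_db_add_pkg(sdb, pkg, &err);
 *   cr_db_dbinfo_update(sdb, "foochecksum", &err);
 *   cr_db_close(sdb, &err);
 */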
test_data) { GError *err = NULL; gchar *path; cr_SqliteDb *db; cr_Package *pkg; // Create new db path = g_strconcat(testdata->tmp_dir, "/", TMP_PRIMARY_NAME, NULL); db = cr_db_open_primary(path, &err); g_assert(db); g_assert(!err); g_assert(g_file_test(path, G_FILE_TEST_EXISTS)); // Try cr_db_dbinfo_update cr_db_dbinfo_update(db, "foochecksum", &err); g_assert(!err); // Load package pkg = get_package(); // Add package cr_db_add_pkg(db, pkg, &err); g_assert(!err); // Try cr_db_dbinfo_update again cr_db_dbinfo_update(db, "foochecksum", &err); g_assert(!err); // Cleanup cr_package_free(pkg); g_free(path); cr_db_close(db, &err); g_assert(!err); } static void test_all(TestData *testdata, G_GNUC_UNUSED gconstpointer test_data) { GError *err = NULL; gchar *path; cr_SqliteDb *db = NULL; cr_Package *pkg, *pkg2 = NULL; // Create new db path = g_strconcat(testdata->tmp_dir, "/", TMP_PRIMARY_NAME, NULL); db = cr_db_open_primary(path, &err); g_assert(db); g_assert(!err); g_assert(g_file_test(path, G_FILE_TEST_EXISTS)); // Try cr_db_dbinfo_update cr_db_dbinfo_update(db, "foochecksum", &err); g_assert(!err); // Load package cr_package_parser_init(); pkg = cr_package_from_rpm(EMPTY_PKG, CR_CHECKSUM_SHA256, EMPTY_PKG, NULL, 5, NULL, CR_HDRR_NONE, NULL); g_assert(pkg); cr_package_parser_cleanup(); pkg2 = get_empty_package(); // Add package cr_db_add_pkg(db, pkg, &err); g_assert(!err); cr_db_add_pkg(db, pkg2, &err); g_assert(!err); // Try cr_db_dbinfo_update again cr_db_dbinfo_update(db, "foochecksum", &err); g_assert(!err); // Cleanup cr_package_free(pkg); cr_package_free(pkg2); cr_db_close(db, &err); g_assert(!err); g_free(path); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add("/sqlite/test_cr_open_db", TestData, NULL, testdata_setup, test_cr_open_db, testdata_teardown); g_test_add("/sqlite/test_cr_db_add_primary_pkg", TestData, NULL, testdata_setup, test_cr_db_add_primary_pkg, testdata_teardown); g_test_add("/sqlite/test_cr_db_dbinfo_update", TestData, NULL, testdata_setup, test_cr_db_dbinfo_update, testdata_teardown); g_test_add("/sqlite/test_all", TestData, NULL, testdata_setup, test_all, testdata_teardown); return g_test_run(); } createrepo_c-0.17.0/tests/test_xml_dump.c000066400000000000000000000075541400672373200204430ustar00rootroot00000000000000 #include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/package.h" #include "createrepo/misc.h" #include "createrepo/xml_dump.h" // Tests static void test_cr_prepend_protocol_00(void) { const gchar *url_to_be_prepended = "/path/to/package.noarch.rpm"; gchar *prepended_url = cr_prepend_protocol(url_to_be_prepended); g_assert_cmpstr(prepended_url, ==, "file:///path/to/package.noarch.rpm"); g_free(prepended_url); } static void test_cr_prepend_protocol_01(void) { const gchar *url_to_be_prepended = "http://url/to/package.noarch.rpm"; gchar *prepended_url = cr_prepend_protocol(url_to_be_prepended); g_assert_cmpstr(prepended_url, ==, "http://url/to/package.noarch.rpm"); g_free(prepended_url); } static void test_cr_Package_contains_forbidden_control_chars_01(void) { cr_Package *p = get_package(); g_assert(!cr_Package_contains_forbidden_control_chars(p)); } static void test_cr_Package_contains_forbidden_control_chars_02(void) { cr_Package *p = get_package(); p->name = "foo"; g_assert(cr_Package_contains_forbidden_control_chars(p)); } static void test_cr_Package_contains_forbidden_control_chars_03(void) { cr_Package *p = get_package(); p->summary = "foo"; 
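/*
 * Editor's note: these _contains_forbidden_control_chars cases are
 * meaningful only when the assigned string really carries a control
 * character that is not allowed in XML output; a plain printable "foo"
 * would not trip the check.  Hedged sketch of the intended shape of such
 * a positive case (0x06 used only as an example of a forbidden byte):
 *
 *   cr_Package *q = get_package();
 *   q->name = "foo\x06" "bar";   // adjacent literals keep the escape unambiguous
 *   g_assert(cr_Package_contains_forbidden_control_chars(q));
 */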
g_assert(cr_Package_contains_forbidden_control_chars(p)); } static void test_cr_Package_contains_forbidden_control_chars_04(void) { cr_Package *p = get_package(); cr_Dependency *dep = p->requires->data; dep->name = "foobar_dep"; g_assert(cr_Package_contains_forbidden_control_chars(p)); } static void test_cr_Package_contains_forbidden_control_chars_05(void) { cr_Package *p = get_package(); cr_PackageFile *file = p->files->data; file->name = "obar_dep"; g_assert(cr_Package_contains_forbidden_control_chars(p)); } static void test_cr_GSList_of_cr_Dependency_contains_forbidden_control_chars_01(void) { cr_Package *p = get_package(); cr_Dependency *dep = p->requires->data; dep->name = "foobar_dep"; g_assert(cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(p->requires)); } static void test_cr_GSList_of_cr_Dependency_contains_forbidden_control_chars_02(void) { cr_Package *p = get_package(); cr_Dependency *dep = p->requires->data; dep->name = "fo badep"; g_assert(!cr_GSList_of_cr_Dependency_contains_forbidden_control_chars(p->requires)); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/xml_dump/test_cr_prepend_protocol_00", test_cr_prepend_protocol_00); g_test_add_func("/xml_dump/test_cr_prepend_protocol_01", test_cr_prepend_protocol_01); g_test_add_func("/xml_dump/test_cr_Package_contains_forbidden_control_chars_01", test_cr_Package_contains_forbidden_control_chars_01); g_test_add_func("/xml_dump/test_cr_Package_contains_forbidden_control_chars_02", test_cr_Package_contains_forbidden_control_chars_02); g_test_add_func("/xml_dump/test_cr_Package_contains_forbidden_control_chars_03", test_cr_Package_contains_forbidden_control_chars_03); g_test_add_func("/xml_dump/test_cr_Package_contains_forbidden_control_chars_04", test_cr_Package_contains_forbidden_control_chars_04); g_test_add_func("/xml_dump/test_cr_Package_contains_forbidden_control_chars_05", test_cr_Package_contains_forbidden_control_chars_05); g_test_add_func("/xml_dump/test_cr_GSList_of_cr_Dependency_contains_forbidden_control_chars_01", test_cr_GSList_of_cr_Dependency_contains_forbidden_control_chars_01); g_test_add_func("/xml_dump/test_cr_GSList_of_cr_Dependency_contains_forbidden_control_chars_02", test_cr_GSList_of_cr_Dependency_contains_forbidden_control_chars_02); return g_test_run(); } createrepo_c-0.17.0/tests/test_xml_dump_primary.c000066400000000000000000000354201400672373200221770ustar00rootroot00000000000000#include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/load_metadata.h" #include "createrepo/misc.h" #include "createrepo/xml_dump_primary.c" #define IF_NULL_EMPTY(x) (x) ? 
x : "" xmlNodePtr cmp_package_files_and_xml(GSList *files, xmlNodePtr current, int only_primary_files) { if (!current || !files) { return current; } GSList *element = NULL; for(element = files; element; element=element->next) { cr_PackageFile *entry = (cr_PackageFile*) element->data; if (!(entry->path) || !(entry->name)) { continue; } gchar *fullname; fullname = g_strconcat(entry->path, entry->name, NULL); if (!fullname) { continue; } if (only_primary_files && !cr_is_primary(fullname)) { g_free(fullname); continue; } g_assert_cmpstr((char *) current->name, ==, "file"); g_assert_cmpstr((char *) current->children->content, ==, fullname); if (entry->type && entry->type[0] != '\0' && strcmp(entry->type, "file")) { g_assert_cmpstr((char *) current->properties->name, ==, "type"); g_assert_cmpstr((char *) current->properties->children->content, ==, IF_NULL_EMPTY(entry->type)); } } return current->next; } xmlNodePtr cmp_package_pco_and_xml(GSList *pco_list, xmlNodePtr current, PcoType pcotype) { const char *elem_name; if (pcotype >= PCO_TYPE_SENTINEL) return NULL; elem_name = pco_info[pcotype].elemname; GSList *elem; cr_Dependency *item; int is_first = 1; if (pco_list){ for(elem = pco_list; elem; elem = elem->next) { item = elem->data; if (!item->name || item->name[0] == '\0') { continue; } if (is_first){ g_assert_cmpstr((char *) current->name, ==, elem_name); current = current->children; is_first = 0; }else{ current = current->next; } g_assert_cmpstr((char *) current->name, ==, "rpm:entry"); xmlAttrPtr current_attrs; current_attrs = current->properties; g_assert_cmpstr((char *) current_attrs->name, ==, "name"); g_assert_cmpstr((char *) current_attrs->children->content, ==, item->name); if (item->flags && item->flags[0] != '\0') { current_attrs = current_attrs->next; g_assert_cmpstr((char *) current_attrs->name, ==, "flags"); g_assert_cmpstr((char *) current_attrs->children->content, ==, IF_NULL_EMPTY(item->flags)); if (item->epoch && item->epoch[0] != '\0') { current_attrs = current_attrs->next; g_assert_cmpstr((char *) current_attrs->name, ==, "epoch"); g_assert_cmpstr((char *) current_attrs->children->content, ==, IF_NULL_EMPTY(item->epoch)); } if (item->version && item->version[0] != '\0') { current_attrs = current_attrs->next; g_assert_cmpstr((char *) current_attrs->name, ==, "ver"); g_assert_cmpstr((char *) current_attrs->children->content, ==, IF_NULL_EMPTY(item->version)); } if (item->release && item->release[0] != '\0') { current_attrs = current_attrs->next; g_assert_cmpstr((char *) current_attrs->name, ==, "rel"); g_assert_cmpstr((char *) current_attrs->children->content, ==, IF_NULL_EMPTY(item->release)); } } if (pcotype == PCO_TYPE_REQUIRES && item->pre) { current_attrs = current_attrs->next; g_assert_cmpstr((char *) current_attrs->name, ==, "pre"); g_assert_cmpstr((char *) current_attrs->children->content, ==, "1"); } } } if (!is_first){ current = current->parent->next; } return current; } void cmp_package_and_xml_node(cr_Package *pkg, xmlNodePtr node) { xmlNodePtr current; current = node->children; g_assert_cmpstr((char *) current->name, ==, "name"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->name)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "arch"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->arch)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "version"); g_assert_cmpstr((char *) current->properties->name, ==, "epoch"); g_assert_cmpstr((char *) 
current->properties->children->content, ==, IF_NULL_EMPTY(pkg->epoch)); g_assert_cmpstr((char *) current->properties->next->name, ==, "ver"); g_assert_cmpstr((char *) current->properties->next->children->content, ==, IF_NULL_EMPTY(pkg->version)); g_assert_cmpstr((char *) current->properties->next->next->name, ==, "rel"); g_assert_cmpstr((char *) current->properties->next->next->children->content, ==, IF_NULL_EMPTY(pkg->release)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "checksum"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->pkgId)); g_assert_cmpstr((char *) current->properties->name, ==, "type"); g_assert_cmpstr((char *) current->properties->children->content, ==, IF_NULL_EMPTY(pkg->checksum_type)); g_assert_cmpstr((char *) current->properties->next->name, ==, "pkgid"); g_assert_cmpstr((char *) current->properties->next->children->content, ==, "YES" ); current = current->next; g_assert_cmpstr((char *) current->name, ==, "summary"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->summary)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "description"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->description)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "packager"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->rpm_packager)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "url"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->url)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "time"); g_assert_cmpstr((char *) current->properties->name, ==, "file"); gchar *tmp = g_strdup_printf("%i", (gint32) pkg->time_file); g_assert_cmpstr((char *) current->properties->children->content, ==, tmp); g_free(tmp); g_assert_cmpstr((char *) current->properties->next->name, ==, "build"); tmp = g_strdup_printf("%i", (gint32) pkg->time_build); g_assert_cmpstr((char *) current->properties->next->children->content, ==, tmp); g_free(tmp); current = current->next; g_assert_cmpstr((char *) current->name, ==, "size"); g_assert_cmpstr((char *) current->properties->name, ==, "package"); tmp = g_strdup_printf("%i", (gint32) pkg->size_package); g_assert_cmpstr((char *) current->properties->children->content, ==, tmp); g_free(tmp); g_assert_cmpstr((char *) current->properties->next->name, ==, "installed"); tmp = g_strdup_printf("%i", (gint32) pkg->size_installed); g_assert_cmpstr((char *) current->properties->next->children->content, ==, tmp); g_free(tmp); g_assert_cmpstr((char *) current->properties->next->next->name, ==, "archive"); tmp = g_strdup_printf("%i", (gint32) pkg->size_archive); g_assert_cmpstr((char *) current->properties->next->next->children->content, ==, tmp); g_free(tmp); current = current->next; g_assert_cmpstr((char *) current->name, ==, "location"); xmlAttrPtr current_attrs = current->properties; if (pkg->location_base){ g_assert_cmpstr((char *) current_attrs->name, ==, "xml:base"); gchar *location_base_with_protocol = NULL; location_base_with_protocol = cr_prepend_protocol(pkg->location_base); g_assert_cmpstr((char *) current_attrs->children->content, ==, IF_NULL_EMPTY(location_base_with_protocol)); g_free(location_base_with_protocol); current_attrs = current_attrs->next; } g_assert_cmpstr((char *) current_attrs->name, ==, "href"); g_assert_cmpstr((char *) current_attrs->children->content, ==, IF_NULL_EMPTY(pkg->location_href)); 
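/*
 * Editor's note: this helper mirrors the child-element order produced for
 * a package entry -- name, arch, version, checksum, summary, description,
 * packager, url, time, size, location and then the format block -- by
 * walking node->children / ->next and each node's ->properties attribute
 * list.  Minimal libxml2 traversal of the same shape (the logging call is
 * just illustrative):
 *
 *   for (xmlNodePtr n = node->children; n; n = n->next)
 *       g_debug("element <%s>", (const char *) n->name);
 */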
current = current->next; g_assert_cmpstr((char *) current->name, ==, "format"); current = current->children; g_assert_cmpstr((char *) current->name, ==, "rpm:license"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->rpm_license)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "rpm:vendor"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->rpm_vendor)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "rpm:group"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->rpm_group)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "rpm:buildhost"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->rpm_buildhost)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "rpm:sourcerpm"); g_assert_cmpstr((char *) current->children->content, ==, IF_NULL_EMPTY(pkg->rpm_sourcerpm)); current = current->next; g_assert_cmpstr((char *) current->name, ==, "rpm:header-range"); g_assert_cmpstr((char *) current->properties->name, ==, "start"); tmp = g_strdup_printf("%i", (gint32) pkg->rpm_header_start); g_assert_cmpstr((char *) current->properties->children->content, ==, tmp); g_free(tmp); g_assert_cmpstr((char *) current->properties->next->name, ==, "end"); tmp = g_strdup_printf("%i", (gint32) pkg->rpm_header_end); g_assert_cmpstr((char *) current->properties->next->children->content, ==, tmp); g_free(tmp); current = current->next; current = cmp_package_pco_and_xml(pkg->provides, current, PCO_TYPE_PROVIDES); current = cmp_package_pco_and_xml(pkg->requires, current, PCO_TYPE_REQUIRES); current = cmp_package_pco_and_xml(pkg->conflicts, current, PCO_TYPE_CONFLICTS); current = cmp_package_pco_and_xml(pkg->obsoletes, current, PCO_TYPE_OBSOLETES); current = cmp_package_pco_and_xml(pkg->suggests, current, PCO_TYPE_SUGGESTS); current = cmp_package_pco_and_xml(pkg->enhances, current, PCO_TYPE_ENHANCES); current = cmp_package_pco_and_xml(pkg->recommends, current, PCO_TYPE_RECOMMENDS); current = cmp_package_pco_and_xml(pkg->supplements, current, PCO_TYPE_SUPPLEMENTS); //for primary.xml we always want just primary files int only_primary_files = 1; current = cmp_package_files_and_xml(pkg->files, current, only_primary_files); } // Tests static void test_cr_xml_dump_primary_dump_pco_00(void) { cr_Package *p; p = cr_package_new(); cr_Dependency *dep; dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = NULL; dep->pre = FALSE; p->requires = (g_slist_prepend(p->requires, dep)); dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = "LE"; dep->pre = 1; dep->epoch = "44"; dep->version = "1.2.3"; p->requires = (g_slist_prepend(p->requires, dep)); xmlNodePtr node; node = xmlNewNode(NULL, BAD_CAST "wrapper"); cr_xml_dump_primary_dump_pco(node, p, PCO_TYPE_REQUIRES); node = node->children; node = cmp_package_pco_and_xml(p->requires, node, PCO_TYPE_REQUIRES); } static void test_cr_xml_dump_primary_dump_pco_01(void) { cr_Package *p; p = cr_package_new(); cr_Dependency *dep; dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = NULL; dep->pre = FALSE; p->requires = (g_slist_prepend(p->requires, dep)); dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = "LE"; dep->pre = 1; dep->epoch = "44"; dep->version = "1.2.3"; p->requires = (g_slist_prepend(p->requires, dep)); dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = NULL; dep->pre = 0; dep->epoch = "44"; dep->version = 
"1.2.3"; p->requires = (g_slist_prepend(p->requires, dep)); dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = "LE"; dep->pre = 1; dep->epoch = "44"; dep->version = "1.2.3"; p->obsoletes = (g_slist_prepend(p->obsoletes, dep)); dep = cr_dependency_new(); dep->name = "foobar_provide"; dep->flags = ""; dep->pre = 0; dep->epoch = "12"; dep->version = "1.2.3"; p->obsoletes = (g_slist_prepend(p->obsoletes, dep)); xmlNodePtr node; node = xmlNewNode(NULL, BAD_CAST "wrapper"); cr_xml_dump_primary_dump_pco(node, p, PCO_TYPE_REQUIRES); cr_xml_dump_primary_dump_pco(node, p, PCO_TYPE_OBSOLETES); node = node->children; node = cmp_package_pco_and_xml(p->requires, node, PCO_TYPE_REQUIRES); node = cmp_package_pco_and_xml(p->obsoletes, node, PCO_TYPE_OBSOLETES); } static void test_cr_xml_dump_primary_base_items_00(void) { xmlNodePtr node = xmlNewNode(NULL, BAD_CAST "package"); cr_Package *pkg = NULL; pkg = get_package(); g_assert(pkg); cr_xml_dump_primary_base_items(node, pkg); cmp_package_and_xml_node(pkg, node); cr_package_free(pkg); } static void test_cr_xml_dump_primary_base_items_01(void) { xmlNodePtr node = xmlNewNode(NULL, BAD_CAST "package"); cr_Package *pkg = NULL; pkg = get_package(); pkg->location_base = "http://url/"; g_assert(pkg); cr_xml_dump_primary_base_items(node, pkg); cmp_package_and_xml_node(pkg, node); cr_package_free(pkg); } static void test_cr_xml_dump_primary_base_items_02(void) { xmlNodePtr node = xmlNewNode(NULL, BAD_CAST "package"); cr_Package *pkg = NULL; pkg = get_empty_package(); g_assert(pkg); cr_xml_dump_primary_base_items(node, pkg); cmp_package_and_xml_node(pkg, node); cr_package_free(pkg); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/xml_dump_primary/test_cr_xml_dump_primary_base_items_00", test_cr_xml_dump_primary_base_items_00); g_test_add_func("/xml_dump_primary/test_cr_xml_dump_primary_base_items_01", test_cr_xml_dump_primary_base_items_02); g_test_add_func("/xml_dump_primary/test_cr_xml_dump_primary_base_items_02", test_cr_xml_dump_primary_base_items_01); g_test_add_func("/xml_dump_primary/test_cr_xml_dump_primary_dump_pco_00", test_cr_xml_dump_primary_dump_pco_00); g_test_add_func("/xml_dump_primary/test_cr_xml_dump_primary_dump_pco_01", test_cr_xml_dump_primary_dump_pco_01); return g_test_run(); } createrepo_c-0.17.0/tests/test_xml_file.c000066400000000000000000000107251400672373200204070ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #define _XOPEN_SOURCE 700 #include #include #include #include #include #include "fixtures.h" #include "createrepo/misc.h" #include "createrepo/xml_file.h" #include "createrepo/compression_wrapper.h" typedef struct { gchar *tmpdir; } TestFixtures; static void fixtures_setup(TestFixtures *fixtures, G_GNUC_UNUSED gconstpointer test_data) { gchar *template = g_strdup(TMPDIR_TEMPLATE); fixtures->tmpdir = mkdtemp(template); g_assert(fixtures->tmpdir); } static void fixtures_teardown(TestFixtures *fixtures, G_GNUC_UNUSED gconstpointer test_data) { if (!fixtures->tmpdir) return; cr_remove_dir(fixtures->tmpdir, NULL); g_free(fixtures->tmpdir); } static void test_no_packages(TestFixtures *fixtures, G_GNUC_UNUSED gconstpointer test_data) { cr_XmlFile *f; gchar *path; gchar contents[2048]; int ret; GError *err = NULL; g_assert(g_file_test(fixtures->tmpdir, G_FILE_TEST_IS_DIR)); // Try primary.xml path = g_build_filename(fixtures->tmpdir, "primary.xml.gz", NULL); f = cr_xmlfile_open_primary(path, CR_CW_GZ_COMPRESSION, &err); g_assert(f); g_assert(err == NULL); cr_xmlfile_close(f, &err); CR_FILE *crf = cr_open(path, CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, NULL); g_assert(crf); ret = cr_read(crf, &contents, 2047, NULL); g_assert(ret != CR_CW_ERR); contents[ret] = '\0'; cr_close(crf, NULL); g_assert_cmpstr(contents, ==, "\n" "\n"); g_free(path); } static void test_rewrite_header_pacakge_count(TestFixtures *fixtures, G_GNUC_UNUSED gconstpointer test_data) { cr_XmlFile *f; gchar *path; gchar contents[2048]; int ret; GError *err = NULL; g_assert(g_file_test(fixtures->tmpdir, G_FILE_TEST_IS_DIR)); // Try primary.xml path = g_build_filename(fixtures->tmpdir, "primary.xml.gz", NULL); f = cr_xmlfile_open_primary(path, CR_CW_GZ_COMPRESSION, &err); g_assert(f); g_assert(err == NULL); cr_xmlfile_close(f, &err); cr_ContentStat *stat; stat = cr_contentstat_new(CR_CHECKSUM_SHA256, &err); cr_rewrite_header_package_count(path, CR_CW_GZ_COMPRESSION, 9, 0, stat, NULL, &err); g_assert(!err); g_assert_cmpint(stat->size, >=, 100); cr_contentstat_free(stat, &err); CR_FILE *crf = cr_open(path, CR_CW_MODE_READ, CR_CW_AUTO_DETECT_COMPRESSION, NULL); g_assert(crf); ret = cr_read(crf, &contents, 2047, NULL); g_assert(ret != CR_CW_ERR); contents[ret] = '\0'; cr_close(crf, NULL); g_assert_cmpstr(contents, ==, "\n" "\n"); g_free(path); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add("/xml_file/test_no_packages", TestFixtures, NULL, fixtures_setup, test_no_packages, fixtures_teardown); g_test_add("/xml_file/test_write_modified_header", TestFixtures, NULL, fixtures_setup, test_rewrite_header_pacakge_count, fixtures_teardown); return g_test_run(); } createrepo_c-0.17.0/tests/test_xml_parser_filelists.c000066400000000000000000000331041400672373200230360ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/package.h" #include "createrepo/misc.h" #include "createrepo/xml_parser.h" #include "createrepo/xml_parser_internal.h" //This functions assumes there is enough space in the buffer for the read file plus a terminating NULL static int read_file(char *f, cr_CompressionType compression, char* buffer, int amount) { int ret = CRE_OK; GError *tmp_err = NULL; CR_FILE *orig = NULL; orig = cr_open(f, CR_CW_MODE_READ, compression, &tmp_err); if (!orig) { ret = tmp_err->code; return ret; } int read = cr_read(orig, buffer, amount, &tmp_err); buffer[read] = 0; if (orig) cr_close(orig, NULL); return ret; } // Callbacks static int pkgcb(cr_Package *pkg, void *cbdata, GError **err) { g_assert(pkg); g_assert(!err || *err == NULL); if (cbdata) *((int *)cbdata) += 1; cr_package_free(pkg); return CR_CB_RET_OK; } static int pkgcb_interrupt(cr_Package *pkg, void *cbdata, GError **err) { g_assert(pkg); g_assert(!err || *err == NULL); if (cbdata) *((int *)cbdata) += 1; cr_package_free(pkg); return CR_CB_RET_ERR; } static int newpkgcb_skip_fake_bash(cr_Package **pkg, G_GNUC_UNUSED const char *pkgId, const char *name, G_GNUC_UNUSED const char *arch, G_GNUC_UNUSED void *cbdata, GError **err) { g_assert(pkg != NULL); g_assert(*pkg == NULL); g_assert(pkgId != NULL); g_assert(!err || *err == NULL); if (!g_strcmp0(name, "fake_bash")) return CRE_OK; *pkg = cr_package_new(); return CR_CB_RET_OK; } static int newpkgcb_interrupt(cr_Package **pkg, G_GNUC_UNUSED const char *pkgId, G_GNUC_UNUSED const char *name, G_GNUC_UNUSED const char *arch, G_GNUC_UNUSED void *cbdata, GError **err) { g_assert(pkg != NULL); g_assert(*pkg == NULL); g_assert(pkgId != NULL); g_assert(!err || *err == NULL); if (cbdata) *((int *)cbdata) += 1; return CR_CB_RET_ERR; } static int warningcb(G_GNUC_UNUSED cr_XmlParserWarningType type, G_GNUC_UNUSED char *msg, void *cbdata, G_GNUC_UNUSED GError **err) { g_assert(type < CR_XML_WARNING_SENTINEL); g_assert(!err || *err == NULL); g_string_append((GString *) cbdata, msg); g_string_append((GString *) cbdata, ";"); return CR_CB_RET_OK; } static int warningcb_interrupt(G_GNUC_UNUSED cr_XmlParserWarningType type, G_GNUC_UNUSED char *msg, G_GNUC_UNUSED void *cbdata, G_GNUC_UNUSED GError **err) { g_assert(type < CR_XML_WARNING_SENTINEL); g_assert(!err || *err == NULL); if (cbdata) *((int *)cbdata) += 1; return CR_CB_RET_ERR; } // Tests static void test_cr_xml_parse_filelists_00(void) { GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_REPO_00_FILELISTS, NULL, NULL, pkgcb, NULL, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); } static void test_cr_xml_parse_filelists_01(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_REPO_01_FILELISTS, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 1); } static void test_cr_xml_parse_filelists_02(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_REPO_02_FILELISTS, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 2); } static void 
test_cr_xml_parse_filelists_unknown_element_00(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_UE_FIL_00, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 2); } static void test_cr_xml_parse_filelists_unknown_element_01(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_UE_FIL_01, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 1); } static void test_cr_xml_parse_filelists_unknown_element_02(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_UE_FIL_02, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 2); } static void test_cr_xml_parse_filelists_no_pkgid(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_NO_PKGID_FIL, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err != NULL); g_error_free(tmp_err); g_assert_cmpint(ret, ==, CRE_BADXMLFILELISTS); } static void test_cr_xml_parse_filelists_skip_fake_bash_00(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_UE_FIL_00, newpkgcb_skip_fake_bash, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 1); } static void test_cr_xml_parse_filelists_skip_fake_bash_01(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_UE_FIL_01, newpkgcb_skip_fake_bash, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 0); } static void test_cr_xml_parse_filelists_pkgcb_interrupt(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_REPO_02_FILELISTS, NULL, NULL, pkgcb_interrupt, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err != NULL); g_error_free(tmp_err); g_assert_cmpint(ret, ==, CRE_CBINTERRUPTED); g_assert_cmpint(parsed, ==, 1); } static void test_cr_xml_parse_filelists_newpkgcb_interrupt(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_REPO_02_FILELISTS, newpkgcb_interrupt, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err != NULL); g_error_free(tmp_err); g_assert_cmpint(ret, ==, CRE_CBINTERRUPTED); g_assert_cmpint(parsed, ==, 0); } static void test_cr_xml_parse_filelists_warningcb_interrupt(void) { int parsed = 0, numofwarnings = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_BAD_TYPE_FIL, NULL, NULL, pkgcb, &parsed, warningcb_interrupt, &numofwarnings, &tmp_err); g_assert(tmp_err != NULL); g_error_free(tmp_err); g_assert_cmpint(ret, ==, CRE_CBINTERRUPTED); g_assert_cmpint(parsed, ==, 1); g_assert_cmpint(numofwarnings, ==, 1); } static void test_cr_xml_parse_filelists_bad_file_type_00(void) { int parsed = 0; GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_BAD_TYPE_FIL, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 2); } static void test_cr_xml_parse_filelists_bad_file_type_01(void) { char *warnmsgs; int parsed = 0; GString *warn_strings = g_string_new(0); GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_MRF_BAD_TYPE_FIL, NULL, NULL, pkgcb, &parsed, 
warningcb, warn_strings, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 2); warnmsgs = g_string_free(warn_strings, FALSE); g_assert_cmpstr(warnmsgs, ==, "Unknown file type \"foo\";"); g_free(warnmsgs); } static void test_cr_xml_parse_different_md_type(void) { char *warnmsgs; int parsed = 0; GString *warn_strings = g_string_new(0); GError *tmp_err = NULL; int ret = cr_xml_parse_filelists(TEST_REPO_01_OTHER, NULL, NULL, pkgcb, &parsed, warningcb, warn_strings, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 0); warnmsgs = g_string_free(warn_strings, FALSE); g_assert_cmpstr(warnmsgs, ==, "Unknown element \"otherdata\";" "The target doesn't contain the expected element \"\" - " "The target probably isn't a valid filelists xml;"); g_free(warnmsgs); } static void test_cr_xml_parse_filelists_snippet_snippet_01(void) { int parsed = 0; GError *tmp_err = NULL; char buf[400]; read_file(TEST_FILELISTS_SNIPPET_01, CR_CW_AUTO_DETECT_COMPRESSION, buf, 400); int ret = cr_xml_parse_filelists_snippet(buf, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 1); } static void test_cr_xml_parse_filelists_snippet_snippet_02(void) { int parsed = 0; GError *tmp_err = NULL; char buf[600]; read_file(TEST_FILELISTS_SNIPPET_02, CR_CW_AUTO_DETECT_COMPRESSION, buf, 600); int ret = cr_xml_parse_filelists_snippet(buf, NULL, NULL, pkgcb, &parsed, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(parsed, ==, 2); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_00", test_cr_xml_parse_filelists_00); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_01", test_cr_xml_parse_filelists_01); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_02", test_cr_xml_parse_filelists_02); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_unknown_element_00", test_cr_xml_parse_filelists_unknown_element_00); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_unknown_element_01", test_cr_xml_parse_filelists_unknown_element_01); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_unknown_element_02", test_cr_xml_parse_filelists_unknown_element_02); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_no_pgkid", test_cr_xml_parse_filelists_no_pkgid); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_skip_fake_bash_00", test_cr_xml_parse_filelists_skip_fake_bash_00); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_skip_fake_bash_01", test_cr_xml_parse_filelists_skip_fake_bash_01); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_pkgcb_interrupt", test_cr_xml_parse_filelists_pkgcb_interrupt); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_newpkgcb_interrupt", test_cr_xml_parse_filelists_newpkgcb_interrupt); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_warningcb_interrupt", test_cr_xml_parse_filelists_warningcb_interrupt); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_bad_file_type_00", test_cr_xml_parse_filelists_bad_file_type_00); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_bad_file_type_01", test_cr_xml_parse_filelists_bad_file_type_01); 
g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_different_md_type", test_cr_xml_parse_different_md_type); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_snippet_snippet_01", test_cr_xml_parse_filelists_snippet_snippet_01); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_filelists_snippet_snippet_02", test_cr_xml_parse_filelists_snippet_snippet_02); return g_test_run(); } createrepo_c-0.17.0/tests/test_xml_parser_repomd.c000066400000000000000000000101101400672373200223160ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/repomd.h" #include "createrepo/misc.h" #include "createrepo/xml_parser.h" // Callbacks static int warningcb(G_GNUC_UNUSED cr_XmlParserWarningType type, G_GNUC_UNUSED char *msg, void *cbdata, G_GNUC_UNUSED GError **err) { g_assert(type < CR_XML_WARNING_SENTINEL); g_assert(!err || *err == NULL); g_string_append((GString *) cbdata, msg); g_string_append((GString *) cbdata, ";"); return CR_CB_RET_OK; } static int warningcb_interrupt(G_GNUC_UNUSED cr_XmlParserWarningType type, G_GNUC_UNUSED char *msg, G_GNUC_UNUSED void *cbdata, G_GNUC_UNUSED GError **err) { g_assert(type < CR_XML_WARNING_SENTINEL); g_assert(!err || *err == NULL); if (cbdata) *((int *)cbdata) += 1; return CR_CB_RET_ERR; } // Tests static void test_cr_xml_parse_repomd_00(void) { GError *tmp_err = NULL; cr_Repomd *repomd = cr_repomd_new(); int ret = cr_xml_parse_repomd(TEST_REPO_00_REPOMD, repomd, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); cr_repomd_free(repomd); } static void test_cr_xml_parse_repomd_01(void) { GError *tmp_err = NULL; cr_Repomd *repomd = cr_repomd_new(); int ret = cr_xml_parse_repomd(TEST_REPO_01_REPOMD, repomd, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); cr_repomd_free(repomd); } static void test_cr_xml_parse_repomd_02(void) { GError *tmp_err = NULL; char *warnmsgs; cr_Repomd *repomd = cr_repomd_new(); GString *warn_strings = g_string_new(0); int ret = cr_xml_parse_repomd(TEST_REPO_02_REPOMD, repomd, warningcb, warn_strings, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); cr_repomd_free(repomd); warnmsgs = g_string_free(warn_strings, FALSE); g_assert_cmpstr(warnmsgs, ==, ""); g_free(warnmsgs); } static void test_cr_xml_parse_repomd_warningcb_interrupt(void) { int numofwarnings = 0; GError *tmp_err = NULL; cr_Repomd *repomd = cr_repomd_new(); int ret = cr_xml_parse_repomd(TEST_MRF_MISSING_TYPE_REPOMD, repomd, warningcb_interrupt, &numofwarnings, &tmp_err); g_assert(tmp_err != NULL); g_error_free(tmp_err); g_assert_cmpint(ret, ==, CRE_CBINTERRUPTED); g_assert_cmpint(numofwarnings, 
==, 1); cr_repomd_free(repomd); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_repomd_00", test_cr_xml_parse_repomd_00); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_repomd_01", test_cr_xml_parse_repomd_01); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_repomd_02", test_cr_xml_parse_repomd_02); g_test_add_func("/xml_parser_filelists/test_cr_xml_parse_repomd_warningcb_interrupt", test_cr_xml_parse_repomd_warningcb_interrupt); return g_test_run(); } createrepo_c-0.17.0/tests/test_xml_parser_updateinfo.c000066400000000000000000000240251400672373200232000ustar00rootroot00000000000000/* createrepo_c - Library of routines for manipulation with repodata * Copyright (C) 2013 Tomas Mlcoch * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. */ #include #include #include #include "fixtures.h" #include "createrepo/error.h" #include "createrepo/misc.h" #include "createrepo/xml_parser.h" #include "createrepo/updateinfo.h" // Tests static void test_cr_xml_parse_updateinfo_00(void) { GError *tmp_err = NULL; cr_UpdateInfo *ui = cr_updateinfo_new(); int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_00, ui, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); cr_updateinfo_free(ui); } static void test_cr_xml_parse_updateinfo_01(void) { GError *tmp_err = NULL; cr_UpdateInfo *ui = cr_updateinfo_new(); cr_UpdateRecord *update; cr_UpdateReference *ref; cr_UpdateCollection *col; cr_UpdateCollectionPackage *pkg; int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_01, ui, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(g_slist_length(ui->updates), ==, 1); update = ui->updates->data; g_assert_cmpstr(update->from, ==, "secresponseteam@foo.bar"); g_assert_cmpstr(update->status, ==, "final"); g_assert_cmpstr(update->type, ==, "enhancement"); g_assert_cmpstr(update->version, ==, "3"); g_assert_cmpstr(update->id, ==, "foobarupdate_1"); g_assert_cmpstr(update->title, ==, "title_1"); g_assert_cmpstr(update->issued_date, ==, "2012-12-12 00:00:00"); g_assert_cmpstr(update->updated_date, ==, "2012-12-12 00:00:00"); g_assert_cmpstr(update->rights, ==, "rights_1"); g_assert_cmpstr(update->release, ==, "release_1"); g_assert_cmpstr(update->pushcount, ==, "pushcount_1"); g_assert_cmpstr(update->severity, ==, "severity_1"); g_assert_cmpstr(update->summary, ==, "summary_1"); g_assert_cmpstr(update->description, ==, "description_1"); g_assert_cmpstr(update->solution, ==, "solution_1"); g_assert(update->reboot_suggested); g_assert_cmpint(g_slist_length(update->references), ==, 1); ref = update->references->data; g_assert_cmpstr(ref->href, ==, "https://foobar/foobarupdate_1"); g_assert_cmpstr(ref->id, ==, "1"); g_assert_cmpstr(ref->type, ==, "self"); g_assert_cmpstr(ref->title, ==, "update_1"); 
g_assert_cmpint(g_slist_length(update->collections), ==, 1); col = update->collections->data; g_assert_cmpstr(col->shortname, ==, "foo.component"); g_assert_cmpstr(col->name, ==, "Foo component"); g_assert_cmpint(g_slist_length(col->packages), ==, 1); pkg = col->packages->data; g_assert_cmpstr(pkg->name, ==, "bar"); g_assert_cmpstr(pkg->version, ==, "2.0.1"); g_assert_cmpstr(pkg->release, ==, "3"); g_assert_cmpstr(pkg->epoch, ==, "0"); g_assert_cmpstr(pkg->arch, ==, "noarch"); g_assert_cmpstr(pkg->src, ==, "bar-2.0.1-3.src.rpm"); g_assert_cmpstr(pkg->filename, ==, "bar-2.0.1-3.noarch.rpm"); g_assert_cmpstr(pkg->sum, ==, "29be985e1f652cd0a29ceed6a1c49964d3618bddd22f0be3292421c8777d26c8"); g_assert_cmpint(pkg->sum_type, ==, CR_CHECKSUM_SHA256); g_assert(pkg->reboot_suggested); g_assert(pkg->restart_suggested); g_assert(pkg->relogin_suggested); cr_updateinfo_free(ui); } static void test_cr_xml_parse_updateinfo_02(void) { GError *tmp_err = NULL; cr_UpdateInfo *ui = cr_updateinfo_new(); cr_UpdateRecord *update; cr_UpdateReference *ref; cr_UpdateCollection *col; cr_UpdateCollectionPackage *pkg; int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_02, ui, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(g_slist_length(ui->updates), ==, 1); update = ui->updates->data; g_assert(!update->from); g_assert(!update->status); g_assert(!update->type); g_assert(!update->version); g_assert(!update->id); g_assert(!update->title); g_assert(!update->issued_date); g_assert(!update->updated_date); g_assert(!update->rights); g_assert(!update->release); g_assert(!update->pushcount); g_assert(!update->severity); g_assert(!update->summary); g_assert(!update->reboot_suggested); g_assert(!update->description); g_assert(!update->solution); g_assert_cmpint(g_slist_length(update->references), ==, 1); ref = update->references->data; g_assert(!ref->href); g_assert(!ref->id); g_assert(!ref->type); g_assert(!ref->title); g_assert_cmpint(g_slist_length(update->collections), ==, 1); col = update->collections->data; g_assert(!col->shortname); g_assert(!col->name); g_assert_cmpint(g_slist_length(col->packages), ==, 1); pkg = col->packages->data; g_assert(!pkg->name); g_assert(!pkg->version); g_assert(!pkg->release); g_assert(!pkg->epoch); g_assert(!pkg->arch); g_assert(!pkg->src); g_assert(!pkg->filename); g_assert(!pkg->sum); g_assert_cmpint(pkg->sum_type, ==, CR_CHECKSUM_UNKNOWN); g_assert(!pkg->reboot_suggested); g_assert(!pkg->restart_suggested); g_assert(!pkg->relogin_suggested); cr_updateinfo_free(ui); } //Test for module support static void test_cr_xml_parse_updateinfo_03(void) { GError *tmp_err = NULL; cr_UpdateInfo *ui = cr_updateinfo_new(); cr_UpdateRecord *update; cr_UpdateCollection *col; cr_UpdateCollectionModule *module; cr_UpdateCollectionPackage *pkg; int ret = cr_xml_parse_updateinfo(TEST_UPDATEINFO_03, ui, NULL, NULL, &tmp_err); g_assert(tmp_err == NULL); g_assert_cmpint(ret, ==, CRE_OK); g_assert_cmpint(g_slist_length(ui->updates), ==, 6); update = g_slist_nth_data(ui->updates, 2); g_assert(!update->reboot_suggested); update = g_slist_nth_data(ui->updates, 3); g_assert_cmpstr(update->from, ==, "errata@redhat.com"); g_assert_cmpstr(update->status, ==, "stable"); g_assert_cmpstr(update->type, ==, "enhancement"); g_assert_cmpstr(update->version, ==, "1"); g_assert_cmpstr(update->id, ==, "RHEA-2012:0058"); g_assert_cmpstr(update->title, ==, "Gorilla_Erratum"); g_assert_cmpstr(update->description, ==, "Gorilla_Erratum"); g_assert(update->reboot_suggested); update = 
g_slist_nth_data(ui->updates, 4); g_assert_cmpstr(update->id, ==, "RHEA-2012:0059"); g_assert_cmpstr(update->title, ==, "Duck_Kangaroo_Erratum"); g_assert_cmpstr(update->description, ==, "Duck_Kangaro_Erratum description"); g_assert_cmpstr(update->issued_date, ==, "2018-01-27 16:08:09"); g_assert_cmpstr(update->updated_date, ==, "2018-07-20 06:00:01 UTC"); g_assert_cmpstr(update->release, ==, "1"); g_assert(update->reboot_suggested); g_assert_cmpint(g_slist_length(update->references), ==, 0); g_assert_cmpint(g_slist_length(update->collections), ==, 2); col = g_slist_nth_data(update->collections, 0); g_assert_cmpstr(col->shortname, ==, ""); g_assert_cmpstr(col->name, ==, "coll_name1"); module = col->module; g_assert_cmpstr(module->name, ==, "kangaroo"); g_assert_cmpstr(module->stream, ==, "0"); g_assert_cmpuint(module->version, ==, 20180730223407); g_assert_cmpstr(module->context, ==, "deadbeef"); g_assert_cmpstr(module->arch, ==, "noarch"); g_assert_cmpint(g_slist_length(col->packages), ==, 1); pkg = col->packages->data; g_assert_cmpstr(pkg->name, ==, "kangaroo"); g_assert_cmpstr(pkg->version, ==, "0.3"); g_assert_cmpstr(pkg->release, ==, "1"); g_assert(!pkg->epoch); g_assert_cmpstr(pkg->arch, ==, "noarch"); g_assert_cmpstr(pkg->src, ==, "http://www.fedoraproject.org"); g_assert_cmpstr(pkg->filename, ==, "kangaroo-0.3-1.noarch.rpm"); g_assert(!pkg->sum); g_assert(!pkg->sum_type); col = g_slist_nth_data(update->collections, 1); g_assert_cmpstr(col->shortname, ==, ""); g_assert_cmpstr(col->name, ==, "coll_name2"); module = col->module; g_assert_cmpstr(module->name, ==, "duck"); g_assert_cmpstr(module->stream, ==, "0"); g_assert_cmpuint(module->version, ==, 20180730233102); g_assert_cmpstr(module->context, ==, "deadbeef"); g_assert_cmpstr(module->arch, ==, "noarch"); g_assert_cmpint(g_slist_length(col->packages), ==, 1); pkg = col->packages->data; g_assert_cmpstr(pkg->name, ==, "duck"); g_assert_cmpstr(pkg->version, ==, "0.7"); g_assert_cmpstr(pkg->filename, ==, "duck-0.7-1.noarch.rpm"); update = g_slist_nth_data(ui->updates, 5); g_assert_cmpstr(update->id, ==, "RHEA-2012:0060"); g_assert_cmpstr(update->issued_date, ==, "1555429284"); g_assert_cmpstr(update->updated_date, ==, "2018-07-29 06:00:01 UTC"); cr_updateinfo_free(ui); } int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_00", test_cr_xml_parse_updateinfo_00); g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_01", test_cr_xml_parse_updateinfo_01); g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_02", test_cr_xml_parse_updateinfo_02); g_test_add_func("/xml_parser_updateinfo/test_cr_xml_parse_updateinfo_03", test_cr_xml_parse_updateinfo_03); return g_test_run(); } 
createrepo_c-0.17.0/tests/testdata/000077500000000000000000000000001400672373200172115ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/compressed_files/000077500000000000000000000000001400672373200225375ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/compressed_files/00_plain.foo0000066400000000000000000000000001400672373200247140ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/compressed_files/00_plain.foo1000066400000000000000000000000411400672373200247220ustar00rootroot00000000000000O00_plain.txtcreaterepo_c-0.17.0/tests/testdata/compressed_files/00_plain.foo2000066400000000000000000000000161400672373200247250ustar00rootroot00000000000000BZh9rE8Pcreaterepo_c-0.17.0/tests/testdata/compressed_files/00_plain.foo3000066400000000000000000000000401400672373200247230ustar00rootroot000000000000007zXZִFD!}YZcreaterepo_c-0.17.0/tests/testdata/compressed_files/00_plain.foo4000066400000000000000000000000001400672373200247200ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/compressed_files/00_plain.txt000066400000000000000000000000001400672373200246700ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/compressed_files/00_plain.txt.bz2000066400000000000000000000000161400672373200253730ustar00rootroot00000000000000BZh9rE8Pcreaterepo_c-0.17.0/tests/testdata/compressed_files/00_plain.txt.gz000066400000000000000000000000411400672373200253140ustar00rootroot00000000000000O00_plain.txtcreaterepo_c-0.17.0/tests/testdata/compressed_files/00_plain.txt.xz000066400000000000000000000000401400672373200253340ustar00rootroot000000000000007zXZִFD!}YZcreaterepo_c-0.17.0/tests/testdata/compressed_files/00_plain.txt.zck000066400000000000000000000001371400672373200254710ustar00rootroot00000000000000ZCK16G5ؕV&R{W=U-OBșo$'AdLxRUcreaterepo_c-0.17.0/tests/testdata/compressed_files/01_plain.foo0000066400000000000000000000000701400672373200247240ustar00rootroot00000000000000foobar foobar foobar foobar test test folkjsaflkjsadokf createrepo_c-0.17.0/tests/testdata/compressed_files/01_plain.foo1000066400000000000000000000001001400672373200247170ustar00rootroot00000000000000`O01_plain.txtKOJ,RHF 400I58createrepo_c-0.17.0/tests/testdata/compressed_files/01_plain.foo2000066400000000000000000000001051400672373200247250ustar00rootroot00000000000000BZh91AY&SYδ р@7 1Ѧ6g$r ~!v8AI}YZcreaterepo_c-0.17.0/tests/testdata/compressed_files/01_plain.foo4000066400000000000000000000002511400672373200247310ustar00rootroot00000000000000ZCK1Өza6:W+l7/kĄWt>LK 0jŵ[ 3^<ӖsOX}A2va(/ 8%foobar ftest folkjsafdokf createrepo_c-0.17.0/tests/testdata/compressed_files/01_plain.txt000066400000000000000000000000701400672373200247000ustar00rootroot00000000000000foobar foobar foobar foobar test test folkjsaflkjsadokf createrepo_c-0.17.0/tests/testdata/compressed_files/01_plain.txt.bz2000066400000000000000000000001051400672373200253730ustar00rootroot00000000000000BZh91AY&SYδ р@7 1Ѧ6g$r ~!v8AI}YZcreaterepo_c-0.17.0/tests/testdata/compressed_files/01_plain.txt.zck000066400000000000000000000002361400672373200254720ustar00rootroot00000000000000ZCK1o@ߙyyQܖրDa/]5jYmX>LK 0jŵ[ 3^<Ӗswb33L!a:E{(/ 8%foobar ftest folkjsafdokf createrepo_c-0.17.0/tests/testdata/comps_files/000077500000000000000000000000001400672373200215145ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/comps_files/comps_00.xml000066400000000000000000000011761400672373200236630ustar00rootroot00000000000000 test-group <_name>Test Group <_description/> false True Archer test category <_name>Test Category 
<_description>Description of test category. 99 test-group createrepo_c-0.17.0/tests/testdata/modified_repo_files/000077500000000000000000000000001400672373200232005ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/modified_repo_files/bad_file_type-filelists.xml000066400000000000000000000011711400672373200305040ustar00rootroot00000000000000 /usr/bin/fake_bash /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz /usr/file_with_bad_type createrepo_c-0.17.0/tests/testdata/modified_repo_files/error_00-filelists.xml000066400000000000000000000010721400672373200273460ustar00rootroot00000000000000 /usr/bin/fake_bash foo_element /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/modified_repo_files/error_00-other.xml000066400000000000000000000014421400672373200264720ustar00rootroot00000000000000 - First release foo element - First release - Second release createrepo_c-0.17.0/tests/testdata/modified_repo_files/error_00-primary.xml000066400000000000000000000061461400672373200270420ustar00rootroot00000000000000 fake_bash x86_64 Fake bash Fake bash package http://fake_bash_shell.com/ super_kernel x86_64 6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3 Test package This package has provides, requires, obsoletes, conflicts options. http://so_super_kernel.com/it_is_awesome/yep_it_really_is createrepo_c-0.17.0/tests/testdata/modified_repo_files/missing_type-repomd.xml000066400000000000000000000027701400672373200277260ustar00rootroot00000000000000 1334667230 c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c 85bc611be5d81ac8da2fe01e98ef741d243d1518fcc46ada70660020803fbf09 1334667230 273 389 b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef da6096c924349af0c326224a33be0cdb26897fbe3d25477ac217261652449445 1334667230 332 530 6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e 0fc6cadf97d515e87491d24dc9712d8ddaf2226a21ae7f131ff42d71a877c496 1334667230 782 2085 createrepo_c-0.17.0/tests/testdata/modified_repo_files/multiple_warnings_00-filelists.xml000066400000000000000000000013741400672373200317650ustar00rootroot00000000000000 /usr/bin/fake_bash foo_element /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/modified_repo_files/multiple_warnings_00-other.xml000066400000000000000000000017311400672373200311050ustar00rootroot00000000000000 - First release foo element - First release - Second release createrepo_c-0.17.0/tests/testdata/modified_repo_files/multiple_warnings_00-primary.xml000066400000000000000000000070041400672373200314460ustar00rootroot00000000000000 fake_bash x86_64 90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7 Fake bash Fake bash package foo element http://fake_bash_shell.com/ foo element super_kernel x86_64 6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3 Test package This package has provides, requires, obsoletes, conflicts options. 
http://so_super_kernel.com/it_is_awesome/yep_it_really_is createrepo_c-0.17.0/tests/testdata/modified_repo_files/no_pkgid-filelists.xml000066400000000000000000000007761400672373200275220ustar00rootroot00000000000000 /usr/bin/fake_bash /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/modified_repo_files/no_pkgid-other.xml000066400000000000000000000013771400672373200266430ustar00rootroot00000000000000 - First release foo element - First release - Second release createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_00-filelists.xml000066400000000000000000000011401400672373200314210ustar00rootroot00000000000000 /usr/bin/fake_bash foo_element /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_00-other.xml000066400000000000000000000015101400672373200305450ustar00rootroot00000000000000 - First release foo element - First release - Second release createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_00-primary.xml000066400000000000000000000063151400672373200311170ustar00rootroot00000000000000 fake_bash x86_64 90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7 Fake bash Fake bash package foo element http://fake_bash_shell.com/ super_kernel x86_64 6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3 Test package This package has provides, requires, obsoletes, conflicts options. http://so_super_kernel.com/it_is_awesome/yep_it_really_is createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_01-filelists.xml000066400000000000000000000011361400672373200314270ustar00rootroot00000000000000 /usr/bin/fake_bash /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_01-other.xml000066400000000000000000000015061400672373200305530ustar00rootroot00000000000000 - First release - First release - Second release createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_01-primary.xml000066400000000000000000000064311400672373200311170ustar00rootroot00000000000000 fake_bash x86_64 90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7 Fake bash Fake bash package http://fake_bash_shell.com/ super_kernel x86_64 6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3 Test package This package has provides, requires, obsoletes, conflicts options. http://so_super_kernel.com/it_is_awesome/yep_it_really_is createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_02-filelists.xml000066400000000000000000000012211400672373200314230ustar00rootroot00000000000000 /usr/bin/fake_bash foo_element /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_02-other.xml000066400000000000000000000015711400672373200305560ustar00rootroot00000000000000 - First release foo element - First release - Second release createrepo_c-0.17.0/tests/testdata/modified_repo_files/unknown_element_02-primary.xml000066400000000000000000000064161400672373200311230ustar00rootroot00000000000000 fake_bash x86_64 90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7 Fake bash Fake bash package foo element http://fake_bash_shell.com/ foo element super_kernel x86_64 6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3 Test package This package has provides, requires, obsoletes, conflicts options. 
http://so_super_kernel.com/it_is_awesome/yep_it_really_is createrepo_c-0.17.0/tests/testdata/other_metadata/000077500000000000000000000000001400672373200221725ustar00rootroot000000000000000402e012013246e7e2f2f638c8a6046efdfd71ae1cbeff9c391d9c1ae5d7d431-comps-f19.xml.bz2000066400000000000000000000015761400672373200360630ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/other_metadataBZh91AY&SYݬ9)_PrW`("PKB4 @42db`ɂdф``'F4q&LF&L&@ F)!BzAhbzLC#jPm""H DFGx? >._|*{t^ :L߯Q"G__l_qΊQ~[h2?O;VVk[vG$Fk*(L vr#_lDwUU^N\>Nsn:tW2ObGL|Ɍ7bԵnw!>>c%QH  36c4f2d6dda3b015b4a7da59552c7c76eff99a59979178513cb51341f6eead44-comps-f19.xml000066400000000000000000000044741400672373200352700ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/other_metadata 3d-printing 3D Printing 3D printing software false true blender cura gnome-desktop-environment GNOME Desktop Pracovní prostředí GNOME GNOME-Desktop GNOME is a highly intuitive and user friendly desktop environment. GNOME je velmi intuitivní a uživatelsky přívětivé pracovní prostředí. GNOME ist eine hoch-intuitive und benutzerfreundliche Benutzeroberfläche 5 base-x 3d-printing gnome-desktop-environment GNOME Desktop Pracovní prostředí GNOME GNOME-Desktop GNOME is a highly intuitive and user friendly desktop environment. GNOME je velmi intuitivní a uživatelsky přívětivé pracovní prostředí. GNOME ist eine hoch-intuitive und benutzerfreundliche Benutzeroberfläche 5 firefox gnome-desktop gnome-games epiphany libreoffice 71d988ca33b31a18b0d775478d6f59b40583c794ae76393284850ee97bfba4dc-comps-f19.xml.gz000066400000000000000000000013701400672373200354730ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/other_metadata19Rbcomps-f19.xmlVn0)%nd@Rmԟj%8!'lz`;i7rWl6n H{go39۽IRB!}5xI HBMۡ{6:߿8> e/N |=w/ȱ? #T}Z)6!lbt+\8؎05q_;A="UYjg 62"sxw 0XrK$\ PokgnŠPBLd< Haf q K W`E)emBfܪgaNg^ᣳ2K*y+y`lx Ŏkn u1j mӝT}a 'rjrpULʡl-Hɷ|o6YY!'OQpN$YwЈvku(VXm*w_$p+Ekl:D\|Gƴf?i ^#gAzr,([LW)7F]0}"lD_^sן`}R# $<~Ř߽ lv<.JI!=Xj:8x블=R pjC g~/- %.;߀ejPY3pWLO*_6J!;oyzU*}Ӱb9)XNIfH?e( +~: ɺ< c941e2c77a27cdca324c0e0bc3190e134fddddb36a03fb5517e55d156b955205-comps-f19.xml.xz000066400000000000000000000014441400672373200357230ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/other_metadata7zXZִF!t/ ;]ˇf{3(1v%f M* mj)p80V ג'<Wv6#Nymީ H6POڽ:dbA@1*H[W UҰ4#΍Hs7>~N%{Py+Gb΃͇Ի 11zC5 Xď屋<бW8l| c'7_n_7(J[v[7 uXe] Xe Msb B(}s_̅>a\l0̧PU6\b0C_Ѻ@?n'* .ɢ@;ЀTE@Nct.w4 gqlW0M lI+,1^KG 3d-printing 3D Printing 3D printing software false true blender cura gnome-desktop-environment GNOME Desktop Pracovní prostředí GNOME GNOME-Desktop GNOME is a highly intuitive and user friendly desktop environment. GNOME je velmi intuitivní a uživatelsky přívětivé pracovní prostředí. GNOME ist eine hoch-intuitive und benutzerfreundliche Benutzeroberfläche 5 base-x 3d-printing gnome-desktop-environment GNOME Desktop Pracovní prostředí GNOME GNOME-Desktop GNOME is a highly intuitive and user friendly desktop environment. GNOME je velmi intuitivní a uživatelsky přívětivé pracovní prostředí. 
GNOME ist eine hoch-intuitive und benutzerfreundliche Benutzeroberfläche 5 firefox gnome-desktop gnome-games epiphany libreoffice createrepo_c-0.17.0/tests/testdata/packages/000077500000000000000000000000001400672373200207675ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/packages/Archer-3.4.5-6.x86_64.rpm000066400000000000000000000060351400672373200244640ustar00rootroot00000000000000Archer-2:3.4.5-6T>D @ (Ao>KH92dbe81e64b4f75ca4aa9b7bf27d1112b725a650 >@ ?d   )8<TX]a q    W \hw   (8 9:BFGHI(X,YHZt[\]^bd~ltuvwxzefCArcher3.4.56Complex package.Archer packageQb localhost.localdomainISISGPLSterling ArcherDevelopment/Toolshttp://soo_complex_package.eu/linuxx86_64A큤QbQb Qbe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855rootrootrootrootrootrootArcher-3.4.5-6.src.rpmbarabarbbarcbardbareArcherArcher(x86-64)     fooafoobfoocfoodfooefoofrpmlib(CompressedFileNames)rpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)rpmlib(PayloadIsXz)21.0.0-134563.0.4-14.6.0-14.0-15.2-1 bbabbbbbcbbdbbe22221111.2222.3333-44443333444455554.9.1.3Qb@OO[@Tomas Mlcoch - 3.3.3-3Tomas Mlcoch - 2.2.2-2Tomas Mlcoch - 1.1.1-1- 3. changelog.- That was totally ninja!- First changelog.aaaaabaacaadaaelocalhost.localdomain 1365416480poplpw 2211.22.33-443344552:3.4.5-62:3.4.5-6 222111.2.3-4333444555complex_aArcher-3.4.5README/usr/bin//usr/share/doc//usr/share/doc/Archer-3.4.5/-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnudirectoryemptylrr.p먎)ߧxz2?7zXZ !#,] c2 1O8۟Wџ+̼u0桶ϋ_= hg֞uy_:_ `յń__GMi \{Rؚ&N˳ʋ 'jТbJ#Pz kFނ" W^u=~:mI YZcreaterepo_c-0.17.0/tests/testdata/packages/Rimmer-1.0.2-2.x86_64.rpm000066400000000000000000000060301400672373200244710ustar00rootroot00000000000000Rimmer-1.0.2-2T>D ,0@17046e89e47a886ce35b5b606337e6e50fadb808 Sb 蔳B>Jt?dd   )DHTX\ j|    K P\kzBdhl(n8x9:BnFrGHIXYZ[\]^b-defltuvwxz 048<@DHLPTX\`CRimmer1.0.22Package with weak deps.Package with weak deps.S:6terminatorGPLArnold RimmerDevelopment/Toolshttp://pkgwithweakdeps.eu/linuxx86_64A큤S:5S:5S:5e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855rootrootrootrootrootrootRimmer-1.0.2-2.src.rpmRimmerRimmer(x86-64)pro     reqreqprerpmlib(CompressedFileNames)rpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)rpmlib(PayloadIsXz)123.0.4-14.6.0-14.0-15.2-1 con54.11.90OO[@Tomas Mlcoch - 2.2.2-2Tomas Mlcoch - 1.1.1-1- Look, we've all got something to contribute to this discussion. 
And I think what you should contribute from now on is silence.- First changelog.obsterminator 1396363574 1.0.2-21.0.2-23 4complex_aRimmer-1.0.2README/usr/bin//usr/share/doc//usr/share/doc/Rimmer-1.0.2/-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=genericcpioxz2x86_64-redhat-linux-gnuemptydirectory?,3gDƷGrec8sug6sup9enh7?`7zXZ !#,] b2u Q{JwPiϵC\VKhG}]\ZgQjүz_A4;@шOLq#.>D @ƂjTȶP 396e192eb4623372169b958561b0f87389f54433|>"?d  /PTlp t  (89:FXY(b9dlzefCbalicek-iso885911.1.11Balicek s ""divny o: "" divny u: QP7localhost.localdomainGPLSystem Environment/Shellshttp://fake_bash_shell.com/linuxx86_64balicek-iso88591-1.1.1-1.src.rpmBalicekbalicek-iso88591balicek-iso88591(x86-64)    rpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)rpmlib(CompressedFileNames)rpmlib(PayloadIsXz)4.6.0-14.0-13.0.4-15.2-14.9.1.3O[@Toms Mlcoch - 1.1.1-1- Nejak comment ""localhost.localdomain 13642116211.1.1-11.1.1-1-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnuĺYL\Lb xz2?7zXZ !#,{] b3;APtqB&DZ.#B+O(qrtpy z.Q|)K YZcreaterepo_c-0.17.0/tests/testdata/packages/balicek-iso88592-1.1.1-1.x86_64.rpm000066400000000000000000000033151400672373200260420ustar00rootroot00000000000000balicek-iso88592-1.1.1-1T>D @ci{"?d  2X\tx |((89:FX4Y@bQdlzefCbalicek-iso885921.1.11Balek s ""Bl k pl dy n n. ""QP3localhost.localdomainGPLSystem Environment/Shellshttp://fake_bash_shell.com/linuxx86_64balicek-iso88592-1.1.1-1.src.rpmBalekbalicek-iso88592balicek-iso88592(x86-64)    blkrpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)rpmlib(CompressedFileNames)rpmlib(PayloadIsXz)4.6.0-14.0-13.0.4-15.2-14.9.1.3O[@Tom Mloch - 1.1.1-1- Njak comment "خ"localhost.localdomain 13642105861.1.1-11.1.1-1-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnu9?2Զ(ůxz2?7zXZ !#,{] b3;APtqB&DZ.#B+O(qrtpy z.Q|)K YZcreaterepo_c-0.17.0/tests/testdata/packages/balicek-utf8-1.1.1-1.x86_64.rpm000066400000000000000000000033751400672373200255240ustar00rootroot00000000000000balicek-utf8-1.1.1-1T>D @47d .87589efbf0323da216415c7db90854f9c49b0abe4|>"A?1d  :tx $8(89:FAXdYpbdlz(e,f/Cbalicek-utf81.1.11Balíček s "ěščřžýáíéů"Bílý kůň pěl ódy ná ná. 
"ěščřžýáíéů"QP3ulocalhost.localdomainGPLSystem Environment/Shellshttp://fake_bash_shell.com/linuxx86_64balicek-utf8-1.1.1-1.src.rpmBalíčekbalicek-utf8balicek-utf8(x86-64)    bílýkůňrpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)rpmlib(CompressedFileNames)rpmlib(PayloadIsXz)4.6.0-14.0-13.0.4-15.2-14.9.1.3O[@Tomáš Mlčoch - 1.1.1-1- Nějaký comment "ěščřžýáíéůúÁŠČŘŽÝÁÍÉŮÚ"localhost.localdomain 13642105491.1.1-11.1.1-1-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnuԎ0[1Hh*xz2?7zXZ !#,{] b3;APtqB&DZ.#B+O(qrtpy z.Q|)K YZcreaterepo_c-0.17.0/tests/testdata/packages/empty-0-0.src.rpm000066400000000000000000000027201400672373200237260ustar00rootroot00000000000000empty-0-0T>D @nIuO2cd068653a6a35807cb304ad20448cdf091bde8f>'e?Ud   ,0 5AGPT V X \  (GH I\]^#$d(e-f2F4Cempty00""Ohlocalhost.localdomainLGPLUnspecifiedlinuxx86_64Oh3dc81ed5769274941e37f5c60424f453b943ba55151a06afb07e38cbdd62845d tmlcochtmlcoch  rpmlib(FileDigests)rpmlib(CompressedFileNames)4.6.0-13.0.4-14.9.1.3> 4empty.speccpiogzip9localhost.localdomain 1340696582?PM P켿"y+$¢SW{ȧ}O3\vwfvm@$d t +^z:ƜҢj6ppa) *%ϳA@xHOpӔ:PsyI]#aɋJPSLVLՅn\l'yj&w5MkzZscreaterepo_c-0.17.0/tests/testdata/packages/empty-0-0.x86_64.rpm000066400000000000000000000025711400672373200241010ustar00rootroot00000000000000empty-0-0T>D @anT:4 ] ګvucRccf1d5917bca399e64de9878a32c6e75d44a3b4e|>?d   ,0 5AGN`t(F X,Y4b<dlzefCempty00""Ohlocalhost.localdomainLGPLUnspecifiedlinuxx86_64empty-0-0.src.rpmemptyempty(x86-64)    rpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)rpmlib(CompressedFileNames)rpmlib(PayloadIsXz)4.6.0-14.0-13.0.4-15.2-14.9.1.3localhost.localdomain 13406965820-00-0-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnunIuOxz2? 
7zXZ !#,{] b3;APtqB&DZ.#B+O(qrtpy z.Q|)K YZcreaterepo_c-0.17.0/tests/testdata/packages/fake_bash-1.1.1-1.x86_64.rpm000066400000000000000000000042751400672373200251510ustar00rootroot00000000000000fake_bash-1.1.1-1T>D @3fZUw5OK10d85b55d2a06084f617504ee92dd725aa98643b>7q?ad   04LP Tn     8L(89:F$GHHLIPXTY`\t]x^bdl t$u(v,w@xDzHXe\f_Cfake_bash1.1.11Fake bashFake bash packageOulocalhost.localdomainGPLSystem Environment/Shellshttp://fake_bash_shell.com/linuxx86_64Oue3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855rootrootfake_bash-1.1.1-1.src.rpmbashfake_bashfake_bash(x86-64)    rpmlib(CompressedFileNames)rpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)super_kernelrpmlib(PayloadIsXz)3.0.4-14.6.0-14.0-15.2-14.9.1.2O[@Tomas Mlcoch - 1.1.1-1- First releaselocalhost.localdomain 1334670842V|1.1.1-11.1.1-1fake_bash/usr/bin/-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnudirectoryemptyJnyA\*册xz2?7zXZ !#,Z] c7@r#Yr ]Sgu "Vrƹ x"\\CZ:-͇_Hle%_I`(\eز-!h0ezR4 YZcreaterepo_c-0.17.0/tests/testdata/packages/super_kernel-6.0.1-2.x86_64.rpm000066400000000000000000000054351400672373200257500ustar00rootroot00000000000000super_kernel-6.0.1-2T>D @ ayY^Oe05e7db64bdecde91ac31cac5236065d499e2746>=U?Ed  $hl      0(89 :xBFGHIXYZ[\ ](^Gbadltuvwx$z,<e@fCCsuper_kernel6.0.12Test packageThis package has provides, requires, obsoletes, conflicts options.Oulocalhost.localdomainLGPLv2Applications/Systemhttp://so_super_kernel.com/it_is_awesome/yep_it_really_islinuxx86_64OuOue3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855rootrootrootrootsuper_kernel-6.0.1-2.src.rpmnot_so_super_kernelsuper_kernelsuper_kernelsuper_kernel(x86-64)     bzip2expatglibrpmlib(CompressedFileNames)rpmlib(FileDigests)rpmlib(PayloadFilesHavePrefix)zlibrpmlib(PayloadIsXz)1.0.02.26.03.0.4-14.6.0-14.0-15.2-1kernelsuper_kernelsuper_kernel5.0.04.0.04.9.1.2O[@O[@Tomas Mlcoch - 6.0.1-2Tomas Mlcoch - 6.0.1-1- Second release- First releasesuper_kernelkernellocalhost.localdomain 1334670843VV~5.8.06.0.06.0.1-26.0.1-25.9.0super_kernelsuper_kernel.8.gz/usr/bin//usr/share/man/-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=genericcpiox86_64-redhat-linux-gnudirectoryempty_?bd?· xz2?07zXZ !#,v] c7@``;O -ꡏ q_ ]3L;J]|rEn}8|:7Ry@ŨLs{T}^$燉Ov YZcreaterepo_c-0.17.0/tests/testdata/repo_00/000077500000000000000000000000001400672373200204555ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_00/repodata/000077500000000000000000000000001400672373200222545ustar00rootroot000000000000001cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz000066400000000000000000000002061400672373200361360ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_00/repodataA н /]cDw=&Jtx4~95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f-filelists.xml.gz000066400000000000000000000001731400672373200366160ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_00/repodataE;0 @wNyoB7T8".djikKg]<e0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595-primary.xml.zck000066400000000000000000000004151400672373200357330ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_00/repodataZCK1$;|$F7~%|0BTǺeFqk8W2BፊtJ-Gx4=[؊i}_[C H֎:TP(/ 
ep&o_;kҒJUShI90BمSi74ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6-other.xml.gz000066400000000000000000000001731400672373200353070ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_00/repodata=10 WDމa*/ p*OtMwmq(j[a29 }-3Lr:U7$]֧-+>NBg_A^ߴ7q4ycreaterepo_c-0.17.0/tests/testdata/repo_00/repodata/repomd.xml000066400000000000000000000064651400672373200242770ustar00rootroot00000000000000 1533242352 1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f 1533242352 134 167 95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f bf9808b81cb2dbc54b4b8e35adc584ddcaa73bd81f7088d73bf7dbbada961310 1533242352 123 125 ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6 e0ed5e0054194df036cf09c1a911e15bf2a4e7f26f2a788b6f47d53e80717ccc 1533242352 123 121 e0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595 e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f 243baf7c02f5241d46f2e8c237ebc7ea7e257ca993d9cfe1304254c7ba7f6546 1533242352 269 167 132 2e7db4492173b6c437fd1299dc335e63d09f24cbdadeac5175a61b787c2f7a44 bf9808b81cb2dbc54b4b8e35adc584ddcaa73bd81f7088d73bf7dbbada961310 f6232be7311fb31184a849895ae0030caf5b84e628b04be879dccf37e6ad2121 1533242352 262 125 131 a939c4765106655c3f7a13fb41d0f239824efa66bcd6c1e6c044a854012bda75 e0ed5e0054194df036cf09c1a911e15bf2a4e7f26f2a788b6f47d53e80717ccc 482e0dcc2e58d0e616f9b00c7789948e6c41f89488e7f4ad2fc3bcc7b549c6d5 1533242352 263 121 131 createrepo_c-0.17.0/tests/testdata/repo_01/000077500000000000000000000000001400672373200204565ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_01/repodata/000077500000000000000000000000001400672373200222555ustar00rootroot000000000000006c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e-primary.xml.gz000066400000000000000000000014171400672373200355200ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_01/repodataqyOUɒ0WtJbdٞreIs%1*D$x-[DzZ%E7ۺB 3x&rW14*AӜj Z7ׄT['o@ޒE2Qׂ}-ٚOIEz@-"B(ⴆD $*"ˮQv.C?"ݏ!hDVh4ؙ`$IJ֪IUI 4;[Xo8Ӣp) /|w̦+ Y,di8w<:/O""C.T(=T ""rPd6EԀB%5f-hkf˴B#PNDN,N&9*ٷMi`%J@v,GY 4Հ VrN<xa\}{A0b\i y\CrG#{&UW[W!`mN~%b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef-other.xml.gz000066400000000000000000000005141400672373200355770ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_01/repodata=s0 l|p)[sN3_岴c=Ff*i䇔Z-eR m;4,WKI;k {>|\ -l`S2Esy^ 1334667230 c7db035d0e6f1b2e883a7fa3229e2d2be70c05a8b8d2b57dbb5f9c1a67483b6c 85bc611be5d81ac8da2fe01e98ef741d243d1518fcc46ada70660020803fbf09 1334667230 273 389 b752a73d9efd4006d740f943db5fb7c2dd77a8324bd99da92e86bd55a2c126ef da6096c924349af0c326224a33be0cdb26897fbe3d25477ac217261652449445 1334667230 332 530 6c662d665c24de9a0f62c17d8fa50622307739d7376f0d19097ca96c6d7f5e3e 0fc6cadf97d515e87491d24dc9712d8ddaf2226a21ae7f131ff42d71a877c496 1334667230 782 2085 createrepo_c-0.17.0/tests/testdata/repo_02/000077500000000000000000000000001400672373200204575ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_02/repodata/000077500000000000000000000000001400672373200222565ustar00rootroot000000000000003b7e6ecd01af9cb674aff6458186911d7081bb5676d5562a21a963afc8a8bcc7-filelists.xml.gz000066400000000000000000000005251400672373200363140ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_02/repodatar }a_EĎ]]gpPK/LM6. 
؟⼴DZ"h뒾X.('P3^y$&hB^#G9k|`&sUMep>t8"~#WbK3~#o/(ЁUab5d3edeea50f9b4ec5ee13e4d25c147e318e3a433dbabc94d3461f58ac28255-other.xml.gz000066400000000000000000000006231400672373200357060ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_02/repodata?s0$ aٕTqEZrEf%=i(^C~ⲶXr pkǺ?>ޏ\`ג7!qܷvan ⛞l5>r[W\x-tRʤZKrSRTIz'1QUC ֆ3XlS(Wܻc8O&uH+{48[á 5S` Di}-eO}x í~[5"; OuxaGv;!OwiVB~>vYb?rNeJA &4`&^*elSR!\fuj߀m2b)F:F3}d&e=;it08G3bcde64b04916a2a72fdc257d61bc922c70b3d58e953499180585f7a360ce86cf-primary.xml.gz000066400000000000000000000017151400672373200356440ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_02/repodataIo6+Z`,j3-=dCd=dVRr>Jq) [ɏN,ueSURZ-~>%" ^۶aUh,H;bI]uEՔ@J оKnJm j4O(!yY*ɣǀ/~b4u/(&*~(QP^5!YyԹpZdhng܁^ ̱nԝ 4N=,H`dmȜg!;=w)6!,%?e[PQơwv9b!3 F%Ly>فRwHODC|}43כQ"+݊/ܘE,$n :&^+t gL'YJњ!NP|S* ѷ?Bv,8lJkŎ$+UwMt-HUW%T-7eЃA鰩;u 3QZBVæa; jD jDgZ,__L,kOg8F'a턘s3x︰pp4g&q`rcDR{@Ҽ\wLELXfU׵դ hC=dz)],QYŅ3λ̸8/18I!6{IGӠrf~.&;1W\{x6\g9{ d/8d'ٸon/<6}ު5 x2=F4rE34zsvβM?2Gt=}5  createrepo_c-0.17.0/tests/testdata/repo_02/repodata/repomd.xml000066400000000000000000000027311400672373200242710ustar00rootroot00000000000000 1334670929 3b7e6ecd01af9cb674aff6458186911d7081bb5676d5562a21a963afc8a8bcc7 5e4aa5495551f66ced7fbfb127e922d6af864fd357d4b72490dff9efdee36869 1334670929 341 597 ab5d3edeea50f9b4ec5ee13e4d25c147e318e3a433dbabc94d3461f58ac28255 85200c23b9cffddbb1a8f07ef0b0454b7d28877ffbea1d2d0a8694b18909fcb8 1334670929 403 819 bcde64b04916a2a72fdc257d61bc922c70b3d58e953499180585f7a360ce86cf a93dfbbab1d02b0d3a49a000a6d0970dd6529f0e2897c434d67d97da2624e3a0 1334670929 973 3252 createrepo_c-0.17.0/tests/testdata/repo_03/000077500000000000000000000000001400672373200204605ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_03/repodata/000077500000000000000000000000001400672373200222575ustar00rootroot000000000000001cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae-primary.xml.gz000066400000000000000000000002061400672373200361410ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_03/repodataA н /]cDw=&Jtx4~95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f-filelists.xml.gz000066400000000000000000000001731400672373200366210ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_03/repodataE;0 @wNyoB7T8".djikKg]<:۪|h )E~BHmwۭQ X-_.y|?# SdzE yCèrqGK,Tdc UcҌ9_:;@ XN6=[QOBj\D[*t@*%]dnto{[atmܦ CV+{%Z=PO0#\oWH_c <$&d<ロnWul+KsFkpnl A(3xC}C45^`p+jmՇkjH0xFITX23 /IDjj |?țLi/ G]>|é+=kWX"p2['[7~.OW!1f]xr3>#.LG/&wyo}\x0~9:ЎHo+n2Dg^=;i00NUDE1ѽZ>0 YZa939c4765106655c3f7a13fb41d0f239824efa66bcd6c1e6c044a854012bda75-other.xml.zck000066400000000000000000000004071400672373200353410ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_03/repodataZCK1H. 
.X wlA/üǵIV)C$.=_u:_ 5*iCC;G"Ze1y|7UX/(/ m5Bp #Hh!-a@,F(GU)87Q]iSmE4%7ԀV"7f1 + ‹N= QSL)Pf(/ ae0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595-primary.xml.zck000066400000000000000000000004151400672373200357360ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_03/repodataZCK1$;|$F7~%|0BTǺeFqk8W2BፊtJ-Gx4=[؊i}_[C H֎:TP(/ ep&o_;kҒJUShI90BمSi74ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6-other.xml.gz000066400000000000000000000001731400672373200353120ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_03/repodata=10 WDމa*/ p*OtMwmq(j[a29 }-3Lr:U7$]֧-+>NBg_A^ߴ7q4ycreaterepo_c-0.17.0/tests/testdata/repo_03/repodata/repomd.xml000066400000000000000000000073761400672373200243040ustar00rootroot00000000000000 1533242352 1cb61ea996355add02b1426ed4c1780ea75ce0c04c5d1107c025c3fbd7d8bcae e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f 1533242352 134 167 95a4415d859d7120efb6b3cf964c07bebbff9a5275ca673e6e74a97bcbfb2a5f bf9808b81cb2dbc54b4b8e35adc584ddcaa73bd81f7088d73bf7dbbada961310 1533242352 123 125 ef3e20691954c3d1318ec3071a982da339f4ed76967ded668b795c9e070aaab6 e0ed5e0054194df036cf09c1a911e15bf2a4e7f26f2a788b6f47d53e80717ccc 1533242352 123 121 e0ac03cd77e95e724dbf90ded0dba664e233315a8940051dd8882c56b9878595 e1e2ffd2fb1ee76f87b70750d00ca5677a252b397ab6c2389137a0c33e7b359f 243baf7c02f5241d46f2e8c237ebc7ea7e257ca993d9cfe1304254c7ba7f6546 1533242352 269 167 132 2e7db4492173b6c437fd1299dc335e63d09f24cbdadeac5175a61b787c2f7a44 bf9808b81cb2dbc54b4b8e35adc584ddcaa73bd81f7088d73bf7dbbada961310 f6232be7311fb31184a849895ae0030caf5b84e628b04be879dccf37e6ad2121 1533242352 262 125 131 a939c4765106655c3f7a13fb41d0f239824efa66bcd6c1e6c044a854012bda75 e0ed5e0054194df036cf09c1a911e15bf2a4e7f26f2a788b6f47d53e80717ccc 482e0dcc2e58d0e616f9b00c7789948e6c41f89488e7f4ad2fc3bcc7b549c6d5 1533242352 263 121 131 a850093e240506c728d6ce26a6fc51d6a7fe10730c67988d13afa7dd82df82d5 e2cf33aa8066a090dcb3e24c23e86ea62e9c36ba0f0641d9ac8b787c8686a2d7 1554130302 1044 2289 createrepo_c-0.17.0/tests/testdata/repo_koji_01/000077500000000000000000000000001400672373200214725ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_01/repodata/000077500000000000000000000000001400672373200232715ustar00rootroot000000000000005d039d9ec332d158c69d083f04ac88a187c6b68444472b802eaf0249aec83294-other.xml.gz000066400000000000000000000014601400672373200360720ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_01/repodataXK6W T`OEJ(uMȞ4X$@o%^_ߡ|37!~Z +Šx?z5留W[;mwB=Z嬂ރQ;U!هy9.aUx 4bf,")<(d1&iRq LqƥeY*zۧ6 `} peF_({`u"^X+l l oQF&}􈆘^ MLvZh?TW9$V),y7>y禵P560ܾSӴ=>Xޣ5pݞ'Z bVvU )lFQr` AO8KoecxPї/qlT[`R[rn[{oO')d"#&ڂ|F"CLN1KN{y(wI~iK?/fTPqH{wd65:"TskyMǖ>&Gzf}ckP鲡?D)TDu9pD KVxf5pD 3kا'#Ϲ1fC^ sN7q&ʗ/=9x#'z ئ:8ޗ8ax" SSE ZD: ył.tzzzO7Y) 3o߇b153f86915d1dacbb89b244da26e50adb204195262fd3562290de26725daa21d-primary.xml.gz000066400000000000000000000017571400672373200366170ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_01/repodataW[o6~ϯ nn[oaC{ (9KFRN_CInKOų۶A;T ^[W ?+ϊVXʩY፵7n=>l'P>Sm:yUl3adhQsIe$;@$$KmԜFzalSϛs>q\ uGxMtF?\/#d',νWGZ/ Leg 0[z3q&ȑ2f-Ӯ]on#=CW#xQfN'0'udnB/4`@ݵ>m:-3I4aFuCװm^O[u9p'_rxGZ6HX m CzFyT>  дȩUgj>m_xu7}7jw_GԊu/ܚ(h?hrR?![t'AA rւ y82K0IISZ5ϣ0cB1cd_,[= <,sM#eDe+ Ni_A |lQe7%udd&[w+`yc0e88e9a81f76341e91cd06f8ded80d4c289bdb4977e7624068802654b6da506-filelists.xml.gz000066400000000000000000000007471400672373200370600ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_01/repodatan0y 
χ`vwk5M$2T[")ݺAҦ0+(F֛En_Y|>6& 2^c&]+( lF=Q[@>qtnHš8ZZ(x@%(kN^ctn ^VxgG-s.fti+wWrւaC3b &{zzNCO|jI97r/ݔ+;$O MM8K`"oq.\@u -ּ@¦>{ssjlR #wo_]^ 1552569257 b153f86915d1dacbb89b244da26e50adb204195262fd3562290de26725daa21d c5153dc63472e2ec2e070c9e1d3f5623fc1f4c3aff2fa58df64b069ddf250ca1 1552569257 1007 3605 c0e88e9a81f76341e91cd06f8ded80d4c289bdb4977e7624068802654b6da506 df6b6e5f656226b05458d09a866fac2a0f042af9df7922dd898114632285d7b3 1552569257 487 1555 5d039d9ec332d158c69d083f04ac88a187c6b68444472b802eaf0249aec83294 fc10db1e2b697aeaddbca25d22b1efe53e68370bf0550c402829f3819544b99c 1552569257 816 4272 createrepo_c-0.17.0/tests/testdata/repo_koji_02/000077500000000000000000000000001400672373200214735ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_02/repodata/000077500000000000000000000000001400672373200232725ustar00rootroot00000000000000118e790330a62cfb167ce670478b25f4bfc58d7fc671096e4fd294fe40cee201-filelists.xml.gz000066400000000000000000000005221400672373200371620ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_02/repodata_K0w?EȻIӮi+A}&irڴ4~z9"/;/m^a3f$6uNSnY佱^9ןQh3n@@'p~j}+|R4&^寃0bG_ Gmz|**Cfeb0b5fdcb0ee1cf981b041f0749ec381145cd9f6b4397991ef5dafcb6807bba-other.xml.gz000066400000000000000000000012571400672373200370310ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_koji_02/repodata[o0}##M X[xm7bBPxDn6Qs9 k7MSyأu&1a%ʸJr>&׿"ryq2Ķ0_6cm}>Y٭[%ݠpSm@mㅝ;3^̳ ӳRΙXlJTN!hO m U8֜F3L;pc 阬#GI-nVq0N|L8lqj˹˫9خM+L};}b?p8y;ll–ϖ6t! +%.瑠\[f=5J3V؝;۸~sLA`TSKuuzg~z>ih߇<#_.q>[׬5K!Ws1ǭKr%#Zu]Ah+(Qxq#a5pFQa=SGH!1d%s'\=/NnjOn";7z`R9&vw=֐g1/Wh{-{lC#&i?lדa$Ɨ%4*K0.jae}m@)ΦO6-^>lL,@+l=5U~`F:g |c vd߼q/V& 1552649422 ee9261d1d2916d841cf24bb01743198ec63ed596f410784602eca2350e430e17 13c7b6a7e412d27938b297013d831bfd82bb6d32ee784f3155038b7879a4fc3d 1552649422 913 1859 118e790330a62cfb167ce670478b25f4bfc58d7fc671096e4fd294fe40cee201 43bd8d69184c33764c6e892b69ff8080e46aff1b2498ea730283cf8919185375 1552649422 338 602 feb0b5fdcb0ee1cf981b041f0749ec381145cd9f6b4397991ef5dafcb6807bba 5f475283d119496183e9c23fc8b21fdf3e40b75ae6ef568ea0f962f3ea4728f0 1552649422 687 1956 createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/000077500000000000000000000000001400672373200252415ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodata/000077500000000000000000000000001400672373200270405ustar00rootroot000000000000000219a2f1f9f32af6b7873905269ac1bc27b03e0caf3968c929a49e5a939e8935-updateinfo_01.xml.gz.zck000066400000000000000000000012201400672373200440310ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataZCK1d$4f k @d.8F"uÖX zjVeP#̀=Wjsok'Z(/`ZVS Pi&mM?q,G|D_IIH~9l01{#7} wupFxa (`Ɛ 3i#%MSgɦD7H7l (ji#*#G~}u&@5Qv˜Vb@4.q0e-#̜w99C1ffZ>rR1ԝoՏ`lyf0.~0 VZ) F՝ix bP:44JAE BYKߢJ(M҉̢+ (څX3f= , S=ד%6D̅ab˝&?T|\!j $&I}²N15JU06*MDFlbŌ<'uo d)#PY0N.اNAGt$LKI,EkԡfpP1o{r y04460bfaf6cb5af6b0925d8c99401a44e5192d287796aed4cced5f7ce881761f-comps.f20.xml000066400000000000000000000044761400672373200422120ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodata 20d-printing 20D Printing 3D printing software false true blender cura gnome-desktop-environment GNOME Desktop Pracovní prostředí GNOME GNOME-Desktop GNOME is a highly intuitive and user friendly desktop environment. GNOME je velmi intuitivní a uživatelsky přívětivé pracovní prostředí. 
GNOME ist eine hoch-intuitive und benutzerfreundliche Benutzeroberfläche 5 base-x 3d-printing gnome-desktop-environment GNOME Desktop Pracovní prostředí GNOME GNOME-Desktop GNOME is a highly intuitive and user friendly desktop environment. GNOME je velmi intuitivní a uživatelsky přívětivé pracovní prostředí. GNOME ist eine hoch-intuitive und benutzerfreundliche Benutzeroberfläche 5 firefox gnome-desktop gnome-games epiphany libreoffice 1e12239bf5cb07ec73c74482c35e80dabe30dbe2fdd57bd9e557d987cbacc8c2-primary.sqlite.bz2000066400000000000000000000072771400672373200437230ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataBZh51AY&SY킖 o5Y`?m|zC`4rrziQzM)5B==$m~ڛS5?S5==&&=MɄ0 CiQL21 2h@h4F d4 hi'&#444 2@L!aLa2 0M`#bi2i04m1 4d@z FP"HhF11hJy6OI(4zhz hPս%ό3!I9/$!%#31_>74h͞R$ FyC \ /ţysh#܅wЁ$7͢65agU 4/iQ |חS҂9Р˸yC9P s01!p(<#Zy;]|ojc` z)BuS C>?oas^_[{->o"^RE0a$!#Y"8Z"g`j(.֚묽EqM:m c3m6nJNڅ Y)IqOA}ЅdVkii6M i&NavڜM8>~\d4|c1fӄPw&/:dvYJXb(3`"W=%[45 Ô{Tg3#gɱ|(P^<}ՉdzYr>T8`2UG-.ș|:gbZugD+'U(zQ˵|oC=_N6&9PqyW M&}${[큒%/؊JZDGX4yp:- y޿!<*luG,r'-&ePö Լq>l\W#{!x}w >撒?Xw?{$b58A\%  qHBVQwxq p<^fM8qYgpg Ï{V 6rmOt.ugIFNT-XJ cȱ4tt9v'ZnAH 0Vn/ֹkrf{nߦ!c{Fje1 1lݱ~ly!lXN ^]vWCÈ|jAo;,[$m $#ؗ؁{o쿏Qz~OC$V"?Ag1j Y4(_Qt)GJ %%nM-ja^Q݈ vɆ`*7 !`+L*"\eZD2r v#{ot㫳>FwDA+;tvb J1RJWרzHHjS8X2|gdl/ܜqgcD6ͬVѕ-Fd?ofє14L`+!Z+BnNfv- hH^ ăeP~M<"bwò^Lu%ku|$dh/ 6 q$߹i2r[' Bpzs j-t~B&əo}_wy[dGl>M;i夼RRPmA$a dK&m[4\cAx @~,#މ ~Ў@^duP6@Zo0--76 X0kHsBDCeS,eD0-ltV D$0HR1d`!n{' xqM"PB o cc(dc@P\ .NwQ5akU# x4ˑT`0! h' ,k@4p4srV3 e*Y| l7HFaN)DTn<dAbK B1eÇYT*]ٸhrA63QB1H`c CPRG͠^+:mSH0A @%KH2z%ecuwԡm$ǟc.;,L \wp 6uRAgt<- M.k*sFĆa\.$70Zkel rL BΜ ` {2jx3SV0c̐AL+OOʒ\'8i,9 %6Oo I ]1b@mN5ha Z8BtTs%Jd,&=&7 m΄^Em 8 B:A) eP%(@)^)y4 lbPw$9+j`Ws%Ut k)B@^=bmK:*k Uް7 Tksqf|39)mO 4Cmmބ+A| XMꔧ wTN41 |Ny<-w%0.acq0a#N:,,`!%f S7ޘn"8%9 &JN)Ur% `@DcPLE)xX`$7t@0e܌Ap*20řJ̊U2i KlKr 7_tA)3aj cOЬ8 RECIZ 9i;c5Kϯ?gVSKrKbnmw _MoOQ,a 2 Ve0A:{WKخDê]j$0JȢ,(prE8P킖2bbdf70c4394e71c2d3905c143d460009d04359de5a90b72b47cdb9dbdcc079d-comps.f20.xml.gz.zck000066400000000000000000000015401400672373200433420ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataZCK1$| >Nr[ܵΞ!СLx&±rC4@z|篶wbGcy3΀T\Ntam#|m>(/`>Fz3P@[&R%6QlS:W|P 6S {M a"N:)!-tij&J~(,4-q|F0xИ,t1Q! jX$24Eޥ(~S [hy/zT^;T\J4:ԱXoiIFƖ|b7Xk/6tUݍ~!8i8uNT8Z?sGT/k 1:4Y#| *z Tȇ9|q)/NKdR77B`Qq1Pؕ)1-f٭۔WjNI:*& i[,ѿ_OG@/&rϻ"[P^rԾ`Gq =)E,rw9*l*KΟG;ٴ%f C` A% B=mk 9kYO#'X+a `G5ip@F jmX@rcŽYSbWAnܴ"iX]ZNSk>勴#ݐC(/)Mz`2 U m]]ḦD̰":܊8Bl/FNeضrLfLY1)KOs:'UsFi/+h8Bjm`%%2bbdf70c4394e71c2d3905c143d460009d04359de5a90b72b47cdb9dbdcc079d-comps.f20.xml.zck000066400000000000000000000015401400672373200427230ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataZCK1$| >Nr[ܵΞ!СLx&±rC4@z|篶wbGcy3΀T\Ntam#|m>(/`>Fz3P@[&R%6QlS:W|P 6S {M a"N:)!-tij&J~(,4-q|F0xИ,t1Q! jX$24Eޥ(~S [hy/zT^;T\J4:ԱXoiIFƖ|b7Xk/6tUݍ~!8i8uNT8Z?sGT/k 1:4Y#| *z Tȇ9|q)/NKdR77B`Qq1Pؕ)1-f٭۔WjNI:*& i[,ѿ_OG@/&rϻ"[P^rԾ`Gq =)E,rw9*l*KΟG;ٴ%f C` A% B=mk 9kYO#'X+a `G5ip@F jmX@rcŽYSbWAnܴ"iX]ZNSk>勴#ݐC(/)Mz`2 U m]]ḦD̰":܊8Bl/FNeضrLfLY1)KOs:'UsFi/+h8Bjm`%%3d6eaa7c77ef92586470dd6a542478e42cc421a85f12e0db93aa783077704cd0-filelists.xml.zck000066400000000000000000000016441400672373200430200ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataZCK1Axb=|sk b2~2 մ JHƒԵȤ^c9[g( o8z'gdK'дv ANE17(/ q-p5ߜP[配%ME.W&%*?Y6&mKR˛CH B˜! 
YGhq{ ,T(/` =_`$mH63ڿ?Y?KV/% @WTRT;$@ $o'LQ2WS]9&~7г_g#Eb^x/@ Yç!b5Je˛vTjW뺢t}ȬZpUuN6[ξZqt*3r W֠°+aVOo.g)n씓USyFZ GW9ylD>ƵqӼZqOMΛj/ܴ n_Nsr,R)'לQbqG[MIOe3-Ϯ$4{r1ELD) :@ CV}hRAsQ>ҭN&.AӶ֒LF ฐh{pe vK26)AM:i#ely]߰#N ] $eBd`3 BTBl=4w FIQ[_3jDBQ9g\ut pi7q ݗwtndXؗx%6vVlcY/u 0p6vu .˚p5Ѫ,+awg.e ƶjBoH?q҇6A$d[ M+k 4OKX,+ol-;t&[ ƞPĶiF5eI81յdZE-|/0E>>4\!!E+CsIfF;KSwd 6K'<,]YEE˔NY߸ιt4Z XJA0\ _.U5P5=\.8mSڻ oL^#kU vݭHL!r+=w5Y$=xh=@4/1=^`ۂy4pfI4E~,glwZp9|\ȶP #¶(b8XAiiZKn!(-*GLoGV$\ a@(H>%@idj꤯qY .C;1TGpN^)8 |7qpt#FR&À<@HG;+/5ba fjv v3F U,#rmm \m)zƽwgTO/( @<^x9q4-î)?׳]KT(9 `}@ypFo~{uOց { i_ps8Rퟟ{yWW]~nQǔC?ܶVR nkj绁?2٣28)QpR$',~ ym?wv⁦+@]#X{vq? |4SN<'*G5S=cD'to-v#Lⅾ{'+툠0AJ<^2@T\|JNRp j,&Q#Zܙ@Z9@(D8NV]OH+;<&ܐ4k (ڢ2<;JR/d@M.}j:qJAL"a(mqչa5M6Mkac5M6R[X]ac5M6AxI`Sv&+zǫ;Td%4 4=|4? ڀ6s (;Xb4f4de7d3254a033b84626f330bc6adb8a3c1a4a20f0ddbe30a5692a041318c81-filelists.sqlite.bz2000066400000000000000000000027571400672373200435140ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataBZh51AY&SY,so>D B2P^Z5  ҟ2S4OT= =@h2=COSA@(jmM S&04#`F  M4LF12`F2dbi&i#0 #2a1HЀ2Jl iDdhjhz=F@=5 2`M#?{z7]#/]e,'}%uax:V0BIنuyNhX]!xHUB~^>Qc[c\b ur ImO}Z%r%Qdb('i#O*:uD1Od0rgD{&b Ƀ]=IeS*fyW² JLS5CލM:b?#i6Vf3\]t`R "Q09X |h-qxN8 aB >Lw <-Щ/itɁY-R΀:2bC^78hx;-%1¨'8i&L:F̡&a178gVPRIfdzc:U:(IQҴ晇T@_.v׼.@k@QGws߿F>6_-2Z.mb=,Lwfwij~JHRI7( !k eR ,ZQS)SHDFg̗ \ydG@ yЉ$1Auu*.`ZeǪ:usp3`uG@LenKK]QH[Hpgf= ƕoB!>S.]|YZ6Yxf؅F3Wwq10 񬢚Er쇿$Z6,y,8q<9Ͽrk{x7;,73{WJof{WXo~A1->fDeRp `vϨD B>J^^ٷe=usfL/~73^pXV^P.61ǗŮ)zjМ=?٧bgMgaʷ+7zfX=~fi`n띟m\|lraj)UԫUr,?߿oj/&evO?afov|nkwl?{Xoڲ4OoBY%9QD;@L}|"6TҺ2~yY`$XdcGfRӗFz\Dz[l,uCv]aqA ((YO +8o1/@:pYpe%wSKi3IܧIud43/ Ϝ^yMU$shVƯ_E5:kWby4# Nj=P\?->,!_C__˂ҕ)?kA?AyxXROwhXUߞhIZX+U 4K R7џzhF4E8+/n+0b/<)[ےo F8⯳u8Q8azدMx7m]b}$f},T͇)B`}NO$x3 Ʌ0"(&{c7t,Od5uȚA{|Ft.0  %Dِ`/wtӐS`@UbޖtsQnC%T2Xq~xr|,Y:nX?N5⍞_&PS6gcL) mrsYx`f6?/~Y^yHA}-<wB]~mܿYeܿW+'Εd:ז)C0+zav )T&ϴt:+rBL!t.-~viha ¤*,h H0@LTt׽=v, cH)8Ɓ*-4 .8d~.56`J=ϥ AVMM  IYÃ3Y-_8uP=U6\AT.˂rD3KjpT C yȅuZwInСϹ0f$YEn,JwΨ]zf!z + 9NJ 8BjFv@[<a6g4ALf R@\쒋u;7v6DZ@YFBN8TIOi6Å{3j+ f5en <#"=Gp/@!g:{(32%fʠ9puI>{->$8qX(ʄV2s[c T{ءİVhՄrfYaRjpN 1.P"/h@2D*D_}J ?3_H R侀 ! חB]p:sLjF1p(dpǺv_?H[PӐ &i$". ۞Y0ݐg-`=ǵPPedeah!"S QD utE4l,4m :0 t9kL\'-JN -@a\t-(A3Cmf7H@\y=*S f41SN- (µ(MY10ܸC̋wˠ >mYC]l3ZE&ȣWh[4OB"ˆ2sY4s~FfB cy)a߸Hmȶ@ \mq>" 6JLk<#(abXѸLLpiRW22TZeGp,c8X/38Y%)`\ڡC Z8ђ"DScEb5a 4 W5@L5xϑs-XO~_WVyy0% 0O' #Yd2dAJ$LYJPϴdپl- BApV`50֣:Boa)Ck؟AcE*!@Il3ҳe9 qTs3FEa)=q$=Uz#0&D-83gCx yAtCJ/l bʡܣ:K2Lq !Bjິ$Bމ ]J(\;[*=A|܎vD rLD[8!ڨ<d႒lLq^@uEWO  a\B!= < 2˵͆ G\#f E gMfQ1exaT-=5peܮz4jf1i -U Μ'“"c9Xѓ8_ Yk!FWH 4Zhޣz@GIXdĒ =6p.gM>6Su '}htG \qbGtClO4<1ACl(69v+ZC  !{+K൑ Z` ?JXꧡ=2BYA7EY3XW1qp35DiYy s 'Œ0 9G(xa"]bP5HC=Q I@>)(M/<~FLK@L6^PdE6 <ahBNfH;v #ah)|xF#@dq z:e qybmE0M?ps(*_.V `4ـĤ1NQBy@aLA4b5ĢҠ?Cs'oT0ĜZ Q d;6"~v\?=%ȑpD&C@D!4Gh uz=@ 7BX=̚j3rQd&h< ,t;,8͎/~&m(XXM;}ayr@HU"7BUɡ8L^Crlb^#@$^jM "4.Bح>x7ֻyjVEp0UrցFiieXz(sp 9cK5V g@c*uLʏ!E)YC( > 3~ CyA0:GMNchO˩o 7ܟ6:$8!~AXru>fy<C2Gj]%sChК\8Kc5R>0i`qD tqXfXhcHzш9 z RRr"b!L+E^G'?G\`"vBpY(n2Eb8%׸Kɑ:8O +#GMns#ZNh~aOZz${,A3xp{.Wpw,)QڔG*YSσwusx26˼^~WG Usb3xriNbLnt8%394Sx-i,s) SæOUa'\ gښe Pמ6M ሴTktG*`0| (٤4ߓtKqMo!vBHQIuoicJpQb :ޤݨEz6zKL (I|+''*;BǩVF\ i0}7&jO:T&#0i6ƋNM֏VozRm(еc`<?@KI9\O@l_$wNQNᔫ{[J#41Ӝ M!BUNC`X}Ǹ}H6[1]@4\&7(CPsRcc4ad]k=Q(Bӱa JjT x}8E.WFf{$) ,Q'IHF,ZuεAJ;+STz5/5;> IH&O=E~,o { HDycAIӱNr\a8În$Ԃ=`uMSr/TcJR.1[(Tƺ ^L[ Ag$zx&p.r<њE"NmHN1Rxmd OwŁ[yv1[HXn ! LoB0ڂ \&/1vq'ti@ {/ ,MGV4ulyj М@JJڜ(BkO`ei1b*M))1*KhTaYoPR \S!qml ~FFhswMͫƀ, Le$! 
Y cEUG@F?pxcB e %lc'\cGX;ӭyGvƚŚnw'&O/ ?G7AoTVǝR Ffi>LF=ww6^~hT(X,ug4hipLqF#NbzhN^Cz~<1^%:֘(~NVP_GV}`w 9'pzN Z I2Qn;ӧQw29vAGʓVo_7[oienid3hBBu qdK u?=~dOgM&cR’ Hkap.qk,v"]TK%> ,BerHYzjP~:EHA!#,@/)c'3v<>CCFޠ.5iNN1&P$ uz6I'ah4/`$ :huڳwl#8 D-''8q\V(!ْ4b+XҶW$"䐜-AǨ}dcTIkmw ӀG5stjZ~m' Tӊ%оehxu\F>Bnp.Koi]γU&ST}${ ]wN-yU%{wށIMqh@ 3 w;%O;7lʻlܥu^W?O }8Lj26hAЦڶx;oĠ!nrhO{zUBwdT>P>ú{p4;p6h\wm/M8#GUsÛ[Mvu=;l[m^Z->$ll޹ >.F8#;pTm JйV  )8^OޟǓwcӶ۸8r+ gf;N厨{pߢ7J'&pKޓeS..n|+ҡ=Z6NLҺqp]&t\ jK%Ҫ6]]'><ݮ8W5&H\VE/ RjocΐuWŏ[TbCx룝9y4o3wSSne؀uvl솾G&޴WWa) gVu;Wb7Cທ?2|<=MnbрuϏNiљ;+G*׽4n/O'v5uĢj^)A/ ] ׽'0!5'VĠ&p0'6FfKm Z):=8195O/tƚ}'}r?6gi@uqѨL;!bry$uמᄆ2W?k{W Ĩ&zm׽I1)sç; [o΀u+NFشVu\b3neVuhO}&߉9EƟ{huLiS{Ӽj3:iiZ=8w^4lnnc_u/_NbدdfȦ.^>7_.<:d{q)T-/Qy#k4{u<<5-:qm i f)`$Ycsحjml꒨i' י2*}%GXӜ}NJG-M=*=6.S :3mM !U&H&Tp9t|V{V[ɂ" Hrkh|͖ϘE1Ucet{<~_m $)Y$6JXئg:X鐨l8 *1t @87\(Aa Sqiq=:^&!)!RgQ ѕ7N4ʝ.Z錑-"qAYł{̬d#kݙ֝ݗJq7JzMV}.VcobkN5! +h 湴p#RLZ+JlӍkS"Y% @AdPC<>nHU8Q]De 7(mq>:!WDZ gNZW3t3G +IQJ{뭕ΕV_:ysofPN)ňQky#)*r fnuoa_mx1{^N݁m*՝A ZWhkfhL57!y[{!/t>6 ԧQ62kHiqJ()%WMY钩6.rgBZ!J2!HL ~RA~۹&Rm D(+fig5AdJ.VYzC¦5W`PS\PB1*pi 4bW䧹,t&l 2E{]_|raFƴ)T{7Ȉ-wufUDS "Fɐ#y{v>U|;{ !ϖB/4=!#+gđx'Ei\75Yƞzۂ) b$WPDV1y@ d=I!cFp&n e\4tFCά"4"3iꝧU..#.$r*Z R#Fԡ̯p]J,Mh+a,F MrlUν7kl{DN[Z;cS!)J"(7x\ڬ@r J1!{W.Onlc94vf(U3;)4?  e<p3$bjyzfFT\ ~11yhƍf:8tJ|D{^k[CUZPQI"ЩBD"~cNS߻<މq|RwΕc7fHnUwȧC'hb8D@IA sɩ{[^SnՐL%N̒Df-8Co>אL][z-7TV~$d"Rih\Y$FTW b)EֶilMߖ铻j5\\xOYrԦP0 aL۸GEzm?1poYh}98I,gAU(ɘ) AE܎7ĀDQ2NށE2=` Q:QBcw!^tsT/{#xWXg4)Ky0LhALwI_4K.._ꖕ؆uQ~P{S8OBS4Z:A >rrE %JobKF %e-<+V,>tJKS]Jq*M)JZw!!;J+H2xUP\QM%F<>AE_Ɨu_let;Ee<Ո2霱*,9-*X[b6Ko5h .|3 !)hkoΧXbl{,loՕ!?/Q9HVo?.Z"4UcM)7k7HmIUhg qQљ}OŇ8RTc8ay:_ ?Z?$X0%euQa%%fh,bg1"eS=:?3-/ŚosbiRV6NjxHFPc|?[Bm6VxDT}b ArќwRsiWul!~ݓd ^?Z};/޸ $c\7z?/vqxa TJW6-|>."r?=/vQoeWD>D'SЬ=-:ac+C$$vfTg*n9,oG "6,]jV蛲_hUYoehƎן`ꦃa "usZ=YQKEp<}ٕٛӳMnU6;XJX6|=X^ Ylif~idױ 0Ȇ@_4⇒}>)'ߗ˵q㒭HxW3FEj.1=`cEfl_6,XVO[ED̀4cYawM#W8^Лr-{f.^%rlC D@D{ MCBb=Q_K\w뢸ロm?￯??|0 <=|AjPzY9#&B+bs"r[n X ^"ZKucQU'}0fϠMyfh-yܮ Y><"'i`X1:}%6Gg 4*̉of?F;[8Z dž$ kopiNnmr$r[sf7x*aZvinJlllv#onkݞp8q}`GXFf/0gOUMen1W'g%O oy Y8TNfZ"]JF:kFW~^/RvgH"h c2Cme`_GBS"1X.!DuP%<Lj"x7ߢVu"w-nB썸CbmAiuL2I5XUx  dx.|IT/dDSڥUL;ڔ'.}…֌CN k j9ʊܨBkU.M k!qKfBq74I^+H|%tcհY`LAK'"/HA5:Kc<^&I/Ѩ=Czρ5v*ZI̐,MfHyTjRߪa郢~5oor 7D<Yǡ?~~zGUawv~ IYEZ")[M@NNC~I֟_UӰK~v e4һZNH>ٍpi^|M1#=t& ZD _Wy3ZE\?K>0iSmq |)yEDZmf\vcZM4WzInKs?Nh,8OM wkr(_9YrT;᫯D;I`*fcm{iL<Χ!9O| .뽶—uzQc=l㞭ԌF=QӚy*=S[4S}rVkڒbka'Pn(z=*7m)c2fs3 ޗf~wvПvO}~}jt?ms% %ZWMþl(. Zɢ(~'SR;Tev 3K,UPѸLLȼT;eg|B6ifX?ndpV\]9わ='k56EH6YrXYMŇttD&/PgL窽氾#\晵9+<UHCMT:Հi@K{tsy+BpRtWm'1Α]oǎ"uz0$xW.W5yQpU{zX!RSƥ E I fц2P QCPĮ8ˤ͸Ք,;j)YR΄2DccD51dݘGQqCvO>=tnU<ފ!NMAs@T6 iNNհѪؚ0>5n[}}u > ukA8缫o>|YekHnaon<^[m?;=M6ʓQ {?,rovbұ&bdZ'6lwvk[ Ε_c78\~o=*{~i}h<_~: u8vk';Zzeg4w:m.-ê?}"F_ьޤan6A fi/)P '\auYL D;.|j[X#vn⵾xjufݣ'/qC߻D{ ڶP1U؎3Xr[:Kzw_ž.K&u _]<_xtf6_W?^54nw(r޲plzz5M _f:s]M*oF7ʗ#RĈBn- ]Bu?d%Ey[6Q~5`3xYDBI@ Y:7O_,ޣYLE [& U4K;mXF bK̼}55'.ey͉< 2a" CRa VaP\ YaUpdt)->wޗm `ZÃ3Y_8uPKſB_HTi*H!DN,d!TK:;'s.OI wH2k3uj :|LΚɖY$h*='y4),h]G g: .. g|@UkURh" ,ph= :eox :K]lVYa%c:-SQFI!|z[=qOB"nL4s߶U8%z7v8ƹPՄrcYaRjpN C5R suaRi)r_@,Bf;y YH `:0@]Zo 0)!C#4L,́r*Iah%C&4 *EϜtf,^;I }n 9{Zg"e;tPp TEňe# xz#s=3zP/lPڣ%7`5@"S dfO2G37nj%h #F`H Ea0J^/_rVn/Fc^tz4 3(!jD‚B n8sIA0,Z*H}F{6CMa-CtɌJ24ẁ$T3>1$y y0-xlAW+:8Y9]Ό#G2h0.]-HFa,l"8ZhGs\h 2Y)}-{cV4X,V"q) D,(Ŵסe|IkLL*SxZ/i|͙3&!*\πu 1168A o |5xd * N'ɯC֓ieEɠ&xoF7z˵J6.*4 eZV1fTM+uZ^xeG:C9)NATV`/բqS*6@rj>@z%tT/B: llt>,pO!D+r wAC,#qe&sj"a/#{`vl*tLJdFw<[bV AcҊL0rai)!YaHG%(2gmx/P(_#b1CAQ="9S"#xEO@kdDL DH2GV' .Zq1 J"P:ӽ89+jHG8f!g#f*A6/@  q/F=Hrpq_Yd O?{= %FyF6!"zuKG0.TCS͇,,! 
LRA >Vf1 GoSF H9 +3dKRYx8Ty=/P-X$Ez#’`0p#\!љH{QZӰy! ҨYLM6Q/zu׉f$)#.t|0/Ŧ|Hi%)Α$z˕/4J‘o A,!2T! VGLI|+'!&*M(FO B *ysx5S]mhW{Jƀ'6!b%љ*!4&~9/g'@Ixko})f%)rbbk9p^/ 򹹶65NmtnF&`qv|;$PFePUZn, !N">";.B-M"@ 3ݫ&Ұfn mnvʍ8belj)P]Ѭ1 O@k=Qst 2rMz8N/8uxYQ"E1x-yM]F2P/Lt8# OS޽ϏxIH~Me]*$oghɕt>mJ>K'xQ="x;!Jj:j0vQP'AO~&o'V(CP~pݷS%Kx 5,ri=?Cɺ@ ݏǽ TH5&EՕ u$8=:W( qo';m B:-uT)%s\}@L?*Kh FPҖ ^ +`J#$0ݓi e{Kg2z#oaf;-1._L/8P`)\L1Ƅ4JNhcGX;ӭY?#TkHcMbMM_׫F7S {$S>G-gVc%֦ "m0Q*BHIcMy!( q?D@pr >0Sz̏U 8m*vAq /A9S}gtŸ ,g*Ve2#;5}qvVF8[-$T>ǑVzɞ$`ĉ67cs'Ǽ& mkp*T]UXwv^yJ$ aj 4g4w^6ytX/S=CʠGHQ40NO@A"#4d?״#?νF4 mp֮:ZXChslo|:O蠭i^Lx6ҹzOrDJO`&(z/xæL1٣[=:hjo%*a8ah/:>G+ҧZRZ^-TW߷.א#7Gi)ﲩsЀX&p6{h=[VX.9S}oz$!f;3_5ƙ>R۟! }Yލ\j꿍M*םS.i>U2}̠.g؅ ogmٞzi;FMn}k Htؽƪ܁e?ncsu7LH^ 4+4og *)mnDuY2~E4w`Oju׍udpnxъf[ŵ{iѝx9 wݍHajKma=kش-x'vi]w L[{܋rׅ;KKƳXUUzz$Sq<3OxM8_$hqt; ;)}Dۈ{M꺫`:ao꼉5=.(@~r_݊ԧq=3/1=4q9WLO70%z3c:DuTO owv ]wQ/&eKI;+,_uT|6]wUo?ջ󋱩v&u\wocLNFKwNkng݀uWӌjaMnՀuτǩOPUU{kz$Yiޜvށ=uݷ1Od{գDr/{gv\7_ iv}GGw5Vn,ŔtN4ZC?Yk+7X>vȁ Yu|N=;xuq#Ҵ{>;06N\w}xڹr-;D{5)[)h)Axwh93 ӳ3q]UnbyuO/_U已g`7${.᭾ʦ뮎w?ϤۙTzoDW=Tf豦Vo˾wb7C⺗'f@ $[rD{n.m?3]UMsTM݋Oϲ^J3tֺ4Or>}sim_)!CAzl#"DUيh' R}9]dcBݫ?T*Wnu[]%zPF4y\u_;>j3oڃ'_IJݦRT۔IfgȈ·hEKמ&^.dx |"/kO˘Fw},RS&$ "jU1rp[/]q\׫e7mM._Q,PsԙQÌ?dk讪i;tnvףD7%NJhLqTI;R}0p'SD4$тm`6219H2f,]Ϯ(6{* k4(˲>xZsҙTzI HԔTc/Iv;pcsDb3v|C#Ս{ M]ڧPDmSh3.P\.ޔgyL\ !$r޺12D2&nYp*7 ܪ8Jl"+RTQy{כ'܀L{LWY|/!:>u9ùoޚۦgIpL|iyHƀE$ nW xZ~\cp$w;՞@V^&ypb At a !~չ Vamy$Z e&Sӓ= n^G,?d/#@RN4`FRHT󥨔 7'93Z=#j@o.i8]B^$)JKJ8G5\`SzTsD-L䐜ދ##۾,!W",b4"bHȐV9ڼDገt&!!Ć# /k"IZ};GgV{4FM*mSֲH PJgi99=d7&eyIri@vki/KJ0/K8a*nSrp _ zrЪԭΧҤuB$ V)t{Q Ծ}U:83[{=yy5CC *x KHڕ{'V.!d ~sXj˄Nu!Sj5EK@>!*=aD)ld) a=ݭWم=pE/W䂒 W72g/tͨJww&<\WWw. Nkw1ܥ_VWӮޯ1a]L J%6Y.趀ƘEjd _-Օ+䛝 qQ0T`\7/ˆ77)Znq{kZ?5(׋施kDZ>CuL} ׋{\︼\仛Eb4m/݇lr׫Yn[uɥWM9-ӏ}pJQT-ke2\Ba;Myf`6+-߼}O?Q槦?P~[o=||і߮:p巰 ]{o&_N7/0ˮ,DraSNjB4BocVPҙ͚zauno v& sh3n' m}Wt,lſ&LzK]*J; ~r9j L Ɏ)q U몓;j]E00)=-!O7"t_޸˕p%rkC$Q ,m)1t4cBL*1Pi]\. 8\츛nRܵ:MCV'V~KR^$EsPva=؃de^; 6# & 3\Npc{EuR,sKHDe0Ld9Zӫ>]bUJBrQHڐt 2X)J<: Cs\އl|2gU Me CⷫtNNu2cjԲr%9J A1ݿ:c)vRyLXbr}9% 1-w݄Z*7@%L>IHJh\v&86Qr\@E"@YTO B'*zĴauB1@RY* S;R h8 Ch%r %9`K| 6AhhOzW-et~cuE?\8: k}ėG}O |u2] )! |#}^䮽 _\ Vɮ۫{~eٷg͵w|w[`䖝{Ψ` >(hfx>5ٮ֭D7ƅSA: ůׅ)rwXATik2%Ib#{y :q,Z#Xe>OwO}b+*@'%:/mVyg OaVZv>=ܶ(餸iD8%7WCr}]/n.18eS5Z/\l~T\48KēQu.Fj-޴x($ݢ4]Mx Nti۪k^'k+ h7wOh$s>lSӛ2%wz5*f,m&ej3)䆣ZAX is=dC;q= {C*n֦ZQ.'2ާ0݋; 7ms^8qLWF`pSA{WA'JJX@G\h:Z>\Ӂ% ZuAiв/8I!/o d5prY6  y:|KHpRjlkfR_圖GHc``:eI~1-RȤ"ß=޿AR&H47e;rn" eU֋L\dUYK:C 9e yytIdQV'3.!9)GgqGYna֖+ʙGڤJxSA};Gnm`苦N 33!rS 84"6GFh)㖢v_/ ɕUGZ~EMkKo܌UCݪ,=6_zfIꐵ;no}h`[3/}kF|,'\3:ݽEج,[wL!zMnoDT:O,( :8 zAn Tqu#R`Be_2;AIS~Ee5-i[&x;ij>QY2kxnSeɚ$$OVB7$jg޸_1݇I_q׿w5QW'{ir\VE~lzw3{#[C{d7t!j]]Za.?-oeQl.o,U>ǥU{ħY}O|DNm 1r1IӜ)˒܊H!Qjfc>ÄOOB-Dh|a,D˅*i$ٔ]>N5j*t30K8tC+$ڄNgzj)ܑuLj"MF?.{69yϲ@?`W_/x?tcK{+z/O{&]v 5 [uqWoE]EIzd۔ Fluݘ֝ݷ|3 b%w=n$^&)8-3\k* Q%TllfN@THFTnyHr]٣\+=3m#Hq|q>;o~Âu;t;<ΌbDǩK&C1DM!ZyJ)Bo pof3^d媩;g`*߱X<@S7Y_e>:DePE)N(h&tT`S^3't Rpk{9iSh4%e7e,/eecH\j*[q7!(Ȯ*AZxʽWN$rar:G1R\2.B,YNQC秃#8x=Yז/>L9vum?I2or44؃xO{,F]I//嗋2mP2}eI^]g߿E)XD 92+)gD{>)=V5>OoOb&m BAS1y7 _QzAwI[c)t͋z7+G1 }}/??F~֟H`MfK Oj> OpL{kzfoۧK|ʚ7ˤO-0)V.'&HoNxpCCgc{Le3P\^ZZR!0yD@<МݾGo1$@xl!~G!kv6cYq0x{6+~^*q&X' n2BF 0%d? 
jc@V?JUC)V K0a"-Yrvr?=sbУn;XQ!h*޾=->~vp{r:{.lE?뛗IGbFیO02ߔ@kzZSң/B\16;vp37ig Ύt"@M/Ȓ\] ʿ0jx<|iQٲdp۪t~pLnPf7efß&"͜v~N)zi~˚3GvnM[Kf5πl5ѷY67m`x2JiVY9fe}K?6)۲|kOɻr/MۻrO>5 ajPC9aUZ^oNo:rJsu}u︺/7ݷnnw mQ A]<\7ځ!-t}vJ[.(Ƕ2SU+.mLJ=*DkM5z-yyuߗ7M|/þx7n'/xAry׏廷o~lG?+^sS2hs!ةǰyj2ʻ)$z?=轡9_<ʁlݟiYi`k<a*@1{›f6TtnNJ!i1뜬\] mEQ!|WoZP*2TUQI&\R^DEHCr@XU8!H >qG#Q0%"(%IeI$,8_ߏ)iyk)ն\9 GDPU4)$#!>^d"Մ*kF4[+4/_`Y /nh$yJ@]$2"^Q#7q}pkQXD}d2p ORHL7 OF6j e,gFEH\>UrbD̕tBYIMaNT꨹RVkR7]BA,I՜(&}\y4ʑ|Tc:’N`bK!iBma}0 N\hI˹ܵia2VG}xo+f2ml&"epBNXQ)M8  .SJQpCrl=y"\85@Yzr@%7vUA kzH m6 "plŸO)ar .;dGv 7] 8ئf EGBX@+kQO&ԈLIe˽ʊ:CNfnJZrxNJYPBhO$<$qZRUў?L`TFϐ_D1HWDe|Sk0"hZଡE ᜃ?v~;blaj9)сt=@5xN,\Sa Jі:Ɂt"P<'aXv]h&@‹ӡ'xΖ% fhw~.GVj,Ga!Y&2b =)K<  `lhc"rmSGiԋ@[NXnGrs6XDN*IXDV:Y(UKxX*:.۽ӎ\1eAY L w[jq0)΀ t޽+yPj+wRBS2Ijc aV v_Rp%:{wpe"t1fesS*|n밼te%#bJ+7д _`5 Oc[ۺeUB(F"Bh XH.礈{8^m^(D[(Ç W@ X!ndc)xϊ/Pc,#rL5NDPr;w}k8,ݖ]Ŝ)UJ A3:AKB!'hr RJh%8L;7Bx-*M$,%ט8Gpc\,W -b W$5sDxgH˿psZZ\< AZԢl=4I(02wLǎ/ס.E~77X6hcrO "Ph>l%s9(ce2!2 J-w+53EJadjTeQE8n&Q%8E`>smNF3q 7g(9P2K|UQFZ֝`"KZxrZc130߸(heW6E2B ̐lp$Ėee~WvgMRˆ tL}1 %=.힉X_OSF0f @\C-5,H |]BJsLq *@E] Hazd忼~27Pɗ"ࡲ"|+&Y-: pI%g jL| 8Q$p?(30A#fwQE١\N .dPHx (BAhw"L@ F6-bR-Ia+!%TȹuozToWhRJ@U#G!86+tWFČRNi"Ll+bҗ^4\$H7V:$)OK0\9Z!*ˤt2Na00zJl\9D3Taz@.KD>ӑY*3e㙀b4tJ(%K>vfvV JN[qeYdࣀŠ_ ҷe 8h'%Vo*SQD:p7, |TF8nhv79jlCq1A`_rZ9c4ܾ4HX4F1Sg#]=|$(A_MsnFDjSG|0*0]NŀPSfdtrhL5w71Jm6HQqDkN!CBK l4tJU,|#2ns(c?_\VCeU)ɬC*[JM;a!h.ʅxA'zr p9Yl}kĠpZde| 䀽ɊTc1O {ꁸ]xȐw%DuXZ osYC"Uj&a4|mySٝBJepJX})E,6U+y=08G@ ݩe2p53i0et%Ed/9nv5)+HѤdA~Fƫxˇۊ4HPUO8:]B- x-PDGQ{B0ij$Lƛ*GaF܆@eE]RČ3x4Lrk#֕ȰFJ҅b 'ZL,0ރ`jP.QF@c(0:|psZ!˖k*ƛfw`|kH !;10!);0 -W{~T!K]_4%BaCxFlA8_߅_pi%1cʸn VҖ/ T>Ϋݖӆc .H [4.I(*ҼYOв D)`z(-e"ftW`Ȃe@SǕ_f`HL @kqJҢAe,CTnJ%g){a53T2-47ig)h T@J]_A^NV5ӗG s/IYC&$]*E%#0*]Ɉ$KqB\"1xwySƆ!-[}+‘1f151~ԉ+U$Y)dFTaJ01G(%Th K!W* 8YQ#Uʶw!kJ+%,ˈ /(HudcsmS;A$l H-Ԏe]l)+r (`&eDvڒqpcvm5DA h2X PbxooޗWX#,oo/pY2&TTX/܇u6wf|Ii)P `87^ۯ 萢ѕh0SsP@*Rd|ΌzSlJ/aJaѨ8uۻhf%҇֨2%%(*E#̦(+ \D' aI`l 3p[\$u r~YeXzZ@Dlcx㕋rl~*|H1)=mvS Ia$%d2pĴGavJ-W}{x_Zϗ[Gv~.9Km ?l{7O[OF}I}0Z=y= ۜ.=!ᣍ"O?iܞ@lف})lTt7+OHc5{ EQTR+~ӴYj?eA2:v::qz۵=]çE<+gԫ|ˌWǻReWd\nAiy/W9|˲V۲qod*ݖQɃɇy |>jL>~K~zTLtgTjx`YoMoq㦻}.$Yopfy Э>Fm?fZ;M>>?ޥnO>h4m}\j[Ut.GSshnaV}>6^WtiO_NWtGOj?}P3HCÏ'NFXL?O{GŹѧ'izxG)GL"X w;t|ݿ]yߥZy<%+:oyW[.`2& 5Eƨ}_R#V@~? |Vn4t[l}V/;q׋rUxcB.&ONK=?m'>&tx~yYn= _9s{;\Oa{@fxJ8&ޓ0%6{wC' `DirkvIetx/?LJ9ygex8fG$2:)tr~'Na{Li_Ncz^SrHp=-EܬȜq{3]?;tQ;B #0&v*_#E1%w0`%ᷓE1v$&̜Gc~'aLp1Ԫa19x*d[1`RYGr\Hdfދ=D);}`Ye?c4R|T*.8P `ZTꤝQ SO!! 1)r?AcB3Uax;*3]C{r+f`xs߫np*39]Q,+7,ϗ8!{TSS:jg}Z78ktZ YE9Jj{PGt&3s=,is,c*39L i :m\Dei㸮9< 5^YИ5sѼWi~h_f8-[%tʀ]SS:y_Df,ӯvhTf)ಋfwWh^$f0ֿ;j{G:17;,"x<}3xWȼggbZS8!%f}S܏͞$ ePVY=|okОq#,񹸞TH> ޖc>ƕOĔᠶzuY$P3C\uJw}g:}Q05CwIweP<4gfzc9d2>$YLh͖Aw_yE0$$ I]#p:|A통gg~p}3="^70@\ҩZrW-)gY~j],4_!4G 7ޜ;'1>3q=,XTfp\jkߟOBg&݅)d|xG J}>wW/=C ˺N>3&sIcr-ّ"Йwwi9ucB3O풷 ̝.)GbR;,7/1yasm9xcBKxFƟF̼.#ޜklz l;&X{f?|bJp{/m8R8N6={T=fw\=!"SX=ň፻gcz!6n azQb|x[<`w/X^E}$5B3Y3>kx'{7{'W3Oۻәs=Hdgzx^k{H w5םtv}g'^%uKCeJ}=2sȿ-32|OIpAJm$)?&4sBn;'. 
ǔYyos'`Jka}p|\>3SϷyP:UO" r")x:G*5߹mٽtdfwwF0oow_O *8ﶺ+OKәPpxWh͔;Ӵ.)H@Q;x?4cK..!=+OFh߾s*.{ilOa*-[9^ OdzBf&MMs2ݼtd1}ƲۛםE}:gNcwLR|&V񦓸?1zyCpP]{DSjK}JU'; p8a}Wm'1:q8~L/; 7eśם|lPʓخ:u`|^|22̯ò۽$+d˕>e'1;"1u؝P9|}9n_v#m+8NcuDc7r~n7; 9^7 s۽$n4fpK1Yy1֫ºқe+`/W6L*bYoFjwmpsZj͖+zA-K['p}[=7ꗴ|К]06K:n~+1z}]KH kt>4/Z۽}|oBj[!˃GCqom6ėѡfɭ> l߷m%!G)2b#&.8**陎90vpo^8];%^9]t}u殳Ky,?Wmjߏ4~ 3s8CY7'wdGd^?CWQ6O&99_}suԛv|ؔK S^i|X9.]7E(i,?1ׯYrrV'gLqJ?o7M}̿]gjdg i]ݝJeÝ}GU4aCjh Ȫml\N"I9#6~ԤX]CܖghCw*m]Jh<<>ZF>/mU쵚ѻ^na& = auSgm1a쑩3>bG a虁XHn |{Slt%Zdt: D,3#U[Ih%rYn ُUx%Mfِ}4beuj>$4M*耄<|I-6j$p ?uشոDKtKk0\ÑЄ›Nl `̑_oԈ%%|ws^:8 ruiZ]?Yܴ|{p '<ekqpr!Ȭ;%Ȍi珫F{*,-~{q8L5&^߽֒{CW 20߸,Ip Gl{uӋ`4}KΦ5o<8n< |Sf>s|ƫڸ-j^])#:ǤvHs{5~Vjn J]\!fSLCkr4:Wi.gM^dFb l)tdB+AY9J3`w1t燣4YkJÍyZ6#DNnb~F~i]2֨X_veK[}<8|qYqkn9#Ag}t(7.7l=YԎYslVI=UZc|l,ۏ Z17v0ΏFe~_ ]m3SPHU hwh;ْ\ھnken{{*oxeЖ`5*p~vR͚ 3׻:dm/lǼ=öZF.Uk>G`寮O~Ǝzx\<ԇohikSG ִ͑(k_woj3E_z!%;|.j,Ez 6WVgŬBe .o3BYqxSs{SVa 2#Dil5ogv˨/}67 :Bk7Z"]]Ѿ78.WXHMJ"M-˲w1)7W 1pt[}|z^Nk9cd[M[IЇȰ}jUOw}Wu{81Ax1ޤ}W { p&m9.=wAg~ m酤[II.{ul!yB|Wwi__3pҶ63R򰛥y/ӚeJn35g|ݞUf3jB\+i>k >6q=o|o.]KNev_"q TiG\*6Hn%VGp*6c\òq-Hӝ]kڬjXhھ^'s"qRmOoo:g4a Fk^۴f׆ Ⱦ >եj9sp n+DuF7ܛ"6ij'dָnN?frMY:)مy\=]~Wm鯋o*%"mB1,+ ^Jr&+srC9YSD-+_ƭtebB90Y J"H+ɑڍL; 2݂d{ɰ7v d֓gN*~:%ǮV .վH8%S;wBozUG*2ɚi)7$ƈ|TI dHsw2vBP'mTGo+وAk(͚TW8'C3I9$&$hrHzB26^bI+24")O5&2+ "e"Cȕ(U,ݐxj'SE'FGMI&hMzYE F6EX,kg4 nMj# QEEU 쨖Vf*9vHor,gBSs>%b6Pʼ2^Hzgb2xEL&IQe" Ͱ1׈"tr"FɬE2Y9a`f5MQcF𛎕$">K.%N8hLRWȳ#J|6s0V6j&1v0бĚP3),cR# &VupȢi=oST%OyQ5X jicT'PxNHZauJùazÜ8LCDPLzF$֙Pgم)s밼F+tKX?*U,g-;A`Q xy &f5X)sB$8O|i%BHP$GC<_1qyϴϞzİ$ g$3 7&t]<'D!^LEm0Gި S'1 &+͸ 9 fՈި\Z\B?vPLiR eB e=<]ϩt:kp袅-0= 6m"tc!It3;! <$CYjtFb`'Ak3ggn#j-;SQQk Cd]4$50ۭ*i]J"229D r nvMI`uCaJE4Ӧp$;cϒ>&v $3 *Xi[GXR"0a!4@yB҄Ìr٦{Ĕe`@Ͷ Q#6 )@Uk1-ʸSXud<`: S+ gǍ2 fL60I82&́0{( s-MER)oRgz㮲`&ؕDws;fGJ7-%VKZZ[[2ܩduX@ {9bpQo'#wp˄bAKTjX[" 0 S}LS` :ROZr&R@>%R~ixFʹ: ) ̸%E0)҈u.-'@_`waQ$ z*H{zE9%q`/ R7@|@L$e4Zd è `;EUʽ L a(0މq\94otzF;$$Y)b YxV`8Ermu> 4 dH4u2@V7I`3hO&`0b Ì07?X AB ("F-9p`]M)ATk*D3pbިh".]T 'D:4Y*֦c;xxҀiLIT61XF ̌F~Ks(<XKϩوցMCd%Q:LFP7]jS@`. ]T%g+ bx8#@@+HXZxUxiZ '4*I]U#xY, P9Obp@јjL wiJ^D&|B:1{KT ̿*xG$'oһmq`8;HlPtgLm.+L#Zwf,qT8U XB{]_uA`G<P'rO\)51$,GF_XG@#pیqd)aA譤e &"G ]D) 7f6$>! ET2L;T%fORy& CX$F[@KV1t4ZQ'U JZJ{ĬG}> 9)<];i0B% nG!fPc޻`:! øԘ5'#9)]:@a$c,ļ*6;0P)lx8iZ. Hk!(i( X^ G~NwXƫv4C4u6+@1#d'xD${F^Y`'!_ Va\8PH{K{|2"~xI05aG3gxF|x: ʲUȈJLLJF8cwTFq'rn(NpGqvw[vJ/'p.P2ikIqRC b7otPYUrze,£uVqʒMG19ACa(E : Dc/N(@1`TAq໚pѾ\m,j$-{dzz{L@20NX h`41Ļ5ċ?+[b\*=,H  "9cz Z4WΎM-%'j{o帑$~ׯ켪zd owJRgT(v~9~FP yd*͏_sw``0JTUf3w.77+ۯɮ{!tqC )l9.n/ ;܁` aЯ)P!{ rb _b`Gd Ve 2ׁm`Qɐ0SK+m<ԉ "^긛ٿ87Smo|0a?æFǀ)o^* UƠ v; l@̛VNjzqnw>a :yX 5}sB"րe1 /7hBr=;<QUT4$x&jqe>՟ڬba[j^awM6~lǽͷhg߃O2#;p[oi3ca[N:RASPO;hk޴*QYGNWBiR*'UuH/ӧ1!Ƒ|Y4czOЫVm3ڗ|or)l|֌j?/ [m}}?.h}qt^c,z{Om8v>/g^ZOþi=)MZj?ziۛ{|h?kKOp+(轪O[ovvy{6si|ډlH#/q|R46_u#|~m+Wmma [J|oH6&C GU`az(M|scX_@ϗ]>w^O~QkyvnAw3fsط?ۅ]Hzw7U%AUwwǪ5\!]ƶ(1Fn̻g@]j( ȵBz͓`M~zS[߼_`S{҃]^_ ƦŹ8~h6%00Ҿ9}77x4-}JolyG oP0xAWa=Y~Nj)| Cze3NW?y";&"sylYlI  61SHv_ڗ4!J:s% 6 z*IN -liIlHc:)L)"{R#y03.$I+W$s4AzAv/'h^L QZ{9]DIJhf')L!}fnlIعr;7ObkT_FW ZQyW:&mP\JSE/Ċ )p^iiƁL&$M`@5KLsI{/@o3brNR.Ä. W^F3Pvu1]d)jSW[$rכNdhi%xl{9]D_yȋhka$}ʄA2HX $ BZl#`n^f$eu1Lfw@KdqpK淼i0ĥ uUGF5nLcvT_aTr^_zR\o6Oő")ztnL%.W'uB:{R&j_ť$$K$>9z}4IS8KfKyD}k#Gn OD5+.vάѤhp=ٿ8jVѾ+SXe1&4Uפ{m:u D>M"w}ghկI`7tS|Dud:D"7z9-,*\yv3^Dd ʀWm"BU {u[5 v1%MǸvӄN&1(Iak;L] mRgW혴"hRʃ\AsQՄO3TzJ;\HKnK+.3 ͤSH3%ES HSy~ Z9C*]z^O.(Eao~!fח=|חyV/1&EcLm˴V}*MIz 3JHw)ݺSכF$uP$鱝:W$O#IREj LVKK,p0.$U^f JY|x %S InK'u?=~Y܂} -a. 
% <{.D>G_.iܶ H5R4Y|Ch9c#`{UZ0}$e8\S9-(9r0l<|3tD&bȡm4FF5.WL6/~ހxh<çX6E/2{kmF :^f ưΞfM$1bfz5 tP}@|NFL[Wx۬8|x/"6  &ƌPWD>fETQ3bɃ͐#<-8 HĂa|Ă+ȜFPUNC1[ F-nI 졛z _h1`njadI"(^uR?ou(J(Zdh=ho|`<sVjm$?u0ĈQ~Cd xc#iSQ:!LTٛ, #~e0Ò. SDB.W"u2GXаgfM|ukK:B +FQ\#?D RrBKVK~EF|jMe Envr۸aGf0| \ ٸU;) lv c31Tle-k)G S1tWWKczQU`]bTh1j o#bNζFꫭ' ~|hlvǃo`F?G@%59QvkUͼDv})HG\: j~ XۻU5޷VaK툈vnE)(I|*S] C]ڼU\G}Vlxo#깣@9bW&xZYtݼ fгaPD0)F {TG:<<.=+`hG {a_-'io8#6[>NqBӡ3Ò!E.:LO$ӛE:-x6#EUF02.cIX +|X8ƣt$bik^XrhyEXQȩRmmY#c0S<-ب>icQRlFлi&ʹ7v(Ga/'-]xx(^ցG Q]d5 <*i4aQ,xF 0;|9_ߛ/¶˙_7K}2ߎA0'u܀߆,3 B|Ͽմǭg߃G˜I탭!T?@}lul9l}5s[kW3A.W^[m.Scq3w֣QM̼0W@ǽZ}V߭A]]vkc{;hos?ہ  qM_컯~x~am*73]U r,vm1}{ =k"<\8v`g|`cwf;wq߮4`6#8A:oS7%ȿ+CeWٟfZW!bvֆ/_"p*Xr!hV,qV!E߿k),*r>o⤾kr~߫:L0Tꩀ؍r,n?GFI].q8HeThVn_F 64GˢEƻzCNyfcSNKRer8E$s9B ej"p2x鯩 P{ ka?$Ç 3 ~ ߽3B2 UswP huzZv'e5z@#S{nVݯ߈)+pwCUxMvBD,a tV_F )$2ҰЭ r?g`ėOn&1I&z/?؆,FdcPF_wJo^ F0@.SۿmCgP-RH_x_BKrdN4uRai q r؍OtARxuJn[o0p;zr؃TcZɱG s7;KE.u'~7=+S];{P?{ j*T0l=esQ| b} Ӱ~d@8խU5-&(jն z^=\3q:^A|`aᰊ=lVk8I f wyeLW& !;n=h\v`Eucƹ6j]yw0}}sj+Zq-UhwoihoAoZ$FWo6r퟿^g'Ď\A Jmuǰ5N°09D4aDUR+G}JjeulWBh.3bE6@!sc'S]M qQ.8'IaĒ Py k.?>BL9/3-%YZ bp]'! :Mu^2J2LsFqbs)dEGKl9ׅA\hI+h0t8iPѸRb|n׭k,%e#K3:Ü{a$aGRz(5EKS&%QeP%{sb?#90*ip2#a&k(4.`$UҊ撂#e@+Q:JdLMaN0Љ,7Tkm3EKPVxYfBTp;IiG`)*L^[Vf%2'3 "b$Qe'EԉmaHf _.p.a]@yA( ި=X ?PDSS8V$tK1t/* 0>"%!!kE:s a*#~ށ"U{J,T0DdT%llTNb"į.K;`XX v+ eJԊQbotGWFew`!݃t`c.a ƍ ,io8SI YUe\(0Fh  <%S H˝rN?;0:A(<˳Bs\x;m:;mWW͡ckemr1S/M{{nka9M\hȩ &'Ffp_v}Q,U~R{<03GZխ{vE q(ܨHl*)ZBx1:yXzS=pk{?c=m + \ Pz֠O>yU[Ƚf*&ŤCqGY" mMkW&K`KqO "e[R2 Qt*; -]HihZ{&5-.Do^ͥn&س[Hgօv " mMk޸&/=11پWlBXu)$84u)m$cz(sڳ{Av.W$56u!$mIoRb[8nɭ9upE]Hxص="ٱ I ;=5m]Jp Nh]躹K#=wUM]Lr ]ಱKI9kOEٗl(~VgPvZxcLZ$hfp`7Zmb{h{ĕq{h AO?Q[TfE,-\nB0dL/ &԰ Nikq~FZZw>5s@?bou`~YϾn~ ߶023:ZgN𑜰>niv.0[f_HֹZm ?{/r?[-o;.Z.t VB2jt4;pp73bB[,bWoH?yu#C* !BE "euư{eB*G}B!qpag:B\bLܘBKKqV:Be|/g&<| i}ڮ}hHfNˉ 㭜v >ܗ0}Xik#~lddID#UN+aop_>>HkF>~) 2i nȰ3! IFc%.N, cR ?K߀D?[7.mW&':)_JQe!8UbuHO_; 2DZ zNQD)KA p.w ַ~2H?n<E0#rN;Fl Z$pf1u\0%BhEr&q[ЧK1R9aBLPmgm߭6]ͺiq^oуOvt6hc% 2:Ju iJUR瘢e?IEY :.-uStØ][.a# }8;17ccb8hzio2,\@*a4`pq60 Ӏ' 8@cY $,8v9Q(CqɨUTojiu2F}Jm0+r.J Zs&)G>d)2ln [ V.~RL1Z;]dP\1L;(jVhas?l 4r\i=x};LϿx2h{FB8lZc5Vu8/Zn& a zrp\辑µ$Dn*G< 2l$=xH40G# vc)t!ۜd(-?ˆT3vG30#cނ]YU!V;olgzry"f˵^|.p.g 5V! XV操Y; ,}ZS~ RB>5~3>C=z,Cm$h!پM۝O@/ԐlTZIױ}D'3㊌R$Za!(q^e"L`f:?oQK#kT$%`1dG~4x2=k[Pr߯iˠdEj F`P :2S.ɣt%PUSQGϻhDy. 0Sr] H ,ɠBB!Ϡ'r,2'a-U>7"0CgሱV8 tgAgY HA0r~ .%qo 1` q>n}Àr+h9i_?J'7J >-zh^7Q:pL&O켰 _N>> CA K'/99} `,r1u e#W-xZ򫰷ɖW8^r1a ˳r„AP}\Rg(]ƕ)B,#>!S3yqK^PVe@N*aEPh8 Y<v,jr6piӅ+m['NKˤ\<-R ?ONއz'owv<'nJJV8LhCpMkOGEuQB, E!!DX}2^YcBI=nw.7e8{gamַ]d=E..gҦ"]SPGњ;v&e_UZjhtEݓYX;2.Y6܃E!t5`!VX{~Fyn'76>"wYX:|U:O; 5jUl_c)*ϟZ("e;ٟ0fUY,|HRvq=m-_:fba u $W6SzPbDtts^^gj`,2Nj_mcafvڹjxB@ʇf^ T[R| b⃎Tc5nP22O㩁X੩ HO&z}B߁+5c/RU宣GQRFcA}$!*vH)+':C ^I8*{@Ceo==Keg$+S":lPA c+ʉx/?<=|0_qPf-G QcB*Q)V}\)(97;yiBM?4`iRLIAaN"Cƙ/NfbޅXwe ?apY|e)ɦZAFAÈ"Y NRLOCIU')cf:c+T5\f4K5.|q4āi搘tj8.{`ùI=,Qǰ˶uY7yaR2thOv|Y ~׶' eE!-݇V࠸ų촵~TNɅJ3K|}`sj#2AdoC%o+DC5F4LZ:5loqX ӘzS,CXU×ނ-vPЩN$aAьJ)C2k)֗߇rjˠ}rp}%{ﻐǁʙ${ y!%z mz'GpA >B ~gXއˬMuU̪yPuOb05~ bv FW>8{XZc pSƿeXƿ$3Hq6tDAI1!vc0G}BGalz2sK %83Z d.Ɋg.=t-?e>=i'žT<1mE/Oڝ{h˪i0ώ `!',QpXIyInǮ$ ,ϒ$$" Sa)F9WJfװE9 B$ K"re667"8"""m1%ыHsb.9і qtNeC/) #"q$#>UsZV sAOȢq.~970걟*{Ð-vYvwbGtC׵>r‡\Vs|?^ k}*m-"|Z+Kp \4;/뒑8 KC+Ǐݙ}r'GM/w#WCush3^ $.yW~ݔ0A\D)fY~%8 ~7p-#d&3C1T[as ܂%pA4_ tlD4'˭׼RBá}cFz%w􇗂*gNZj,Ffsa+j8YaBs alN9 YL k όHةѭGm? [ fu \}-ƌ.#RIl.YFf#!rN%V~D{D'{"6x] IqF257+ւ߬Ú+IFι"+$ Nm?74/bM96pR0k5vKM!ca xܙ]`eU*$ Y 8Wr92i"bem| 4rg ]1s +/g<]yjMzh@k5 fU`d0blǍeQ ==lFؘ&Y + P˥%y&>z>gː|%׷U)`jɕ9MH"뚀F5,"xnש,a!bğ P0>^+t֛ɺU8[-w\ + cGorp9!shX3!) 
g:[3̶0䀙@kxS9w(ʣ`i 2NMf# @qQQ-![9 cZ,3@;`&yfa$bjlhau{6ਓ@΂43W`JY&rh>[ݬ\74Ql!`` d7F :Nar3v]aks]A/M-(b$f<ګn}bEF7&E `iCnMp=rր1hTpu1 ʄA^㝼 }z9cA%ȹ( x؄1?*,oGwB :}W}a!dX b@)Jq6R*y~݁KTN0 4I2dUVBx7#_Gz.J9(_5꘾z'>'AXld]-˪7>z>Kd=i]ʖ/'m]=MjZ}gnSW{ZO¤,|h=~Rƅ[V9ɝ^._~:Jmk%pV@+#N!n8.A{8v .;7s n7Ww0 *j}7?K~70S?ߥsǓ`(Aݽܾ70陇c\t꣍&? p#v != ]zK}ɫIg8w?Qkџ4uWQ,wG V1^IvOӛ_]Cq@{zD.^듇bӅ#8==&IK[5X-\ 2}?X?(rňaDZ/LutcWv칆mOSeyBmL.ȡJQseSG6>B>7I4?u N҇goKƣ늜2)cG ydS8nIKe&A|Nq&?xQ_Bu\:$ L%Kx`T @l %kڸ [;p ) FE_YQK/س_8Sxww|{Sm͝v#1'.7}4+72s6kgJ{hv468&{pt{HqGk%W;9^5"c r`p9A"ư8!VK5,炃" Εܕ%'U.]F1<\_*[Ο2|93q3aƙ!F"88>g&}Mt,? ]?Y>>uCFq); !ThA#\(1yZ~PcV^4s9dSb{?dzw.ݨtWeABZ1_jц.''jW/ =ځZ:q+_]iƑ$E 2JfsM2SJnGinq2K`TZ7le9VB}q '/߶۸BƧ ny{}7rqiPoBNr$uG2e%5XD4`a➮]] M nk0<t6X~e-FeYbp'y 5uتw%/A]߂x "[|QY5sBt3/"t7;J0­{wK]LRV^'Zy[|\M 7ӦlE79yF,Ӆ$?dJPlGAQƄ"sH0nx )&02D&9<gS7Sa,B:C4#W_25Q>(m"[e _S=%/d)Bڰ}r^l[>6/(jx3‰~<=b4=O 4p(@> 4F˧7t(St]Ci8S U 7m갷,~!]^N]ޟߖE@~E@ޖCKTAqـt(ob}@~[X߽57;緋ٟ𾺹ȕg:a ܵbH{ :Y쯢B:$`}e'Y4pNXQEՈUQ*r$'䨮' s|'eTG-w\`m4%|O)}fxƻ+k S)'fуs+PΥthx{XxvF63Rx/.qwXoԝ 1"֫ Kv!VL 9iJ)۵9+* Ǯ$ηx ][ dZt<>ˢ7=97W?aKU!$v0cͬTik8OJu0s@1 yPEFi]W6@ѶrFb%S&zDfgxDА3)`s!`<*$Nd_VcS)J#5.w_V JٍѠC¦XƁh(+7p?7IkF{Ud<لcQq2~# N1l#FfaU0!rյzǁcN-E Z` !e0 LaQ[pD\đtaX!ơz;)$(@Pp ǪoM8 WL"o spU'㹠1kޗ &ace=-O>)"ݮ6n +ح+)}9,pN,{'[vuu @9 *@?l˲eE-Eɀ˿[ܧje}* >ZhoJkKmR.u(m~3 a,PJCuwZȯbtf};IsKYW1_1UZnolo.In2*@p40iQH_:$7Fe& HOHل61k9gTSͮ49 H#|8ۯ߷A ðFTR35]{:au0뭽<۫dqm_c}}1G;o>pm}~;ݼ!fw~ } nI)8AOFw`dEoGIAl-њa:pqB ,g Q7FKbNDƙ'[pR7.j?'uyF3ԳI;& fl M1&M w.Y$xYGճ:><˚zKhȫu cD*gQ8דwW{p+~vo\/w{GUSմ:Z~AfސZٝ\9k+gզϾ *"#>׾wOsG`gFVVǃl`XLpIGmi *xim7 "׌̢LGMa4 vN\ =>!C8@^13\)n,ń+Aao#ͱαֆ)#>  ڏٲ[oܧ^uTƛR}Oǭ\]EN-kɽ*'0ĻO*ɍIdP3 HjrLNuA﭅4tK/ɭ4R9F =%{Ўnb-)%p)|}ydA]nVyM{ w w /i=l"+D3vmfzQG a>xE(zvv΀ZݯunŪ k&wЮʊ.G7轿Pu"^ :r]7zt >ú^&}7cQ J[m~8r -ͮ?,_^OyYʓ-?`}o>zsTR泿֞_ 10.s1bh cP]eU)b#F(bqӾ ]/gOf nH ֶ|x~^Vf(灂T!ݰq4ź} \&d&HVpN-%pr6ϕ(cI㖿}0; sY`&Ta .#cL4lUS3 *"hf*F186J4a’(4uLp[jEք@CJIO6 Z!&VʹdZS̭cy?&O\Э 9Dˀ\r[(GQr}BgE\Y&?*g"O{O?ի2˳W}5Ԇ*>/0Cc.d4$1akWx9i(YS5O&'j'TC_kMƗB[sU XCa3FpSP$gu3W'nl:v%LA eHNHHiуQZ;346u/bi7cf~o,W>=\?-Rq |EXuXdM :MD7WER玊( .J*xa?3')VڬE8XfsDǬ\p^LҺy(WS+ ⬠2,2F^_!)!̃u'2K s9gdյiYf- p&>}PCc(8Dt\vRP <2ҹZuۥ68f$L^%q~3YL14Ԇ~8s:@m 9rE 'Ld6sIDSϚ)ğ 0Iei#trk>l30fw%;kW"L܆G߃fL/ۼ.e9h%qB!?du/CbEN!2CfL|.M!qN0@g=9d49dSk OzA;䰑) RTgY-3gEWlo_Y)Gzzeg1ς?bS jYz1bx\+'HMU QH0 ;yvO]g >H1>ȿFdžuC3淛Yb$nK7T&hՏo66NsbȔ"݅ʉd#>!wހ})<+dF2 ( }Z&sadQ1M}8a (-m q_?}57P˷ v~rm?V;϶ h7BHCL HI2= j&bYΓRcx0%4Wڤ%'n|$YhHz9#,)}"78SWQo7Ϸ|80= bv^AFQmF@DkSrk|n u 3S04XENI1 Vq01?Zq8([z^7P:܆H鸏;HiUZ$qԏ}ݑkk)tʂ~s]Ědi]ւ9ƀ|?\s~◑fyq̩pa&@1gM5wKӖQ8cMv}O_xǙ+||]ٔe<$u{ Y6u8G+3X!d6a^L{WRg$ا$u2J%U֗QQj02k-Ӓܳ^FY_><~4?O  CժgޑBwф׊eXχݾQ1"n_6DR'[ 'emg^|Jݥc[Eq1\w1yb Λ0USBJF% #ϑr!`/|%#JD_qUkyx,Q6@AwXI.E8v>Wp6n5Xl|xQXuXf伵]#҃ߕT1#^6mI0fda.}EA=5r'.'1-X;+yyx^&x^E4P,nP!@JCj`S/6UQGϫ6fqi\$-r?#=z{S@>q:WSq(S36Ӟ'1–uѽ3 Nu x:/'BMl8cPczs>QAb -ٍ}&|#A>ȳ|#!{>{>ȱ=*$Ѵ}#>H:]AFA%A A φAN A υN |1LAk8!*<;aNI(eS~?퟼'.nle570~1هv- *cXpH> 7;0ۘVLjW.>c9l[_"{SZ<0+H(*154wsQvln܏OS<_tVk礦yRvj3O)r'RIurÞB Z6uW|Ž@3bvC|wʓΆ?|6R~sWSD*8,p!Ѝ.rTӬkـEsI(!`M)!b$.))D`;zwj^O2ă+ߡ(X﵇[9/W˝;_Mﻻ?D;h~_׳bdʮ,bÃ? IU1g/](\߾~;]U^ tecЏ94_E!;g++>CLf_#Ցۛ_-;o]KHB \)IL.P&-hҟa$|UTJHRgEy,6Pb8s6}!H!t|P$".l(s3Y)ةL:'oz_v=ٟsv юLn5N0L3HJⲚK\W;T$)=e[eፌ'e2h?NQJ!TwtU2ݥdy%Ka=K*Cj7.^bv?igY2! 
;]/Hh!fDʎ Y2w,1ɍR z7%?`-Ko6>ݷ-bߜ]fo] ̯F`b٭6r?n rsz ]i=W t~eɃ5#ս|@j/,gT a~]e(Hi0Nfx Nɩ9겵 Er$!N'PM4,\i.rM=_\M=LpYsĔᬯ~TL3acPEb%#:z7k:a7 Ę\1}հGC;|u500q 2)E^[XkؿyQC#mHW}HVF!r ߸́NK !ƣ@BVXXR8FP%N1 ,J>qȨZ#˸$(Y$v4'EgV> &;& QP9YU jpN86bD2sX;HX=2|>Bir1ǸL($7`gaM0R୘P+j7eSE DOPt_bm4trK`KX:`qhkWTՎ#d gƛpy ii\mnv?͇ 8r ~X޷ۭTgϼ@-֜-@w ĝܺ/F[L&4b9%ebЊg7:LN֮MU3|,{2ͣb9))Z1&KC{1S%.Fa,za4QgbFx1_$&W?"jOr[^ƯB|f=VCTT6>!Z;e|Ӗ&Vz7@nw蔷M.A {XM4b¶+XTaEe$& 5 Kй3`$)$Y 5Fa:VXt5┏)B"I,mX3E*5R}DOc߾>Sxy͒) Lؤz:0_ɪdzV.V)5_IX˟PZ>XK8Ŵ/Ĝ]~잣P T{χ"@̛ +rZp8>(][T {0џo1!׀7r&0Mޠ{%M(hr]<'<ˌ2++4Ѩ`RKeHV$ ق Ee:bV@aΤB6k.*P3d riX!s_wT dUnoNa6nGyۭ.NCV~Q;YF;¶!Oi~&=?m+_c̉4Ês q^6+@8Йd Cy!yǸ,w| Xk_ \f *'<&] #HV]-%IQm@<dždl$'d짵)k&BKi,e^Ld2N %@[RT3ZQʝ iRW  s Q_r遅T s)K3%> L^ &IQHBYvC A?(,3$gѽ+6QD#hMǵ`D; E#%PQh%+;#>yDh.sq'3nF.ӈX6(-4i\(E9y* ͤц)8>"(qߞ~NSDE^ Y5xCD/rfHDOnhtmr4I"S A;ZCo quVbsz /.etp7{/ʇ(^K22|o 86a!jLtޞR3"qQX$9'(#aFs]>7UZ|:VQ!t!(2R,,+ P[ ѐғDp ׊Մ8yRH3 O ]Ώ3QǰG --Np)H,cYXHA)r<˵AI#9f0efRd 1bEiw7Nk)gHFP.4+8ueBVpa{~+ZC lQe # lra*YΗJ9C'Xd2l ]0n,IawQE/ABJXaZh̴ƀkU"d]yDdf\<<0psswss IJd4pÚX½O w/S^@^& aTДg-+;[&}ZKoQt+pk <C@K d]r :RSu|t$Q)az!Ϝ(,$ՠv/E^TdJ!@0#xٞKg+pdE°F/N2K'auFd TB.D?} pnR1 [_1ƜXjs|=fh9ה~ABO [ފRZ~l.rckFzXJs2ZЂ|f3@2]CÏy-_1( ܞh?nTSϵ"جn` _}zZo}w[^ !\ʂ`yu{<|sg{?isK䳌[G芙Uv#,#? NM_UiwO+VT:ƏP8}#ߪysFǭGШ b{{۪L8߹#zY'FMF^|K{Wf!xoIіQuqE,"kŏ;fKHp6}-{lL>uN<<Y/%egpa= 3!\ravNJeg A SJDh9QU;r0:[ Z+޽妙%NQWnfץY$gJ_bn|{GgT߾ \yk͊FV۷8F(ePszޣg:gͻoa d*g$H/+n3/7v]z46C}Uuq?r DםgI9o:"Jwg-ʣlUnI&-)OR8u0#RX!e1Z{mbj&iyps頚serGyAR<ɢ ٹ3!p`Hi\1A(DK!Cv墻W1݇lh/NDŽO ^,(2[hBϬ銂4e( 0YR-)ؔ9s2!ݖqUYLI;U,Yar2FF6(a}ReYv6hzLẑybѻR2n(2*-m ӕä#g"אi%(MQ'Pt rShJYR9M.rD1"$>6n 5F9/fXK8m0D 37nigD-:PVZǜ[Md)KϷ&#;.I,4qwEC_׫8.S %Zb9qe1UBQBVs jm) D(-47eN `'*zì@):P">%*sdf B7WY1Brt7}pZ{` j$K^icj7vK`D#\f#< LR-6n2|?Bzx,ѓD `Ɣ\*":*BS#v/lJW}HQn : uԉQd)I¯FŠ?Ӯq0O}CA˾)hoM(}T(4hf@(W4runu(L O { !M7"+QaHu6S! Km`vÉfb$)L̰[t2 =]NVd͡h8V01i1!0e3+*"!X8C^֋j{vU"!@S,pwmQ UÔZSo?C 򩼋XlKTH@ G %^MBɄ`l"@9{B}O ǢYaLB,J-\pE(>3R5:h$n*J (sf-#\Bp&}J={xπXy do@ D0`B=G.> N"-@ԙZ` :,Ñcl@m*zdcDc "")5@`JY@02%7&\0ʛi$ ۧ ea@2<|*k8fWS+ȅi g(7;]NclFp>RDɖC-88"e#>l.[QX*OPdW8((x鶽jh ,$@8r %qdbS4"Q8>xcSTyˑYE \)"* '\bXƐ#uJ*z-oN&zG=eP3`M~k8S@!UHP=h9/2 YŁ2N"ԅ8K3$2ܨ dmHL 7Cy`3H$pXM&T > ?M]pTقyR,%sITgJXb!uܬ+itqK(D +3ַ%C@AQ"eL9b+9bE8 N Q3=$59UN2!( \rQH~@f>WDX:ę4H^3ӄ(YRP?LpWGm @\+ űp{G!I. 9S` %j*ZFStfPO8O"2]%RT!%n)dF֛X9*bEmB7F>VBPn- 0P,!2vQ~}NaVQad+*%%≲&`FN1oS+( QFRu@ȑ:ejja(Fx\ ӌ` 5Hiê{mM7)_^bXCTh;u Y1#ª4/پK-eң,L #ǿX&dze`*l(Ѝ{3!@BH^G^KN ; =Sc]#F2Ai˱06xY/iJ@ f9^& U׫!6 FBVN!ң` 9dn1EB13W 4FT=szeK !A'RiQ'=Wl@èpB!8" ~F/FzX&] (0k XF 9d8P!o0WrP\yVsah$a$S'Gr \dq: 2Yuk,÷8n=Ɨr褂y++$1 VRX*06 8eNdh.",m=RI/t<*/mЦ} U*Tʁ% ةWb|bDkjj9pH.m*)Cr FAȌy4a>t#0>-H6"<PJ !( dyzqJtV6c7 _-vԲ ш eLy;ڌ^L`,UPXE,K_؃tvhrV2sM›!D*͕bx)bo$Yخ$|:W& sڦǯQޖ(DǪIeW"AK SP)%,Y*G#iCy(u4K] f>Nsc >&ѡPvUq)Q!! )r,;|!dㅺ^03 ".": $sa RCaGfnCU-x:(l0|˂!9kɨ7pZ,P(.GYf,zKSe1oUrΏ'p_[|4:Q[Ɣ @ :R7)bS3e[[i$qRa![h'#ܙiQFEpxa1BXD$]6Tp(ڕ@f\`$5jVU *L. Z4I5oJЗe6fQfajx(ɝ*!V e} >-0E{ G!vٮQ5X,^^ǁ(G-SD 8u%pZ0 C-Y>AeeӐS\mmU+hB^7 OAԨүt˕̍ȰVLE22xYW`h,Lʀ#yFSeӲ> MBbIFBkRfs#7)x1Τn`|Ť:s~ 7@9s~: 0h5" $1`$ȇf$W`% Àp$*'6#5ڏj_Hጣۻ뀔%H"Ҩ=F7Xm :l*mĀC8䎄S]s>պ g)8c 33D"7SJf#w|]eMp΀}r9&՛۹.;5η[ۤv4uϼ!_UЕܾU!$-dQUR{xfRD+ I%enoo'Rs fx^{$W)wq>XR_X5,9fk#yp1n(0[{yDn9["AH_/d﷧m) jpBiKWY^+::ZB`qKQI9.R/d;HPIܯ:,p i!^>$&Kvv$`Y=Uz6ϳڔZ?U=c(&ܧZhƸIzW#,.>aތ^7; }ز_>uG,9{ gC4np{ɰyZ/CIS !:W'ConS37٤`SNjxSOig[>%]x^s\w]Rw*^:JBsv!c^7]shӫs4$}x{>o$>iPOߟכSȧ$Ne)j)R&ڕ]Z꧔#|v/QLS|MjkmU`mfTVMLCQvT~Q ԯyU}L UYzXuz\7yJ:x:ϵO\n1yzRq ebtUepJuˁV.dmg|PX|Gt{n;3_R^kPL?sk;MlMj(/u7А=nTMJӑMRݳܒh. )2Ҏ$3 :"ixbvInNJ3;;J+>I`-͒ zsB,K!I{ f@TC\=˵LW*z [hCzik? 
sB̥M?{u86ɳMFEQTqZ2K?,{޸`+|d/ y8KjsJ5i;c?_ܞڷ]b-bqGRI:[~U#M%9\M1ь#?R(Pgmu#զv)& I7lO -6C 3TR)/$3)^`HWOթ9mjqeoU,=W*̯P~}M3['{{xYlI%x{GJbw#B=ŜҫӥVR&06є]Kew5~h.t-35QXҝ&qFW mSyPkqIyyRh5n˔B|XXgBÒclFmK=fmiF0M=RǴ/ϷaL&D'w؈d6Czm>uZۧ.iNC @T^eA,lH s۞3$wk []Q,TRzW^.ѣv~ _0wOQIL]ߧqK5}o>upQ ϓ5έ%{Z1j>j!VhjJ9U7KZVgF>y>/ (rW#B4c"[~$&v'%s RL5gOA K*%lRzXR_ vB=Jk{U|6}.U'0(TnPK&7&^p^C[.;f>ac>78*U=-)BjP+2mz/(>TeA.STct{}ixBU]I^ k;F٫e9N5]AP].ӘIkrW_Lz(oQ+ 5LV}].ٗȩrᲰzR.͡4rŌA'b^D^u(Ru5t}Z ||岤-אR)B Xwp  #*$/A wOVz~hy'/w}7v&^nJ@^qGTN$tҏW^[ 7brK'k'\A:r/PyO,hzknk6z_9rbd2Bm^$K)!(l5<td LWMz+2Uŧ@HiWE[E8a{e2?ŶT{Z9^>V2YMٜNȆ)^$gC 'O&د |` Z'gʦuǒeKRI( UNГ'sGYmr&Lxi%L1Q-:d D+OZw z(2Lڲ>'1cdaT$2ae/Nԏ ˢڔ/ɀyO@ﯲ8ٟd/ΘI+h`DÈdbtP:q+=a Y_zcUn} ƠgНYB`lNJE>HBkKe5<~ ]`AѶᩘOuwdrsy:{7\1_?5>^<Ǚ ܻPf_2,!x+'vSy(C}KhdU6J!b&4T RL2U.PfVDcGjI,#BS YCޛ_& r-WfB{@>Pă= lħ KQI9n&yÌN8iOѭ~%vF:J7ri HH&r T;%D%C}cc&AJZq!$zjTe5%*GT9h2'Ћ2`^-aH,W F,E>3Y#&2/N¤^+y-uKƱQQb̶<3~/SS;rQ(y9l9@AP >zʆSY*蠌*3]F Žw$gm"9 w_;y2e_&).~{E@.[m ʢ,/eCi?pXҿ3Ro5ov;m{x^rar^>{l!~NK=@G c-/eOgQr.ɲl#TJRH%W_ޓJ)rIJHR5>>875$ !VPՙ^zN 9S(MktjŇmF Bg4%yZf[uOa txX4O)" ts 9 o]p>&-̡\.wN޻6Nh`O:r X V!47 Ad(*zìr2Q[0`ء~/CWlzb)(rwJp,FeQ@rЭrɚI;%*6moM~E_. @0e,[3BڌI+ ydX~O>px`ܦ'\% pБh`8َN}G;ǂ/mm4e3,7$)+ sMH '!cݧR/̀CˠZr>2c!fStɬk_߯3ts,SQ[ ))/6HL`uVmYHH1" ,R'Vr-|_e vT+oʫ]EЖI  ̐b:g`̤T~Sϵs1xh.๒\NhRܦL>84PZt>Ad3`#gD ӿ˝b1dy朩(lt [czKxE{X<)gfJ$URr7qb^ALMᗉhȢ ~#+Ja<9!c@ۺ]OKLv他_AVf&2 ~M2xZsҙO3Zo݂:w:JwQցk]idiקk?u`t(-=݃PͅYl(egxO˽LBc&-~D;M,8GG\ؙm1/!6OaIe/"MNF/7hMJ0/KeR+V{Fh?i ^ɗɨƨ]4lΆhKVgM3\x}UWR4,hF *x v T+t܋7q\ kPi&-%JK2aHy5aj/1dy(2lFQ1j|  \x96"hUu#˦̇:+^ɗI§5"/p{ nVyٿ Z'n}CЋ3nZ7H*axHƔ@*Sz$f)5 _N6~j@(:>5-oߧZ^AP ׀A BV*{CPK,'\])kN] f̣!>-vCBZ{7$dyPqd9 W}C»NR/HA;=IEΌ 8,\⥼^zmzT+{OvTWz-"X 1ZҐ -vZl$N*wC/ϔK7SBV̠>*h@Z N[_^_ugݠ\z%0dep&M` bt˅n[ev*oU%;{Gn:<{bf|ݨ{%~/iMKo̾aDk>GH}] vM|<9| ڠ(I46ϿtuGW:۠o͗ozqOa7qכ?֏nv^6lVLrHOmoV`rM.5gyN燄~x}Ntݏ3k_5H7;TdV*X2nʣ?Gpۿ_ %PzEG۪l;2(*.mRo}J]Jo`y۴GtxxPuۛmZcytuï ĈHb\]7;j;D!\R 替)^Ae )]h jֵ0>J}~w?5Y֥V'0@=-ǾW71O-exW{y8܁sqzY=}*V:5m_?=ybwݴWwmh6߁|^>ewy*W-]۴f}|iψq UMi\\z7nߦVv˼N<%4},н<=Aм/Wi7qȫ__H( CgG!]GjXs 2F5`fx(s[ne 5U[UɔwOR0{e\a\aL]i;V̯˲zj(ejiL?8&k"ceɻv⃗ HL4rGlߘ |U<߄p>'u2MKJe n&1bbnٟ`_#Y?/=]$~ed\. ShWtJUB2U)ќJL)TbJ9scͪ,mE%fiG*1W%ډJ,Gjyҧ\+5~P\it- 7ggۋi58,5 # laF!k1lB u }]Ù72:F 3"ڸ+z  +2$w];[j[-ͷ-BնTڭob9"[B."nۊ47YSڵ JO 0y\)nF6~B}JekC/8<)%cr~ٖߚ9uWeáUkJQQ\pO`]դ!20@fN)??0j_֛t)a?> ޗYoWRa㹑Yc oWteyh䶙Du[#}_)WS߿x}? /)k*}I׆k_ΰ^];UUګR>ra$b^DXǼSo63Ur?xDa`S\$S9eLVAE{8B{ $KEB 䴝(3_WKfo֝ZŖf uκݤJ)(%y>[x'ʣp(\ɑHddOO̜a+TSbJyoyʅ#K DƖC4RI{+#SؔmM^*ER^Std c6NE$PbvpB479'+krR(@M91I))~zqUs0´Rjǜ*W(cܣ( a…Eu2Q*82$Y[{5C6S6Gc#FPNI# &F)Az{E! 
@Gwt;RTpPNV[i{/8m#Zi.\+'Q&$IRѱLqCoLʚq&&z H(-D93<;O\> _P<5hGP+($%Q}h[dmZEENE1^y1xc5GI 0rrOb\x"m鄩v9 ޘ$ Zaa`uٹf4X ,Bb1[7JD CWgr :c7z4v@>N(}u9|!aw ݣߑ;|-yX|puiEs1_Owza̛W'V{d?k?:ei#oտ4OZYsrD7SMCnaIXL2l,sLJ> 2fT4k39Ek/б%)~a\.*p@g4v<|ߧ|vC{YOkt&6|is6O򿱃tts_cީꬖ}Z !i([<~csZFs6:]=s=|N{wq^aEj[0|NS8v'ҷ]xmJ.Dzm|C"{w4~c_/νWOp$1]Bz{n'OSU}7~_٭0x_;] BX -9>o4k'?D: %4V)r-Q7yXMY#]w7RAOikyF :;wY;7ot~n-\b۳7fP>ߜJ3>^ s5 eoycfo #<}9@N[NZ1_}Tғ*=@~wwS0v yc/ٝ#?:״߲S*APJ$wԌa..}JgwRsJv;#N+WqG٭=5Cj؄Oi~/CswT߲STסd$E#Nv897 ү@܇m--?Eǭ%E.'6KQqFo|cgjd1؂sIn=PNr`)25G{/./#{P԰enVϟ\4,|͒UF~If16FK5X=~S,뾦~NN ÞDxpGLr{aq= O}WͭAv {eVC~OW|=}ZO]b!~N-ZkK|N >U)3fp88514679cb03d8f51e850ad3639c089f899e83407a2380ef9e62873a8eb1db13-updateinfo_01.xml.gz000066400000000000000000000010251400672373200431220ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataTˎ0WX ;i 9;j[ErpogYoG3u,d\0 2Jɒ2An =3"xt7]WW'ƥ[܍*)H2Q-^?=Acju,eCsWR@CHu@C)DDB1ѸQ t>Nn$6V|c*'/)]mZ2C굡|rW} ћؿ&ot딘5'^J +Ø , =53l}4B$ ]\i.+(,2HcQOv6J)T[pk9 F׻ (r}y(icxp:3$\X1Ie z&TV>8p HTP1Au ᄐB%d!ʾ{{܅"5[0ޝFz I˯Dx~":nHsPЪ?b`a.ή:8<^Up8cr%OGֺv `93 )3,mb^ޞ7,[Lŗ,^CѠ{>fܘp!G,ٵyn>T Z*3#t+DM= &IC-=2%WEnNg +Z09G0!8@E~RDD9@+N'e!B!m4CAP`"KG_rs)ˆdTh",U24fì^ yK0C5L!w$S 4V`ba5a4fdbb20e7b9b70d9a9abd974bcab1065b1e81d711f80e06ad8cae30c4183-filelists.xml.gz000066400000000000000000000013551400672373200434070ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataMs0 t L~tSg핱[v-B}%|=}^ym[VZ-X*Q\SvvfRRY VӼ Rnc.^ϼ,e"fW5K`i""8d:I$GiNF"&tbE:# LR)Ă2YrdY`i3ې7MMlvdF%܊rb_ z{l.ʖdÇ5r'?/ CgB*)UsݥQ=e;mPta丝r{+@B _u %T&h.(E N- +@Q4e4 ΞJPN)v*?5IM~aO5KGe*Hs^f(/ m5Bp #Hh!-a@,Fr-(GU)87Q]iSmE4%7ԀV"7f1 + ‹H= QSL)Pf(/`fp$q4Cda<U2Ycv*ceho1}f^ߖFHItU'Jٲŭfy6_Bq=Q4=3;c8#JU~+_ԵM>kjpƵBgUU)W_~G.9`Ic>5XN_ r%F''}&7e,҃SVB>'!ybtFe1@\7g(.LcKG{"|WT*u Pk$987)9ʣV(Wδ:W}E@}ާ>"Ёl-FWD[AtЁ40ͤa-۸vǵY[g?\꧹=Pp4GmxPBH-!J$젗_|mʯ; "4urŀEO d7YW 6-> M!6=Mc+L#s}<ۣ:l(<oC» ${",ODZsDIlYnHi2(dڪ أ7uC'"lRnQ4cb0f4b5df8268f248158e50d66ee1565591bca23ee2dbd84ae9c457962fa3122-modules.yaml.gz.zck000066400000000000000000002512301400672373200434170ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataZCK1 RLc t iZ' տ2aW#]\^T O% BpXZ}}PJOA{ѷIWՖ#mĆ4vi#x(d )ghgER3j!Pӑe ^(SnD{[2~nko¿vhWO8&j$KjETc <{<f4ugpA | ]EQs ̙@^&xw14;5}y̳WEJX؀;)?$}`9(/]jRj_1Ўm oflmȆ13s&,ΡtE (PG<dɝb6^ܥrZHRMU{vs-Y&9G~7{gK"{bLqpҥƸ=el1ɪ•P9L0Њ6ڽOr[vyu#z*qiabMS X׵NO׀4 ލ;!C_mw-/+7!U%I,Ԭ>k+UQ3.  ug7=z)ň bQֵRZ|\Z*”}<`QjR !R 9N Ro4fNپMbG"WvrmվOft hCi1Y[^Ks֠|~VrKrYsH5VnW^ZбKU!N8iL=9y'zBU4#Ѹ—iE֦O; pBlFnjڪCX*%R@ X@AA4 ܓEĊo Z@ft$4Y`ux~q= @h'poQhaю<2&6bIK,4cIXWM 4qYi$ ƃ4x7 8+ފ3ފ{y[wh!sT()4CV3TrlԸ2qI(H="Bi,'AAꏅcI&qx0*XxbĒ#Ƨ\5WMߺ_m=/`I(0p$Ea`imWa`aGq,$ c wl/yi8@@<Dp Q$qawoVaH(H0CQ>Zap].L|'"O]bI!UOۿ!uB\"$ WSw[q/ 1/k*b2jߚz${CVbs=wOپMiI{koMrXA r?1Tqzk]?HK┮JYwRWooԧ0W۽=k-:T^wOm}?.Xu@jbW=UKPQ#庨yk8-chU,B>c;6[JKJ\ַVWzqu7KqZj+kңkWe":Z_2._|sY{b(xi5s]ZS u<. ur5\Lڒ*Gb߷LӭOj UK;o9]՞7VK:OK>[%^Bх7}inSTcnѢzcDjM54 oX yzRR*_ussZ|jn#W0ÂR5K/)Fr{cDJZ4n1Ty[h[c*E:jˈlWΪ4R7&l3ϋ0*PWG)327Y{Er+zmG !kp:UMSsll\ j%f'*۵ӦJNls1;+֎B1bٯ^s&r%B0cJ=lNWՐ~2 ں0}W 9-Ԣn;VW^y/:3bEV~q]B)t9ga?j"mXWJ!#&a{:w[H=HQj׎[VZRWM{culh}1JLR>*T##cͱMMz,;϶љ$(m A8DI)),煑{!' PhI$Ot(H GBI Ő+ԩPZjH9V%DnX8I!  8@LD̡hh$ ƃ$ȶ7q4$%Dq_ $(Q8gC O#Ʋrm1zxX3į)PB=}= cY,8kz l8e:daPCtmKg)_ %Vfp֦Kv}koʈ`VULkdUˆǛrR5qt 9En[b,fuK:) FJ77CMSZ06m[ {CM_BPb5>tkg95⥁Z%,!ЯM[MGă:̵cRL}_&p09K6Jjk?fa. 
B˹[{*$KնK3Ojzw 9:R>~&zLivr co)[ ݹ 1VwjoWF$0R7Zkf(%doj'DjJ۟"Q?gީrm;wnL9{kOCjkv?sJŏ^s侖.1dO#]Wk]NvЛ7 o H,CBJAogVD/aX } G,K ǓřY5;SV u*J4_*kFҾ'-ukn5!7K%e̾+SB)66^R.3{g|zu  }m>;TrVrFd&ƚC,%Q3mqaXU4kubŌ:|Y{Ecn~&J3]}bF4Czn.E=5]H˥%ahYg-fin:Mc&W2^S?֌]J͵w=ߕ׾ǔ[iBUX[s⊢ nHBr.&y[3uUrerϭǫQj1k)-*cʴ=3wt{j JYNO Zoz:FE5tg W^j3i~9k9n%VCO}Ӫ|+}Gx[(80`K  QF/Lzj1r^zIS ewbD̊)[k)hxnMl_: p,4A@$g!@B7(@p8}C]X Fh0PB0hPK9drIJ`xD408x44~"$ɨXP<`N*pKѰ,MI<0*pX w$aGDaa6{8 V6 ;0<$ PAģ0jM7е_.P8{SNSV> VZ̮۲rms*ujJrT%R)Cb7!z![ԚVTd JkoW!,2MԳPe6H@>F #6 F۴w`]cF{XDLPlYd p UDa#ĄD858?1 P+PmT啳$~4tW%sVf+K'$ Cg⸑ 5GTnG`BжCK-s^er9jv9ĎӻP SN OpyT7 IlGg`Q(aȉ >^T7RG5xfjDždiI@u?)W~MDM E֝:;Inat31ɽwdcP3*}2ՍG#e{fe uHvecw(LX@H1v)G}ǖ͍ƭSfL IgC_"MGL'߇ fRf.8:lzߧX? [otHb!OJMct/g8I5ذ'I"X̶rƈ\IPzopn&@_< A1pP3VMξ}fO24>CaMCNE%% P{fqC}x,A N>;NFd<=X~6  *D=YQvu[KqNo198: <Ч fq>FCCFM(?xw :bz1˫qz[W|Vp;Ph 8],@OyME/-ruAKw ?_eGs!5}yn;(9vp&В.1E$H!$;3D4Gbb>B /1U5TPkhHV+ר&I lvG"rrF|zfjnauy+X@j|m=:G&+᫂E؏JHj)A5#͡}10KbIHa:bUiamtKɵ-9P,0#Rց>wg*l]*c᡺` 5μR \!\86nԻ* $m[v?%jlEF֒Z/I=+@"YwK|༴[sұq|lj 0c@rPO:p(TJTw+[BLGV5q /z_@f UzmMDY]w(t]< u,iuh14J+#D"X Ljs`ŨR#m$:Dp$Vy3ݠ4%X Vdvhut|9 pI|^h뎒r[;z/<fZid\BiddV|hҊcnwHuofH3l! /WTغBO,c~á q{H:ţhł.9{]o1`r|"MR?2 B s.ʚ *bm5Ćwe`vHSiLEd+-t1{ cu4kAu$ؿC |_@g֘A.uɬ@-XҺuQ8bHJO8I9AtVIf0`;;N-s\Ep?VxNJ)ʾK鿸Z659shDAaov6I'pb.B bzwS&+/gvD!/ Px+l,[ck,X|Vd8^W?ĜNyN8PzIXɕccr=0\EˎC}$ 7)~hD`Apq'8Ng"dB0V!z'?㎑Mg 뼆P•BH-,fȃCu=+-]^ ++eu+ [_6ߣ,ٴPIay\7SCK #jyސNu{R-x`dQ`2 {Ylr>jui<4f.60o6 wۥ5w0}ʆ<XsSoâ6DQ.odOL"T5HܴATD/t52C'^%2fhft-Us+)O(^[ >Q/58qjo WV4Um Aє ]ulzjr xNߵo]vi2/`LeAb?N.r0ߩ&Cf< #ߚmAf4b,tUwfdgw[%D  - >uj^b'޳ݒ6e:}xQ+J=y%?%Q7/L}N!B" $; *Y ݪs+1̣zk>q濔ٺ.3%RDܛ7SR~Zf`5:n_^NDo|'Kf.pImB^?QB9q /*" 7 mTv<ς쎋`ՆaӴWQ-i\d |5S>G0@sUK+oMUzjk(D'9p~:ÌnqR8gаDDӂ к^-%YKX6Yk+S>~[w.-ƞAM#=o66뻜Oa^S1G3V61dDh͘:h]ٔ 9kq5WAZe`pS#+}v{mmHyHŕ2({};[[ҁmp|48$, ~,3"5fLHdkq146T$sb 3",fKMqIXhMOɜf U WΛkǀa"mI*syN\Oځ X#fpz6j ϫ].ϲB&c/2cNx< -V ϱT0ש*`EğI\3ͽʘC3~LUPdX颿siOMXL]kAGwC}Q^qdK)Jʗ?5 gK*>zUn44Oɘ1ŀaM6f_:-HJqv! Zμ tB.f dKk0-(PZNO¦ X\^}8KJ@bL噊%ϐvbd%DX#Y¿((e ]ηdk67>6a7^:UbHkb&^fP )SR^<La/gq5Z'o)>3ƸXXŰ2FڼG4KpYh?y^FI4%FK rk&Ɂ,Q3Ȕ؅\?泃8=+ft(y(V 3h6AG#5>#r8c"]ם4oҥh9sv X`"(NQJ!'J7F!bԑ&EUƈ;`Xx9!H4Tc 3$Aq!E!)V,\b>L V#.fXHÂr'^a\مDT؞x=L &  A*]0M2 e͐NdDQݙ6:˺5^'WIpM-#,.$*ɴN̹uFlPOĞ%v/c%qZfLZTvͷD Gi3!j3eO&ɶUL*awRCq;-&E$!MJN"vO36pl( Cd>6;:dB75 _.Y哲bބZbu\Qb 4SlXE.+D,sf'8kܕ\ajkLG{|PFOa(9{eY isб 5|sm- P }j_>*Rvy(g 2'pg Xxpm6<"G6_С_hu}P vTGHqrչR3.ahlkgG1 UE&h~Fhh:oR8fn5(^zEvEqr Q^.WØ O94ޕ(2YZ+a<-iMZo+b:z: ܶ|Sl7 sޕ*^QƋ@'U~d0)|lbvsq,?&V G22d.>gv|(>С8bDtʿ)b'бѐ>l!V6+Epe_clI(cV( X8!(i넴e"lQv#ī4Dzg0+T̄V Gp--Jz*-Wߥ\ːS?e}Ηşcrˆ+ (\y@V<\K;a @2NSvˋO`^a \,:e a8]'O@s^)t>Q))~c= P+cn~7><ДlѬ>pQB{+w7}*1BRbpyE`ѢDIVN٭_a)//ȱ@&J(OӐ"rjrU%L+ImTZJcf=) ("b_ 8TMG#ZK. &,&1T ;U'8cI '`ϒ9R|U),B(6eآA %ֶaU2%} HqTށ~'/ВZ绐"iD TuID qXsiR)ЁQI"9C-TD"PM X$*BxYw jt@PDه%&f+q,a_2缻N9Cr0HDŽT &N @+JCo 5-u2'v!H~ꭽ&C%wja*Ka!!$?@+Ne`'/>uʍ`S"ݢv}* L^e)ndjvtY9$iKz ^; dN4Y {ԬyQ<w@x֊ t- ru&n%TpYTzV甇 R%ԅʷ}E YvCXO#+߼Xlmv+dAܷ6D<jbя;ΚN MDR񗼊ݕܞ V7*"HDN. i*9dZE +ݓ$q /FvJ)"&Gm6 5&tSdcD\E)0B5}8N d46MNJ$Pٓ[k؃@Wӹևp6UJ\)۳0Ϩ+6I&RwAVG%3ح1e12!Mt*f[ U~hEh!R "VY9hm/`~/j&ѠuqFE eKg%z$fHnh;Ԑ+$iU!zb3cu}ɓ7OI,mkPBI@lU2QY+H5'b3Gnjl]c%H*{gD1GlERȃ9 v9m0@#(}n=1fhu\ ٪\ԇTވ# gr nkLc'M/e2M m xdpf+@^.%rX'T8Ť"(|H GiOYQ2MC-+pP]~Gt/)(̋bp9MjP- 3jT"π6>/.HI֦ew#s >ZIegIs5WL4r,6p+l P^FMjCC$T&e(0k-5m/ z$^J͡Ň>xFnR#5?{@| ~f %g lRn0M!"< G<uuF_KlF5iHƊ3(LOPixr'nؿpc_,YԳ@7U$Q+tEoFO K;f" #Vp!}#ʃ{~!R'9#ܗ xOx 8 w[@e*1 [j0TuMC߱,b-XA`4E 7}cTIHN;[9D6$ꗧk?\fjYTk7 Ea&36dtw ;tGOKqYm҃(t lt~y͙2DEcVMoٵȓ3&+qڦJ NP]F2U8m@˨ՌXdʑY6^'T!_hjׂ> pT; 1NˋZh eDtp n- ˴qF6?XYp,il&|Cڃ7Q~3H8u@-L8=]Lle]DZ#i'*Y(mΨ¦ոE+o@es~Ii :!GP[lɬin})ؘeQ:s*! :EbpmA_PdI 0? 
Drmbs4ZI>flS]QSw2,E @n|'C xU”u5ɅS ֱz) Ir(JZ(ؙ#N$d\Enxg3(n>d/rUȏר30DdXU:9P[D ~$pfꋈJcĖXpAOc>F&EIWZ:rwF)XsB| DPhRw}gȫUJTqC걵q:-3 [} _HH]daJ=&ѡFOD\嘗A n`(-;SE-Om<^Nٶi*tPDMz aFJqd>IUXtq`(YgT9F kϝdNqQCZW f}X& O9+,y5mK yt&,KH$J5~~fEϑ o,&뺀C`;D֢`X3/+]N ,9\O^$OP]KwoDp}# =2bɷ54LJ`雨<Q꧄'P;F,)q'Xn%$Ʈbj?aJ#.;y 2'Z(Z.VIXKj*@2K*Yn+J^WhrBPSC?ڞ~p yBtc V%|igSgQ*V)&-3Gɵ~B|څ;t׆ TZ[^7t q޽e f:OSh?(/fy1hx\Uݲ۾j/2Γ ];_ ".}=Nt3U'BGWiY~l.2mH%:gwje,BdLYsw5Q1 *ӯ֜ln]::Mn2|dI7)/3oi7#"Y=J^_G3!\]|e9g]ИP.ܮu*1o 'HO9 ɻĭ#j=*4reeH%/cJ?Qf-:̩Yq^a֕LA|7kYJDя hI%](!RJe~%*c789'ءQ4zX/U5]qsRFG Iu .:i-36Lrc",qu;Ή }qHDif$Uh[1Ri-V}龖d:/ĸmn,O MRnINjr7ѐ_"̰XR%D%P8E 

|uYNnLj1asҺ  ⺭~M/ռ+>&y48+`2)ZhYJҳwf0@;"mR=ZrqZUGX@2Ŀ+cYԛ ڱH9GWZjħ)m%% Otg<[=u*(e D$cXLh( HDD8"P/%HieBטWHa7)WHIn`@DhA, , ı(ǃKC<XD7x CY#A wO\氞䩦N':2s C \x$@$KD@£! @@ Kȃ!8_ujh%/|n>c-SvZ5)sg2n>i8:5y0$1R&iYkE̳π0E"Q 48pX8hPAQ"㗈h("JC`Ţ0b" /, Ă# <"ykxȠKdq8%%e)0&v@(t;+qO:@aD.xG,/^A )"/( 4P&Tsc[o+ӧPx$|+m"^&+䞛n sncVqk^"T\иБA津^X2Bֲ."7Ӈf FiN|p 4,x/f!B/i\Z,`.]/l(o?)md*Fد6ood?%SzLᎁz_[ XY4`*4<8.Xʈ_CU ,"N cbQ4T8Uڕ1\m t9VJ,>甗91%s4=Z*z3~øQJ0j˕@2AAJ"P((':FԞw}Fϗvn 9dy%,JYVw!1+NDĢ@40(~?{4-|Uuz+ꝓs:̖J"47$ C @%2Q@p/!Q(A k{ː<b8׸ aP,>( &O^󥐷?7)]=73*:jx:f:C䥳FIn Z "z*BsFDF1ԤACN3Q 2+;I 2ImuG !dƐ2`ff"M&X1&6%pɹX|T-%`%FrLw Ɛ ,A ! X(q<F,Pp F`0( E) c)$/CB4^ 3$(ehe) EĢXP,J G<(d!]@)44p4CıX|)4Cvx &!эz\P,=@(~fOzv2m]< Qw 'tP2]Q{' gLTE}N!3[6 ȍx1coRؚ5zpS|ܴD)T,\`۝Zk .vE~CYCy`*Oi)C֤| sVhR(}co[Vue(4/ ^=:a"3%ie$l+s׫.-^YK#ʲ`|qN\ jBhמ2|i]iEb1>IQE TG-nDsNek?V9io*c]D}R;X&wG"CgؚΔ2[J-2S!=x2߮; |C _R\Zx(x0>& ]gx/y@d`t $<(|  cYBC8h >-BtKhqЬpT2Z %=R"A_s_h 2lj7|6'F9kӛ2af7 (p@Fi9ʇ:F`wܻͶe eU `By XS^<=X1 _i7u_@\Nt0-MQ , ^Ab\=GuLBfݽOCB¯⪢̫[f:V0;LcmLܟGH>}B6}RJ bZ]Lw,̭h1kz{ΆH8"/!qBzq^Tn~:h%o"DKciB2 =v%D(-Zhy2:=ߦ2-$JA)4X*~Ҥ|?Ren05 n2L &W :jϔ Q":8em)!DCyH"~#1)2sDvޙe3%ݶ˰B)V"^һsE$XBr::ItUdR3F Qn> B  2h8UK Bwca7>D27wVdnuY@d(ǻ٘z<c< "$~|Dh-<5~lEhLE h!nur:Jy.n9_[HP`v_n~Ԉh4hx~Xʓxݳ~9N,4ruħiMXxG*\ͦЛ{E]'CMT2ef+r?pG Hܡ뻷?5{3EV h8h 1)ӴE9G b&+϶͛KIͩ?hg)sw,3ph E%jf&99s]hCL3d ݩApG_-Jl-C)SsA5Dn1(x0f (CAEgǨzWM ImnOh-Zj{һY)=eDx*)x5Mx gڛ q444jDCY( y8JtQzj(eq6}4PR~! qDjQτ VU\8G\mC@D]Fb j]>S̈́융gr^] mu IDekm"t̳ZI6fqJk0Y.7Hץ6I4mZJ(eK5!ɫPͷU/-j?yu :g( 3풲*c D(OU&5I53{hmz-r&4 0ovi-A38mfJjr)V4*5;3Z: R4ýވ:XӼ~\͊a̻{bfϷ@>HΒ B͌@(:,/ d@ `@  ɪXTG\RS qx+!%Ske1c97Ew9)_P <-IXp<4.#{ٌ3sjKKtDg N֛I%JTNkaIhP*z @A6NR=:*ezCq2W#7EFX쩶 rjDn?ϢI'tQ\揪@0`K/}Z\hh -b"dO8XVPO:lgd͘6]pM@T@e:BwOόCRW?LMb*͂rA;|8__tIX<ZW @Q+n(z_+C,:kgg92 @Qdn@, .6еL.f nh_wB7t+4@'Z68A {Iӯ6! 7/TKN=S,) \8ǻ% rGœp:3 A1-1."%QEiMx; 8 ɽ̰:2Q3|﵀kF>5x<-L԰q'Ɛ0}׻ڱܘPlVtI.DÒ[fV W@z4^Ŭ~fߵ@[T-Kmm[! xKhoԹ'i."|n|gط/-qg4{ W3ѽp͋ȃ1<>z5qHoZnkQK$\>굷SN* GCD@ؕ R5C@Hp xa'申5>Zt Fp^=3U}Ε!Y vf. :LiA].[NϮ/V]Z㇈)# F1PKxPC9  u' WߖN4CwŽFQۤu hoݸ_:"}2_m]q({ PI+mYw##l`Us`Q.+SǺMԺ॰XZư;3(V k?bȮY7du? ّ5֚~]>cP17V FHK_ c}uš]}9] cA Ή iy5x[z{`r6e$NJYR[Rhԫp_7Zwl`q>vC w+|܍~O1rۙu,34 |䭝>*v(0b<Kuw΋;zPuFEǨehN7)~\u("諀H柝OF Q(H@p*11CLÏP!`GP4Ϲh!9pģ`,~5P&|X#( o& AX%ǻ++?22WFo,/lt1.}1S6&,<5:Qs"1PM-pI^s `~!6Sf)v!%;\BFxQ+Do nWmca@yxwiQ3HAj#l>Z]KKږ͕5nDZFcwX9?O2̑'켧Xz"_G!ޔ2BAbEMPܞq;B29~:{F?:WUK j6tE+. 
/5qY@KmW }2!tZ|[/ߐfz>Lߊ}|B Rd; 5})GS0rnE= $߆cz%]}̎ _^H= e-Z*$wT}H ⣄_~Co rB E#=HӃhq[L}  ]+/ xyr\Rء q/sX̜RBW7-:a$_]j&0}r_ B ՗dlAulŷߖ$@-!耟Zyq߼iTy杣9yyX a{s6PdBpWfhv&DHB :z8l-M|/TrZ}x,-\e !t} x*Scƶt ``σ87u025 k {  baW|yJEr賑 ^RGH/ZƂ @L=I5?]9F[; F1Jjg3{` 2={a"fXpTy>e܃O1ডh ίiJ{d(_h9P͈w<0g7@.z4jt|k1xmw B,wLV_i789躕{;k?7%IGd :>̳F' K ƍy`j 3pbU(Z[Ȉ`n4E_ iJM' 9U`fрd8ɕ02?e~涢YͰՇLNJTQP6(B6G EadgQ7cBNܟȤ_:r+Y"?,s/7(X3djJh'S,$6{}sŪFhH"v@=FWG [z 2 3gaE@נVg cͺ<+<?{ WW+jqBZ0S'gQE(R(F<#?{}\0;RwGn*HA̙RÉQZ_ΩhE5.pn#F'GEN,qg2FBT`{&bʽ5E4s~O'%0v ֗S Yg:mΚBйڃmlqzWqu1xRH:Ga #X@D#DQ5V- R #$cI#X `&"6ib,yofh%\%ZcG'$aȎH] hFLĦT 5|\J p`*Κǔcԅju2(кe$e+D^y-h;cBV3JSq@sI25}Dyh/ ?БC o״BN0$;^A&=E\yN;^˥ \u5u VD62C7" 3?j ũ c?Ia\A%+9Srw(&n ND$*Y>b\?d5_NJ2m| S # ivn D\>Lai<ʘȪ֏D1IE+φnmz)FI3 ]Fr/zev>,Np^#oi\1 apEqa(Ds2%CΏQĆc6rsQaC l:`dG\ E$V8Ӊv htEuuإ?Ч>Yllʃ<3U%m5@$ {艖YY^0xq` use;Y4ibxwEJ8-n A6Ze0& 1RG] RfB._->K228¿D+~$OF7 3˖o90RN1*iو3] -4[/"ޥUwCI(CR*[`Iv*v%G i>uNެs39Cq:7XS@9.SJ80T0%{egXàE1ni#KӭQSE,M֭gtg[9 Pa}0}^6tyx qccQ)a83weE3 bhɴ T/J&^ X-<ihjMBBmPvyU/E͟WFƔp[J8| {%'( VF=K[ha q߻\ieb@TOr# dA6F/<֎`,ygwxj"O!9$je*K7R|gf43yTr ݘU&K3-CE:T'0Pz-0 ^cfjz,sX$Í=F;/E^LfB"R>*nfb;HU.vɝ?Mλa6x'*1-f{.+8osd#gNwez 7b(nܵw&H :>ֲDTh8 Dk`MuN'IY_3s+^R2~(C*um+,N|~[^2D~B3#7"CgiUaNA[CÂfGhbjn$/IQbٍm9 6aK_Z@t `gN`=@~Y%fcE"B*xOTDt;@ Węl8 u0CN5QY/{MWl }[n>pE,̟*|9関?uXƱ\uA"R!7`Nd8ޞΊ˄fMPwZЯ}(9rMlϫDw__pj8D63]sW< b*0\ &@V]W۬ȃM>UP5 B/wB6ě^ ܑlhnHP0t!`" 4 QL6j YsO53. OP@ptw͞T\dG!M%Ղ-H!o-AJ5+J OQ_l P #` G T)-M"^X@N%5|&ŕ%Py^U,z>< Z.?8A\$VAlu9ٕ_:(%[mee, D^:c25^("Zlvs! MH'}QʳQX/jeI2;22KLL5_N`q*_̓+VꋙȘ q"k(JZkah!7䞂6w; KPF!^(ʂo\ã->KAmnJge/RYP$I[wJwDΌ]t9"~>.5h yGutH3O>4T|>ђmJ 2hs2b^R>NB(.l& rhaފ 5 h%Λ (l`S*QbwsA^⇎DdH0j JI2Y{H_@RbeW/./"8E(ڍ ȧ4U+'"=-[2#j fF2UWefym1nēlFaA=S2L1G+4͑߮Ǻ~,Ę 4+S2G@7)j # QTتn!~C@>^&*M1K"聯T@AVg/y52Y#rzg/JYVME梚;Di g$H^FN\@ˑ-B͘DQa]wH\1XKICfj*z{q">QDDTRh$ YwPM_$ɑPX`G5ccW,N*Y-O}bp-Il硚5~5͐!]e\휎E+|[ttr ql?pAs UY.C R6 kQCTc@-  MP_dCD0~8y>,.@6'>B#KQeDKi⧉zi3Mj^Tn(/(A}%X1lj ofa6 ;;׹l!.--I*m[^p`5_[ܽrSbҪBJ-Ns֝OJH"i&B{ӥĩ7Pqܰ[RH-+RZwk7^oY. iWֺKҥFƽ*^ˆTJJ[ _LT+j[9d=Fkғc>bc:^)C͹j<SH(@X05~^Eޫ'Rva:铠9fvUPcNrK84V{l*+jBi9 Lxء3fSTsQJn$ 4PէɸMna#֕+pL꯷y!Gz+qd/g^/R5,D<IS9MXsqwBLR]yڛ۔P[O+oy%ioRTk֎-JKÇb,!S1CO9UZJEü@`p󏒼SKtsF]{/3Ƶ8<&iLW;Xs)9D%^ۦ n $ Q<`7St0`0'`PƢ"0`xgqŅH&U  \P4:p*x ̃5<,qip4̡,";wG{ ɓ$K?΄Ý( b8bAs{9F9``i8$+ID/tO< B Eppp+sqÇ3 bQp*Xcs,pp,< ObP%Y,`ƁD@$Nr(;`Hv!0%0;yaŢ Ǒ υ#چ 5Yq$8G_r aOC_ f<  Ҧ$qi4 G8tDRadCĀ)ͫ}o3i^Dq ^//.Qa441)AAzIIL07<&*,Hg\xUe@Ē8˯B"1 eEcH\S&< F"4p T;ɲABK2h!'/eRKVs^BMd5a.4 bPMD cP0@x6 iy.8FA" mJo[)jvbfGu~J˟+rEо2JΙ8s۾7Pz*}\WZRbӵͩ3bN_N[┚3Ι)ev sLkf߭l.{zh'~Wx]lMcPفBСIݹ"O_s nfn]BT*_]T[m_'CԿU2z6)ZuO=OĞu0~U7}[qN$z!fȎ=6K-[T!k'Ws!\{6G)-8c=uwS{o](eŘA/}̏[6\M{5z/-+MV&\s~*7嘩Dϒl-yUcN/ͩuޒck}I:<,i5rUqkL[X`'ֶܢw/?.2_&ҦW5}%.\ujJ)zܚBw}ȏXz=x>\ {5Z ^+ǖJW|5jc:zbbɖ)Wwb~ 5Ň:+嚘2ϊRuY?{I%B[kE_F/&Mq1.lkrŔ? z(ݦ㔹~ qliu[>c!NN)e.EUd扲Ã3jWSkL伏bgs\oIԩϛGgf[ yXO!̅Z{{K[S+ܵ'/m)R͞vz[q}?;mw͏+rkU^82wjׇ22sJnb2nt3k/]G蟓55ڪ}ԋ9]c{ϛZX奪Җ*_]jBt%C_k;*}^iK1!пjj9 C.gI張ZsN1*kuV\n\h!*UoScmZo6|_[*9W-o*9#D\``݌羔7Z9k^S/0  Yy (ǂ os8XyƂwDɂyȉɂY&4‹^hxIXИYw,=rD,\Eð{w0/#(.yxBy8ac4p`f%q oipH!BøCIX yǂ84?a B"Iwb2,*b!BRdLD"2|t3SѦ„d*Xk-iIWKdȄ4&SET D2!B*C"R$&PiCJY6= d QF,UCB`@(8*48Ph%'(80 (Px' `xi (8S@!Q8$h@~;hq q@a ɒDn/X04  xp<<*?ѰӰ? 
4ϓ!a< (1,(8q,K J;08 d5"ē4M x$Hw<;7¥*Zƚb)ސZquǾ9+K8&n}j|!քw9mc_+4Գq+zꗦTOw|^jݸ1CبU{v↭:to5D^a Bc.[5OȻVTlӯetȸXj*Aㆾ)ǚ+嚩֜TS "@۵+^˵RVLb돥_xĸ5g?L*1C@a s{VjTqu7 SS~?mkӦ{˱fL%+ĩޗJn9_\=k%wOUaSe[*bd+.Y.wjjvƘGa{-ݥ~W5oPv{"VV,1j 8x)Q[%v͎O7%0ᷮ<-R՞Q0!FlkUbʧ9Զ{ҷ>j Nj4,)fuc4@aoqBVKrS,>\̓2DJܝ3jv {熫g ,Şc󆰱2 8  ϻI㼯p4hp</M Fii48pa"Bx~1p(DI7p(e%i4 +1s4Y`y 8Gi4"x{H):g Y4ͯ:Rhi{r鹍%߻#2\w +O>g?x V4/Ѽs_$X0!HjLAb)tҵ!ICХV|h>.[HjZk-Ch.Rɏ9d̡\)!(|d=wˑr4?x"I0Xx p pF3C*J*K`Iͨ`7̭ STy Y:n%MRSk=1j1ΙٿV2}{/JNWb1_ͿHZ8!CS25"9%zt UDc_#7'Xq;0 xx{>oB5U셙I4\_H1L]ء>y^61DEJjXbaވ{׫S|969:ϝr'<4$lX0fT1ORT0cłKb t b[hC @ b0 /E8N3ͨ B@p,,NcX*kȺ%+!~lhy:MFlTTF5_XQd1^[21TU{SK.%e^o?FK![)2;]YC蹳M5[jOY2ւxfݜ7Mhw"u9&i*r-[ߕ'dŻh5V1gˎq$>uH?d ߳IA3LG$Rk f~i:|1NnyD23o\C12x IE2Fe]P#JCn9ٱ3TCeZʳA? G塮?WN/7e5n`At.M1*sI ^qz )cVJyg»6сg~0zJ٪á wwE\%/Z"^KOvvj0 EfXpM)p w%BEl<"4/0v!{pYVE_ee۔řaL*0Dl=6/jJM!Ë fE`ӕKpcWQd%zס^yRY'j t_cn~e|@f@a3ZUYv؎0My[Vy@~3esUv3{@3ǁۘGFлؾou 1Eg`.Hig9OyCi:HOݱjdq`(ɝ>7o_G+K$qgE+7M(ѬCش9b4AUE4Dlp %lZ߽>|B,EDt|'˘E#D?a:[/y:dY^U5&,Υ4#\?Gkȿ{n?CCz7OzE*'! ]dR& #xo\`B.ܫ G竖=WOP5P,Քud(x$.$( ]z׶;hNHQifm&Hu"MK_9 ؓS!"QGZI32g]beVxĻ|ޠNv}zڲj|,NQ}e~ uHαv({Z_^ڬ/y6rps|! [W0V?A@FN^x_ҋICw yIBRw'N}ŀ5'NyO^}ܟ?,K~dQw ly9Ih: .e:uk54/-Ua-$/)`ǁ~*JNfI@哜Q{օ!lȏ0*Q0ݙ1,,$9U >%{hU-AeFZ- -)_-)bL!^WttCg5!;Z BPyXu.«RqB#0IZ󹭳dLX(G_{NgRPkঢE  !F-:ٰ!*c+jmm0}]\.;%"axS k2J^L %8%|4('{6_OoA' C&AG=P]!a#qf_Q〉{2ɴ@UT*Z`@bT[귁7D#C3sNtF;H).D\p./bGHfԊzɽ&F}kSDܯysS;Y=7jXN cԶѫDi"}:K|vЮm9=c$;$Y3yNYKA|*j 7< E;IK;8 H;8:iکUW(6: 7l[/K#__u@=7{ʙV= 9<⛊-X}A9Fd)?WHJE!%X0+vfV5J7벙8fA`8ei ź2@NRg> 'D|dsP τp%U\@6d=Ef*;7G%9J ;ů쏝,z^ >C8xc`?9é?p.:$@qmIH%?}<-6$%>mbe}=IZggw8QTMQcݔ`s i7RFhG؜jR>b0D?אK @g/'hZG2 o>u#}w#۾Hnڵ6m2 )U95G`fGaXN/lxOƍ8ԅk#rpqCʒC/EVc&o rz)NJO(O'L%5+'m0WB\-Y0ǿ}C1B0נ VX͒X$W R:?VY. 2 [9$ u鲸&rAZa4](NZV9FaV zxFZnuU8,NLEd. %R5Lj`5{/">7gp{~.uv_6,!+"D]AuYFvVUM0t2^r(d #bEJYqyB>9;:0bػ6miStBݡcԀBIPƌVeU#xL*<[%pCF IZV ȣ1m>X.LGqȤ߸t]3ȂS* >@mo LL?]b\YyMFgWup_\ z7C2.,=j>GJv=>~+>3t5}` mz8x%f@@H at3<(2鯕rƾ3BQ&խ݇a9v^:'+醊fmSL5r$Џ ~̱x)Sڟ8CgU0`\'\zۖ=]t>CэO3B̯[h.:  0[ Rr 1:FjB+ E;/5ǥ2Р ҁS E퐅0437..*/иGLu`kO)W"+u4XՋR8JjUWӁya_S3+M@j ;&},0)c~ ͢VR?Q<*Lsnmj&f(O'+ ]͉QҥͰ\CBƋjBwTIuz5U:qƛ,>f X4nPZ#.r298KȔL7pιO:陏PXxvwfƘC_%].֜/hoٜ0r4fپ@?E.VkG)TΔG$ܙaG7ejhQw5t}h& NI!2WCoAp(?)iHvCi6t 뀶p—h֋;Q/OpM %/zmM4ՕJG{ KzS)4dӆN=pao&w[\D.$̙;uc2yZʟ} 0 YW}U9f#{jgq^2;şV-i֞ \AP֫#X|X֜5WSC{7rA?N TiLa#{)ZĜyq|^;esugr#<"-ḵ MsOV"\!6mĿ U :5s!AW \M`MUQVbOdW5,wѲh91J+>ċK[rL ki<~z?nj)|^trժI5lҬ8٭mD 9]v)bcԠ1)R}aNu?s {h_Cfm+$]xRuث}P*EB\$'Q׹J:Qi"5F orcVpQbkH/DT/* #L `=ygZcߐqjܮjNRO9ņBѵvK8u؆Z'JJۇsUH&F1=F0B}b|/IM4*(riD0π`A BZԔB 1ЫtdG #l]WH 0($ BHI^ s(="\GǠGX*/I8N"&La!@ h pFYUY$2IKD_ KAn&XmB ,-L>*/+f)nϏB^NR*e ]=ҘC[;l^wL~[<>ykǤ_Z"ScS)mjjui1k"m!{Z{ 9c>x:lc)wP3F7WnCM-Bv#]DQ( q8v r'JPG-~:H/ˠ:<#C~ѾŋZCbԏz)jt?"TogJkyGYPNV*sW޻;ngSAvO%w^&۠`lV_ޯٻʏe>PG'E:N|}X-F-Z,[AGQy{´V#ΥW%VSW$^ H !v|vܲ~n b:_G/)4ԇuoz1צXc͞sPZAM^AEN3G th#P.YL%Bn:9ɭwm[6?s.m!juΝdIqId.hɡ\[J!}*f E/)EXVCO>N˟c>Rdj5WE TlGZؓgyd ֍t]Nuj[ܠ!-љqb{S[qjK;s\~u*=O7V7a :Ȟc ӁɱUdMfl{)RS R!D049 $ 1Rc D!89>Z  JP<,Jz#FWR>򃑓32;A 3Š|#(H n,j!j-fFt\ 8uUh"[T -;8n!2_L bl .\tTDs xOԀ o wZP72{Hĺ$ZX% U*D>C<ӵC= kZ`,7 ޣ6:CXN|h=[8hS'{:U ܑ;c?3äY*1 V8lL,L= vW漬w׺]C;г rXb42\#AV#_ԶG4\HBި="WZ 2({%KA,TUM2(uU7pR։BCDK`z݂f[,Ao&Z"J^t*̯3iN6Őf)ܮ!_iW9.OM:[$!< p.kN1{;%JrB!;uۢCV|WN7 k"Pr4Ԗm8vhyUIW{F~՚ooGq>neFxՆT?S GoAM_wݣow7 e.4jOSFAQ8ʁz5Z)(Ce(S}-c@Q-YR .'Xg? 
[binary data omitted: compressed repodata members of createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodata/, including e9e6ca4765de75cc3b2bf05e6cf631703c6557edd642300748d7747000547365-primary.xml.zck; the zchunk/gzip-compressed content is not representable as text]
?vE~#UP!DQapYt\BGnBZJ,t;f9d860ddcb64fbdc88a9b71a14ddb9f5670968d5dd3430412565c13d42b6804d-comps.f20.xml.gz000066400000000000000000000013541400672373200425270ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataVMo@W +qqUq6mԏK7fw6+ \B8v8~ތg9`z"a eoHBf;FlpwqdG{<1 X).f|7yV,feF(G*)6a+*~5N7mB\0Җ TX1:STG|RZ ~7˰,$(7Abj*VTn9ձhNg^:ӳ}TΌ#OJ^",驚ĵuw7ӹL4FViozaC*Ʃ\."pCHRnZoAV}FD4Ŋ9w?Ŕ;rvE?5DW(B[vS#T&1HRD)nre$}҆<&JH:v.2Ȉ[k5W.KͰWte =Ή0U?6eۇI,j0CA$"?o A1"5y]B>;棡΋uhq);2 li*~l0tl7S-iS9Ew Wƀ(3fQ9JC7w`Э>m 2} .= 9^!hx5h:> fd458a424a3f3e0dadc95b806674b79055c24e73637e47ad5a6e57926aa1b9d1-other.xml.gz000066400000000000000000000012571400672373200420620ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repo_with_additional_metadata/repodataYo0)FD.%M*M^< xͷa7ͥR| sxrp]/dM&hĸqt3Ď3@7}bJ]fB žb=вbj{ЧVAY)Ewg,QC0IJӐ!4 6$L wr S6e20uYˈR_KLr V48a55<+YS`- ` UJmy%gWWyɔ^bù4K qCna\MScp5)D،Ħ8Ez+NLY+L]#]a\v01}q}/*a`n]r{v'}nҘ:]d[;pC⼪M>C{M|%i{Kv.}'i/XdFy5 R0ixH{%B+fmM|TB8PGy|Kus;dزZхSDL'u~}rȤX!˷ Dl*0q~z32/d=Aŵ/:_?/CC>vY FPs֚Nc {L=ڸB:0 1554795405 490a2a494a3827b8a356f728ac36bc02fb009b0eaea173c890e727bb54219037 6b17727c89bb6fe4737038ab35eef884442760a3a93adea506486a753f6bff48 1554795405 1287 5025 ba5a4fdbb20e7b9b70d9a9abd974bcab1065b1e81d711f80e06ad8cae30c4183 8fd9fb8389693db44ab145f4b10d65e4b180ed78894214820a9da1d1433fe212 1554795405 749 3880 fd458a424a3f3e0dadc95b806674b79055c24e73637e47ad5a6e57926aa1b9d1 9a94f6e50815385c9a46c29402df8116be16b6f8a30fc09e82cc0705298d3767 1554795405 687 1747 1e12239bf5cb07ec73c74482c35e80dabe30dbe2fdd57bd9e557d987cbacc8c2 7566198f37630c89ea115c282c93405fb1363ca6fdf2a8ece1aae266485b55dc 1554795405 3775 106496 10 4f4de7d3254a033b84626f330bc6adb8a3c1a4a20f0ddbe30a5692a041318c81 265e0354eba7f9b530dd39f3367d22bc0f09566956949ce8a72398541ba31930 1554795405 1519 28672 10 8b13cba732c1a02b841f43d6791ca68788d45f376787d9f3ccf68e75f01af499 580857174c525b3b80af5a33ae194b88c33194206769bc286d35a02d4f6ab374 1554795405 1301 24576 10 e9e6ca4765de75cc3b2bf05e6cf631703c6557edd642300748d7747000547365 6b17727c89bb6fe4737038ab35eef884442760a3a93adea506486a753f6bff48 5809d2657156017d46bd3c3acbf06f78947f0fc516652251b5ca7f7b459d9737 1554795405 1482 5025 134 3d6eaa7c77ef92586470dd6a542478e42cc421a85f12e0db93aa783077704cd0 8fd9fb8389693db44ab145f4b10d65e4b180ed78894214820a9da1d1433fe212 4178f7623da3e57ca1edf873ca6bd41f82a10afbc10087d062ec328482eb1096 1554795405 932 3880 133 c5582e660ed9a2d3c10ba44f0aeb02f2bb70e85dc3c8cda4266183d4e5235aa7 9a94f6e50815385c9a46c29402df8116be16b6f8a30fc09e82cc0705298d3767 c49fa7ec9ada1b3916bd272bfa3e4e1c847f2dab0bd82bccc504e5ab40518ac8 1554795405 850 1747 133 04460bfaf6cb5af6b0925d8c99401a44e5192d287796aed4cced5f7ce881761f 1554795405 2366 f9d860ddcb64fbdc88a9b71a14ddb9f5670968d5dd3430412565c13d42b6804d 1554795405 748 2bbdf70c4394e71c2d3905c143d460009d04359de5a90b72b47cdb9dbdcc079d 04460bfaf6cb5af6b0925d8c99401a44e5192d287796aed4cced5f7ce881761f c2cedd24f17c1e0d3eff844e94725bd1d1dcb51cce9ef421d0a1cf4c7826c2b1 1554795405 864 2366 115 2bbdf70c4394e71c2d3905c143d460009d04359de5a90b72b47cdb9dbdcc079d 04460bfaf6cb5af6b0925d8c99401a44e5192d287796aed4cced5f7ce881761f c2cedd24f17c1e0d3eff844e94725bd1d1dcb51cce9ef421d0a1cf4c7826c2b1 1554795405 864 2366 115 4fbad65c641f4f8fb3cec9b1672fcec2357443e1ea6e93541a0bb559c7dc9238 1554795405 88281 cb0f4b5df8268f248158e50d66ee1565591bca23ee2dbd84ae9c457962fa3122 280f628a881d302ba62856fb08ac3894c16cb1b9fa6d0b0aa2cd2b561db0b18e f5528bdad44c630b742069a05a270cd5bf320810e20861e9ec5723e85ddf5c5e 1554795405 86680 511344 328 
88514679cb03d8f51e850ad3639c089f899e83407a2380ef9e62873a8eb1db13 1554795405 533 0219a2f1f9f32af6b7873905269ac1bc27b03e0caf3968c929a49e5a939e8935 88514679cb03d8f51e850ad3639c089f899e83407a2380ef9e62873a8eb1db13 648324a2d3e603bf1bc134d6ee8c1a66cb0acb7fe0f96bb2a6fcdfe509406481 1554795405 656 533 115 createrepo_c-0.17.0/tests/testdata/repodata_snippets/000077500000000000000000000000001400672373200227355ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/repodata_snippets/filelists_snippet_01.xml000066400000000000000000000004071400672373200275200ustar00rootroot00000000000000 /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/repodata_snippets/filelists_snippet_02.xml000066400000000000000000000007271400672373200275260ustar00rootroot00000000000000 /usr/bin/fake_bash /usr/bin/super_kernel /usr/share/man/super_kernel.8.gz createrepo_c-0.17.0/tests/testdata/repodata_snippets/other_snippet_01.xml000066400000000000000000000006301400672373200266410ustar00rootroot00000000000000 - First release - Second release createrepo_c-0.17.0/tests/testdata/repodata_snippets/other_snippet_02.xml000066400000000000000000000012711400672373200266440ustar00rootroot00000000000000 - First release - First release - Second release createrepo_c-0.17.0/tests/testdata/repodata_snippets/primary_snippet_01.xml000066400000000000000000000035761400672373200272170ustar00rootroot00000000000000 super_kernel x86_64 152824bff2aa6d54f429d43e87a3ff3a0286505c6d93ec87692b5e3a9e3b97bf

Test package This package has provides, requires, obsoletes, conflicts options. http://so_super_kernel.com/it_is_awesome/yep_it_really_is
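The repodata_snippets fixtures in this test directory are per-package fragments of the three rpm-md metadata files (primary, filelists, other) that the test suite compares against. As a minimal sketch (not part of this repository, shown only for illustration), equivalent fragments can be produced for a single RPM with the createrepo_c Python bindings; the package file name below is a placeholder:

    import createrepo_c as cr

    # Parse one RPM into a Package object (path is a placeholder)
    pkg = cr.package_from_rpm("super_kernel-6.0.1-2.x86_64.rpm")

    # Dump the per-package XML chunks corresponding to primary.xml,
    # filelists.xml and other.xml
    print(cr.xml_dump_primary(pkg))
    print(cr.xml_dump_filelists(pkg))
    print(cr.xml_dump_other(pkg))
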
createrepo_c-0.17.0/tests/testdata/repodata_snippets/primary_snippet_02.xml000066400000000000000000000060151400672373200272070ustar00rootroot00000000000000 fake_bash x86_64 90f61e546938a11449b710160ad294618a5bd3062e46f8cf851fd0088af184b7 Fake bash Fake bash package http://fake_bash_shell.com/ super_kernel x86_64 6d43a638af70ef899933b1fd86a866f18f65b0e0e17dcbf2e42bfd0cdd7c63c3 Test package This package has provides, requires, obsoletes, conflicts options. http://so_super_kernel.com/it_is_awesome/yep_it_really_is createrepo_c-0.17.0/tests/testdata/specs/000077500000000000000000000000001400672373200203265ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/specs/build.sh000077500000000000000000000001031400672373200217560ustar00rootroot00000000000000#!/bin/sh for spec in fake-*.spec; do rpmbuild -ba $spec done createrepo_c-0.17.0/tests/testdata/specs/fake-Archer.spec000066400000000000000000000025311400672373200233130ustar00rootroot00000000000000Name: Archer Epoch: 2 Version: 3.4.5 Release: 6 License: GPL Summary: Complex package. Group: Development/Tools Url: http://soo_complex_package.eu/ Packager: Sterling Archer Vendor: ISIS #Source: %{name}-%{version}.tar.gz Requires: fooa <= 2 Requires: foob >= 1.0.0-1 Requires: fooc = 3 Requires: food < 4 Requires: fooe > 5 Requires(pre): foof = 6 Provides: bara <= 22 Provides: barb >= 11.22.33-44 Provides: barc = 33 Provides: bard < 44 Provides: bare > 55 Obsoletes: aaa <= 222 Obsoletes: aab >= 111.2.3-4 Obsoletes: aac = 333 Obsoletes: aad < 444 Obsoletes: aae > 555 Conflicts: bba <= 2222 Conflicts: bbb >= 1111.2222.3333-4444 Conflicts: bbc = 3333 Conflicts: bbd < 4444 Conflicts: bbe > 5555 %description Archer package #%prep #%setup -q %build touch README echo OK %install rm -rf $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT/usr/bin/ mkdir -p $RPM_BUILD_ROOT/usr/share/doc/%{name}-%{version}/ touch $RPM_BUILD_ROOT/usr/bin/complex_a %clean rm -rf $RPM_BUILD_ROOT %files %{_bindir}/complex_a %doc README %changelog * Mon Apr 8 2013 Tomas Mlcoch - 3.3.3-3 - 3. changelog. * Wed Apr 18 2012 Tomas Mlcoch - 2.2.2-2 - That was totally ninja! * Tue Apr 17 2012 Tomas Mlcoch - 1.1.1-1 - First changelog. createrepo_c-0.17.0/tests/testdata/specs/fake-Rimmer.spec000066400000000000000000000017771400672373200233550ustar00rootroot00000000000000Name: Rimmer Version: 1.0.2 Release: 2 License: GPL Summary: Package with weak deps. Group: Development/Tools Url: http://pkgwithweakdeps.eu/ Packager: Arnold Rimmer Requires: req <= 1 Requires(pre): reqpre = 2 Provides: pro <= 3 Obsoletes: obs <= 4 Conflicts: con <= 5 Suggests: sug > 6 Enhances: enh < 7 Recommends: rec = 8 Supplements: sup > 9 %description Package with weak deps. #%prep #%setup -q %build touch README echo OK %install rm -rf $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT/usr/bin/ mkdir -p $RPM_BUILD_ROOT/usr/share/doc/%{name}-%{version}/ touch $RPM_BUILD_ROOT/usr/bin/complex_a %clean rm -rf $RPM_BUILD_ROOT %files %{_bindir}/complex_a %doc README %changelog * Wed Apr 18 2012 Tomas Mlcoch - 2.2.2-2 - Look, we've all got something to contribute to this discussion. And I think what you should contribute from now on is silence. * Tue Apr 17 2012 Tomas Mlcoch - 1.1.1-1 - First changelog. 
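The fake-Archer.spec and fake-Rimmer.spec fixtures above exercise versioned hard dependencies (Requires, Provides, Obsoletes, Conflicts) and, in Rimmer's case, weak dependencies (Suggests, Enhances, Recommends, Supplements); tests/testdata/specs/build.sh builds them with rpmbuild. As an illustrative sketch only, assuming one of the resulting RPMs is available locally under a hypothetical file name, the parsed dependency lists can be inspected through the Python bindings:

    import createrepo_c as cr

    # Hypothetical path to a package built from fake-Rimmer.spec via build.sh
    pkg = cr.package_from_rpm("Rimmer-1.0.2-2.x86_64.rpm")

    # Hard dependencies as parsed from the RPM header
    print(pkg.requires)
    print(pkg.provides)

    # Weak dependencies declared by the spec's Suggests/Recommends/... tags
    print(pkg.suggests)
    print(pkg.recommends)
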
createrepo_c-0.17.0/tests/testdata/specs/fake-balicek-iso88591.spec000066400000000000000000000010731400672373200247100ustar00rootroot00000000000000Name: balicek-iso88591 Version: 1.1.1 Release: 1 License: GPL Summary: Balicek s "" Group: System Environment/Shells Url: http://fake_bash_shell.com/ #Source: %{name}-%{version}.tar.gz Provides: Balicek Requires: %description divny o: "" divny u: #%prep #%setup -q %build echo OK %install rm -rf $RPM_BUILD_ROOT mkdir $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %changelog * Tue Apr 17 2012 Toms Mlcoch - 1.1.1-1 - Nejak comment "" createrepo_c-0.17.0/tests/testdata/specs/fake-balicek-iso88592.spec000066400000000000000000000011221400672373200247040ustar00rootroot00000000000000Name: balicek-iso88592 Version: 1.1.1 Release: 1 License: GPL Summary: Balek s "" Group: System Environment/Shells Url: http://fake_bash_shell.com/ #Source: %{name}-%{version}.tar.gz Provides: Balek Requires: blk %description Bl k pl dy n n. "" #%prep #%setup -q %build echo OK %install rm -rf $RPM_BUILD_ROOT mkdir $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %changelog * Tue Apr 17 2012 Tom Mloch - 1.1.1-1 - Njak comment "خ" createrepo_c-0.17.0/tests/testdata/specs/fake-balicek-utf8.spec000066400000000000000000000012151400672373200243630ustar00rootroot00000000000000Name: balicek-utf8 Version: 1.1.1 Release: 1 License: GPL Summary: Balíček s "ěščřžýáíéů" Group: System Environment/Shells Url: http://fake_bash_shell.com/ #Source: %{name}-%{version}.tar.gz Provides: Balíček Requires: bílýkůň %description Bílý kůň pěl ódy ná ná. "ěščřžýáíéů" #%prep #%setup -q %build echo OK %install rm -rf $RPM_BUILD_ROOT mkdir $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %changelog * Tue Apr 17 2012 Tomáš Mlčoch - 1.1.1-1 - Nějaký comment "ěščřžýáíéůúÁŠČŘŽÝÁÍÉŮÚ" createrepo_c-0.17.0/tests/testdata/specs/fake-empty.spec000066400000000000000000000002601400672373200232420ustar00rootroot00000000000000Name: empty Version: 0 License: LGPL Release: 0 Summary: "" #BuildRequires: %description %build %install %clean %files %changelog createrepo_c-0.17.0/tests/testdata/specs/fake-fake_bash.spec000066400000000000000000000011701400672373200240100ustar00rootroot00000000000000Name: fake_bash Version: 1.1.1 Release: 1 License: GPL Summary: Fake bash Group: System Environment/Shells Url: http://fake_bash_shell.com/ #Source: %{name}-%{version}.tar.gz Requires: super_kernel Provides: bash %description Fake bash package #%prep #%setup -q %build echo OK %install rm -rf $RPM_BUILD_ROOT mkdir $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT/usr/bin/ touch $RPM_BUILD_ROOT/usr/bin/fake_bash %clean rm -rf $RPM_BUILD_ROOT %files %{_bindir}/fake_bash %changelog * Tue Apr 17 2012 Tomas Mlcoch - 1.1.1-1 - First release createrepo_c-0.17.0/tests/testdata/specs/fake-super_kernel.spec000066400000000000000000000023241400672373200246050ustar00rootroot00000000000000Name: super_kernel Version: 6.0.1 Release: 2 License: LGPLv2 Summary: Test package Group: Applications/System Url: http://so_super_kernel.com/it_is_awesome/yep_it_really_is #Source: %{name}-%{version}.tar.gz BuildRequires: glib2-devel >= 2.26.0 BuildRequires: file-devel Requires: glib >= 2.26.0 Requires: zlib PreReq: bzip2 >= 1.0.0 PreReq: expat Provides: super_kernel == 6.0.0 Provides: not_so_super_kernel < 5.8.0 Conflicts: kernel Conflicts: super_kernel == 5.0.0 Conflicts: super_kernel < 4.0.0 Obsoletes: super_kernel == 5.9.0 Obsoletes: kernel %description This package has provides, requires, obsoletes, conflicts options. 
#%prep #%setup -q %build echo OK %install rm -rf $RPM_BUILD_ROOT mkdir $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT/usr/share/man/ mkdir -p $RPM_BUILD_ROOT/usr/bin/ touch $RPM_BUILD_ROOT/usr/share/man/super_kernel.8.gz touch $RPM_BUILD_ROOT/usr/bin/super_kernel %clean rm -rf $RPM_BUILD_ROOT %files %doc %_mandir/super_kernel.8.gz %{_bindir}/super_kernel %changelog * Tue Apr 17 2012 Tomas Mlcoch - 6.0.1-2 - Second release * Tue Apr 17 2012 Tomas Mlcoch - 6.0.1-1 - First release createrepo_c-0.17.0/tests/testdata/test_files/000077500000000000000000000000001400672373200213525ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/test_files/binary_file000066400000000000000000000027631400672373200235700ustar00rootroot00000000000000PNG  IHDRJ.[sRGBbKGD pHYs  tIME 0:4AtEXtCommentCreated with GIMPWNIDATx1EREpɍ H,Rr`G> Dggzr{jv$Г%3~+5ܰ`! `! `! OsVwiQZKY?+~ P2Չ.'=xOI_ܟ&yoXHlܟ-r_K/kfI>tȶRzj4r}i,}~ I~Or1munC/CRFV/5ϯ-<t?UXz=6j[xI{7C$瓳# u_#"gI$w]ɞCl}I`?k^yC C.3%u0dHqe7dI2d$7!ۦFr汷g>o͝zH![Fr7d BzIn|r6ҭk$nOq;HY}$u>?А.~uv.KV5|{~:N|Ft;~noqE}ސ0OZ;5.O8=Jg9O}\xzV)^:Nɜlf(6t?•BܷR:yXS`EO2=&V)b?} lnUI Rԡus ɣZT^q,9+joK8'uUkV}]&$Or}8h*KZǩ?ԇ^^(1r/p)TERk2 N9VKClfK^ݕl–M )'3f='3)PvB· Bk-g`jᩬcbY;1/ZE]r*&,A} _kT{𹨰0zH޲G4ѴY!dw H #N֒6^ED` (x5o5IьZHyzs۱D̈́r[%VwvŐ2.=>nI^ѧݻ?I_GMG(#P^<}nPcreaterepo_c-0.17.0/tests/testdata/test_files/text_file.xz000066400000000000000000000011641400672373200237220ustar00rootroot000000000000007zXZִF!t/3]&FgZw}A5̓|ې/qr}Vqj"=U̥vktj|ƾ1V.wpĚ8/@QГ釵/)*W<ݖ 6\ĿƑ-T'!h;Ne0u!Wի")b!t#6Ex劣B4b?L'(Ȩђ,|>)c܎W@ Ob#G?2#&Of˨+5F0zd[SN/̢?|-Lq]r3,y2He\#s@[d^@YΠ%Bb ߑMY>ѹZ}ƛ ޱgYZcreaterepo_c-0.17.0/tests/testdata/updateinfo_files/000077500000000000000000000000001400672373200225315ustar00rootroot00000000000000createrepo_c-0.17.0/tests/testdata/updateinfo_files/updateinfo_00.xml000066400000000000000000000000741400672373200257110ustar00rootroot00000000000000 createrepo_c-0.17.0/tests/testdata/updateinfo_files/updateinfo_01.xml000066400000000000000000000023061400672373200257120ustar00rootroot00000000000000 foobarupdate_1 title_1 rights_1 release_1 pushcount_1 severity_1 summary_1 description_1 solution_1 True Foo component bar-2.0.1-3.noarch.rpm 29be985e1f652cd0a29ceed6a1c49964d3618bddd22f0be3292421c8777d26c8 createrepo_c-0.17.0/tests/testdata/updateinfo_files/updateinfo_02.xml.xz000066400000000000000000000002741400672373200263550ustar00rootroot000000000000007zXZִF!t/|]ˇf{3(1v%f M* mj)p80V3.h1Fyg[ |[0Os-TUF[sNE*{aiamDNi oBzmgYZcreaterepo_c-0.17.0/tests/testdata/updateinfo_files/updateinfo_03.xml000066400000000000000000000113051400672373200257130ustar00rootroot00000000000000 RHEA-2012:0055 Sea_Erratum 1 Sea_Erratum 1 walrus-5.21-1.noarch.rpm penguin-0.9.1-1.noarch.rpm shark-0.1-1.noarch.rpm RHEA-2012:0056 Bird_Erratum 1 ParthaBird_Erratum 1 crow-0.8-1.noarch.rpm stork-0.12-2.noarch.rpm duck-0.6-1.noarch.rpm RHEA-2012:0057 Bear_ErratumPARTHA 1 Bear_Erratum 1 bear-4.1-1.noarch.rpm RHEA-2012:0058 Gorilla_Erratum 1 Gorilla_Erratum True 1 gorilla-0.62-1.noarch.rpm RHEA-2012:0059 Duck_Kangaroo_Erratum 1 Duck_Kangaro_Erratum description coll_name1 kangaroo-0.3-1.noarch.rpm coll_name2 duck-0.7-1.noarch.rpm RHEA-2012:0060 Duck_0.8_Erratum 1 Duck_0.8_Erratum description coll_name duck-0.8-1.noarch.rpm createrepo_c-0.17.0/utils/000077500000000000000000000000001400672373200153765ustar00rootroot00000000000000createrepo_c-0.17.0/utils/cleanup.sh000077500000000000000000000002471400672373200173670ustar00rootroot00000000000000#!/bin/bash rm -fv createrepo_c-*.tar.xz rm -fv createrepo_c-*.rpm python-createrepo_c-*.rpm 
python3-createrepo_c-*.rpm rm -fv deltarepo-*.rpm python-deltarepo-*.rpm createrepo_c-0.17.0/utils/gen_manpage.sh000077500000000000000000000012641400672373200202010ustar00rootroot00000000000000#!/bin/bash # /usr/share/man/man8/createrepo_c.8 EXPECTED_ARGS=5 if [ $# -ne $EXPECTED_ARGS ] then echo "Usage: `basename $0` " echo echo "Example: `basename $0` src/cmd_parser.c src/mergerepo_c.c src/modifyrepo_c.c src/sqliterepo_c.c doc/" exit 1 fi MY_DIR=`dirname $0` MY_DIR="$MY_DIR/" python3 $MY_DIR/gen_rst.py $1 | rst2man > $5/createrepo_c.8 python3 $MY_DIR/gen_rst.py $2 --mergerepo | rst2man > $5/mergerepo_c.8 python3 $MY_DIR/gen_rst.py $3 --modifyrepo | rst2man > $5/modifyrepo_c.8 python3 $MY_DIR/gen_rst.py $4 --sqliterepo | rst2man > $5/sqliterepo_c.8 createrepo_c-0.17.0/utils/gen_rst.py000077500000000000000000000173571400672373200174310ustar00rootroot00000000000000#!/usr/bin/env python3 import sys import re import datetime from optparse import OptionParser class Info(object): def __init__(self, name, summary=None, description=None, synopsis=None, copyright=None, options=None): self.name = name self.summary = summary self.description = description self.synopsis = synopsis self.copyright = copyright self.options = options # list of dictionaries with keys ["long_name", "short_name", "description", "arg_description"] def gen_rst(self): rst = ".. -*- coding: utf-8 -*-\n\n" rst += "%s\n" % ("=" * len(self.name),) rst += "%s\n" % self.name rst += "%s\n\n" % ("=" * len(self.name),) # Add summary if self.summary: rst += "%s\n" % ("-" * len(self.summary)) rst += "%s\n" % self.summary rst += "%s\n" % ("-" * len(self.summary)) rst += "\n" # Add copyright if self.copyright: rst += ":Copyright: %s\n" % self.copyright # Add manual page section rst += ":Manual section: 8\n" # Add date rst += ":Date: $Date: %s $\n\n" % datetime.datetime.strftime(datetime.datetime.utcnow(), format="%F %X") # Add synopsis if self.synopsis: rst += "SYNOPSIS\n" rst += "========\n\n" for line in self.synopsis: rst += "%s\n\n" % line rst += "\n" # Add description if self.description: rst += "DESCRIPTION\n" rst += "===========\n\n" for line in self.description: rst += "%s\n\n" % line rst += "\n" # Add options rst += "OPTIONS\n" rst += "=======\n" for command in self.options: cmd = "" if command["short_name"]: cmd += "-%s " % command["short_name"] cmd += "--%s" % command["long_name"] if command["arg_description"]: cmd += " %s" % command["arg_description"] rst += "%s\n" % cmd rst += "%s\n" % ("-" * len(cmd)) rst += "%s\n" % command["description"] rst += "\n" return rst def parse_arguments_from_c_file(filename): args = [] try: content = open(filename, "r").read() except IOError: print("Error: Cannot open file %s" % filename) return args re_cmd_entries = re.compile(r"\s*(static|const)[ ]+GOptionEntry[^{]*{(?P.*)\s*NULL\s*}[,]?\s*};", re.MULTILINE|re.DOTALL) match = re_cmd_entries.search(content) if not match: print("Warning: Cannot find GOptionEntry section in %s" % filename) return args re_single_entry = re.compile(r"""{\s*"(?P[^"]*)"\s*, # long name \s*'?(?P[^',]*)'?\s*, # short name \s*[^,]*\s*, # flags \s*[^,]*\s*, # arg type \s*[^,]*\s*, # arg data pointer \s*("(?P.*?)")\s*, # description \s*"?(?P[^"}]*)"? 
# arg description \s*},""", re.MULTILINE|re.DOTALL|re.VERBOSE) raw_entries_str = match.group("entries") start = 0 entry_match = re_single_entry.search(raw_entries_str) i = 1 while entry_match: long_name = entry_match.group("long_name").strip() short_name = entry_match.group("short_name").strip() description = entry_match.group("description").strip() arg_description = entry_match.group("arg_description").strip() # Normalize short name if short_name in ("NULL", "0", "\\0"): short_name = None # Normalize description description = description.replace('\\"', "\\\\'") description = description.replace("\\\\'", '"') description = description.replace('"', "") description = description.replace('\n', '') description = description.replace('\t', '') # Remove multiple whitespaces from description while True: new_description = description.replace(" ", " ") if new_description == description: break description = new_description description = description.strip() # Normalize arg_description if arg_description and (arg_description == "NULL" or arg_description == "0"): arg_description = None # Store option into list args.append({'long_name': long_name, 'short_name': short_name, 'description': description, 'arg_description': arg_description }) # Continue to next option i += 1 start += entry_match.end(0) entry_match = re_single_entry.search(raw_entries_str[start:]) # End while print("Loaded %2d arguments" % (i,), file=sys.stderr) return args if __name__ == "__main__": parser = OptionParser('usage: %prog [options] [--mergerepo|--modifyrepo|--sqliterepo]') parser.add_option('-m', '--mergerepo', action="store_true", help="Gen rst for mergerepo") parser.add_option('-r', '--modifyrepo', action="store_true", help="Gen rst for modifyrepo") parser.add_option('-s', '--sqliterepo', action="store_true", help="Gen rst for sqliterepo") options, args = parser.parse_args() if len(args) < 1: print("Error: Must specify a input filename. 
(Example: ../src/cmd_parser.c)", file=sys.stderr) sys.exit(1) args = parse_arguments_from_c_file(args[0]) if options.mergerepo: NAME = "mergerepo_c" info = Info(NAME, summary="Merge multiple rpm-md format repositories together", synopsis=["%s [options] --repo repo1 --repo repo2" % (NAME,)], options=args) elif options.modifyrepo: NAME = "modifyrepo_c" info = Info(NAME, summary="Modify a repomd.xml of rpm-md format repository", synopsis=["%s [options] " % (NAME,), "%s --remove " % (NAME,), "%s [options] --batchfile " % (NAME,) ], options=args) elif options.sqliterepo: NAME = "sqliterepo_c" info = Info(NAME, summary="Generate sqlite db files for a repository in rpm-md format", synopsis=["%s [options] " % (NAME,) ], options=args) else: NAME = "createrepo_c" info = Info(NAME, summary="Create rpm-md format (xml-rpm-metadata) repository", synopsis=["%s [options] " % (NAME,)], description=["Uses rpm packages from to create repodata.", "If compiled with libmodulemd support modular metadata inside identified by the patterns below are automatically collected, merged and added to the repodata.", "The patterns are:", " - \*.modulemd.yaml (recommended file name: N:S:V:C:A.modulemd.yaml)", " - \*.modulemd-defaults.yaml (recommended file name: N.modulemd-defaults.yaml)", " - modules.yaml (recommended way of importing multiple documents at once)"], options=args) ret = info.gen_rst() if not ret: print("Error: Rst has not been generated", file=sys.stderr) sys.exit(1) print(ret) createrepo_c-0.17.0/utils/get_version.py000077500000000000000000000030021400672373200202720ustar00rootroot00000000000000#!/usr/bin/env python3 import re import sys import os.path from optparse import OptionParser VERSION_FILE_PATH = "VERSION.cmake" def parse(root_dir): path = os.path.join(root_dir, VERSION_FILE_PATH) if not os.path.exists(path): print("File {path} doesn't exist".format(path=path)) return None content = open(path, "r").read() ver = {} ver['major'] = re.search(r'SET\s*\(CR_MAJOR\s+"(\d+)"', content).group(1) ver['minor'] = re.search(r'SET\s*\(CR_MINOR\s+"(\d+)"', content).group(1) ver['patch'] = re.search(r'SET\s*\(CR_PATCH\s+"(\d+)"', content).group(1) return ver if __name__ == "__main__": parser = OptionParser("usage: %prog [--major|--minor|--patch]") parser.add_option("--major", action="store_true", help="Return major version") parser.add_option("--minor", action="store_true", help="Return minor version") parser.add_option("--patch", action="store_true", help="Return patch version") options, args = parser.parse_args() if len(args) != 1: parser.error("Must specify a project root directory") path = args[0] if not os.path.isdir(path): parser.error("Directory {path} doesn't exist".format(path=path)) ver = parse(path) if ver is None: sys.exit(1) if options.major: print(ver['major']) elif options.minor: print(ver['minor']) elif options.patch: print(ver['patch']) else: print("{major}.{minor}.{patch}".format(**ver)) sys.exit(0) createrepo_c-0.17.0/utils/make_rpm.sh000077500000000000000000000040331400672373200175300ustar00rootroot00000000000000#!/bin/bash PACKAGE="createrepo_c" RPMBUILD_DIR="${HOME}/rpmbuild/" BUILD_DIR="$RPMBUILD_DIR/BUILD" GITREV=`git rev-parse --short HEAD` PREFIX="" # Root project dir MY_DIR=`dirname "$0"` if [ $# -lt "1" -o $# -gt "2" ] then echo "Usage: `basename "$0"` [revision]" exit 1 fi PREFIX="$1/" if [ ! -d "$RPMBUILD_DIR" ]; then echo "rpmbuild dir $RPMBUILD_DIR doesn't exist!" 
echo "init rpmbuild dir with command: rpmdev-setuptree" echo "(Hint: Package group @development-tools and package fedora-packager)" exit 1 fi echo "Generating rpm for $GITREV" echo "Cleaning $BUILD_DIR" rm -rf "$BUILD_DIR" echo "Removing $RPMBUILD_DIR/SPECS/$PACKAGE.spec" rm -f "$RPMBUILD_DIR/SPECS/$PACKAGE.spec" echo "> Making tarball .." "$MY_DIR/make_tarball.sh" "$GITREV" if [ ! $? == "0" ]; then echo "Error while making tarball" exit 1 fi echo "Tarball done" echo "> Copying tarball and .spec file into the $RPMBUILD_DIR .." cp "$PREFIX/$PACKAGE-$GITREV.tar.xz" "$RPMBUILD_DIR/SOURCES/" if [ ! $? == "0" ]; then echo "Error while: cp $PREFIX/$PACKAGE-$GITREV.tar.xz $RPMBUILD_DIR/SOURCES/" exit 1 fi # Copy via sed sed -i "s/%global gitrev .*/%global gitrev $GITREV/g" "$PREFIX/$PACKAGE.spec" sed "s/%global gitrev .*/%global gitrev $GITREV/g" "$PREFIX/$PACKAGE.spec" > "$RPMBUILD_DIR/SPECS/$PACKAGE.spec" if [ ! $? == "0" ]; then echo "Error while: cp $PREFIX/$PACKAGE.spec $RPMBUILD_DIR/SPECS/" exit 1 fi echo "Copying done" echo "> Starting rpmbuild $PACKAGE.." rpmbuild -ba "$RPMBUILD_DIR/SPECS/$PACKAGE.spec" if [ ! $? == "0" ]; then echo "Error while: rpmbuild -ba $RPMBUILD_DIR/SPECS/$PACKAGE.spec" exit 1 fi echo "rpmbuild done" echo "> Cleanup .." rpmbuild --clean "$RPMBUILD_DIR/SPECS/$PACKAGE.spec" echo "Cleanup done" echo "> Moving rpms and srpm .." mv --verbose "$RPMBUILD_DIR"/SRPMS/"$PACKAGE"-*.src.rpm "$PREFIX/." mv --verbose "$RPMBUILD_DIR"/RPMS/*/"$PACKAGE"-*.rpm "$PREFIX/." mv --verbose "$RPMBUILD_DIR"/RPMS/*/python*-"$PACKAGE"-*.rpm "$PREFIX/." echo "Moving done" echo "All done!" createrepo_c-0.17.0/utils/make_tarball.sh000077500000000000000000000004221400672373200203510ustar00rootroot00000000000000PACKAGE="createrepo_c" TARGET_DIR="./" if [ "$#" -eq "0" ]; then GITREV=`git rev-parse --short HEAD` else GITREV="$1" fi echo "Generate tarball for revision: $GITREV" git archive "${GITREV}" --prefix="$PACKAGE"/ | xz > "$TARGET_DIR"/"$PACKAGE"-"${GITREV}".tar.xz createrepo_c-0.17.0/utils/setup_for_python_metadata.py000066400000000000000000000020721400672373200232200ustar00rootroot00000000000000from distutils.core import setup import sys # This is a simple and fragile way of passing the current version # from cmake to setup as I assume no one else will use this. # # This script has to have the version always specified as last argument. version = sys.argv.pop() setup( name='createrepo_c', description='C implementation of createrepo', version=version, license='GPLv2+', author='RPM Software Management', author_email='rpm-ecosystem@lists.rpm.org', url='https://github.com/rpm-software-management', classifiers=[ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 'Operating System :: POSIX :: Linux', 'Programming Language :: C', 'Topic :: System :: Software Distribution', 'Topic :: System :: Systems Administration', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], ) createrepo_c-0.17.0/utils/single_test.py000077500000000000000000000035471400672373200203040ustar00rootroot00000000000000#!/usr/bin/env python3 """ Convert a single line from test output to run single test command. E.g: "2: test_download_package_via_metalink (tests.test_yum_package_downloading.TestCaseYumPackageDownloading) ... 
ok" To: tests/python/tests/test_yum_repo_downloading.py:TestCaseYumRepoDownloading.test_download_and_update_repo_01 """ import os import sys import argparse LIBPATH = "./build/src/python/" COMMAND = "PYTHONPATH=`readlink -f {libpath}` nosetests -s -v {testpath}" TEST_PATH_PREFIX = "tests/python" if __name__ == "__main__": parser = argparse.ArgumentParser(description='Convert a single line from '\ 'python unittesttest output to run command for this single test') parser.add_argument('test_out_line', metavar='TESTOUTPUTLINE', type=str, help='A single line from python unittesttest output') args = parser.parse_args() test_out_line = args.test_out_line # Remove suffix "... ok" or "... FAIL" test_out_line = test_out_line.split(" ... ")[0] # Remove prefix "test_number: " res = test_out_line = test_out_line.split(": ") test_out_line = res[-1] # Get test name res = test_out_line.split(" ") if len(res) != 2: print("Bad input line format") sys.exit(1) test_name, test_out_line = res # Get test case test_out_line = test_out_line.strip().lstrip("(").rstrip(")") res = test_out_line.rsplit(".", 1) if len(res) != 2: print("Bad input line format") sys.exit(1) test_out_line, test_case = res # Get path test_path = test_out_line.replace(".", "/") + ".py" full_path = os.path.join(TEST_PATH_PREFIX, test_path) testpath = "{0}:{1}.{2}".format(full_path, test_case, test_name) libpath = LIBPATH command = COMMAND.format(libpath=libpath, testpath=testpath) print(command) createrepo_c-0.17.0/utils/speed_test.sh000077500000000000000000000065351400672373200201050ustar00rootroot00000000000000#!/bin/bash # Global variables REPO="" # Path to repo CLEAR_CACHE=true # Clear cache? # Param check if [ $# -lt "1" -o $# -gt "2" ]; then echo "Usage: `basename $0` [--cache]" exit 1 fi if [ $1 == "-h" ]; then echo "Tool for comparsion of speed between createrepo and createrepo_c." echo "WARNING! This tool changes (removes) repodata if exits!" echo "Usage: `basename $0` [--cache]" echo "Options:" echo " --cache Skip cleaning of disk cache" exit 0 fi if [ $# -eq "2" ]; then if [ $2 != "--cache" ]; then echo "Unknown param $2" exit 1 else CLEAR_CACHE=false fi fi if $CLEAR_CACHE; then if [ `id --user` != "0" ]; then echo "Note:" echo "You are not root!" echo "For cleaning disk caches you have to have a root permissions." echo "You will be asked for sudo password." echo "(Maybe even several times)" sudo bash -c "echo \"OK\"" echo fi fi REPO=$1 if [ ! -d "$REPO" ]; then echo "Directory $REPO doesn't exists" exit 1 fi # Main function clear_cache { # Clear cache if CLEAR_CACHE is true if ! 
$CLEAR_CACHE; then return fi if [ `id --user` != "0" ]; then sudo bash -c "echo 3 > /proc/sys/vm/drop_caches" else echo 3 > /proc/sys/vm/drop_caches fi } function run { # Run - entire metadata from scratch rm -rf "$REPO"/.repodata # Just in case previous run of createrepo_c failed rm -rf "$REPO"/repodata echo -e "\n\$ createrepo_c $1 $REPO" clear_cache (time createrepo_c $1 "$REPO") 2>&1 rm -rf "$REPO"/repodata echo -e "\n\$ createrepo $1 $REPO" clear_cache (time createrepo $1 "$REPO") 2>&1 echo } function dirty_run { # Run - repodata already exists in place rm -rf "$REPO"/.repodata # Just in case previous run of createrepo_c failed # Prepare metadata rm -rf "$REPO"/repodata createrepo --quiet --database "$REPO" > /dev/null echo -e "\n\$ createrepo_c $1 $REPO" clear_cache (time createrepo_c $1 "$REPO") 2>&1 # Prepare metadata rm -rf "$REPO"/repodata createrepo --quiet --database "$REPO" > /dev/null echo -e "\n\$ createrepo $1 $REPO" clear_cache (time createrepo $1 "$REPO") 2>&1 echo } echo "Test setup" echo "+---------------------------------------------------------------+" echo "System:" uname --operating-system --kernel-release if [ -e /etc/issue ]; then head -q -n 1 /etc/issue fi grep "model name" /proc/cpuinfo uname --processor grep "MemTotal" /proc/meminfo echo echo "Package versions:" rpm -qa|grep createrepo echo echo "Test repo:" echo "$REPO" echo echo "Case-1: generating entire metadata from scratch" echo "+---------------------------------------------------------------+" echo "+ With sqlite DB" echo "+----------------------+" run "--database" echo "+ Without sqlite DB" echo "+----------------------+" run "--no-database" echo "Case-2: re-generating metadata (with existing repodata in place)" echo "+---------------------------------------------------------------+" echo "+ With sqlite DB" echo "+----------------------+" dirty_run "--update --database" echo "+ Without sqlite DB" echo "+----------------------+" dirty_run "--update --no-database" # Final clean up rm -rf "$REPO"/repodata rm -rf "$REPO"/.repodata
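utils/speed_test.sh above benchmarks createrepo_c against the original createrepo on a single repository: it times both tools with and without the sqlite database, first generating metadata from scratch and then regenerating with --update over existing repodata, and by default it drops the kernel disk caches between runs so warm caches do not skew the results (the --cache option skips that step). A minimal sketch of the same timing idea in Python, assuming createrepo_c is on PATH and REPO is a placeholder path to a directory of RPMs:

    import shutil
    import subprocess
    import time

    REPO = "/path/to/repo"  # placeholder repository path

    # Start from scratch, as the shell script does for its first test case
    shutil.rmtree(f"{REPO}/repodata", ignore_errors=True)

    start = time.monotonic()
    subprocess.run(["createrepo_c", "--database", REPO], check=True)
    print(f"createrepo_c --database: {time.monotonic() - start:.1f} s")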