pax_global_header00006660000000000000000000000064133236063400014512gustar00rootroot0000000000000052 comment=9a2aa73472c4cd4a26e0c0728faf38585d85bf5b leatherman-1.4.2+dfsg/000075500000000000000000000000001332360634000145535ustar00rootroot00000000000000leatherman-1.4.2+dfsg/.gitignore000064400000000000000000000003661332360634000165500ustar00rootroot00000000000000# Misc files *.swp .DS_Store Gemfile.local Gemfile.lock /nbproject compile_commands.json # Generated files /debug*/ /release*/ /.idea/ /*build/ /*/tests/fixtures.hpp /curl/inc/leatherman/curl/export.h /curl/tests/export.h cmake/leatherman.cmake leatherman-1.4.2+dfsg/.travis.yml000064400000000000000000000026111332360634000166640ustar00rootroot00000000000000sudo: required services: - docker before_install: - docker pull gcr.io/cpp-projects/cpp-ci:1 script: - > docker run -v `pwd`:/leatherman gcr.io/cpp-projects/cpp-ci:1 /bin/bash -c " cd /leatherman && rm locales/leatherman.pot && cmake $EXTRA_VARS . && mkdir dest && make $TARGET DESTDIR=/leatherman/dest -j2 && { [ '$COVERALLS' == 'ON' ] && coveralls --gcov-options '\-lp' -r . -b . -e src -e vendor >/dev/null || true; } " - if [ "$DO_RELEASE" == "true" ]; then tar czvf leatherman.tar.gz `find dest -type f -print`; fi env: matrix: - TARGET=cpplint - TARGET=cppcheck - TARGET="all test install ARGS=-v" DO_RELEASE=true EXTRA_VARS="-DBOOST_STATIC=ON" - TARGET="all test install ARGS=-v" EXTRA_VARS="-DCMAKE_BUILD_TYPE=Debug -DCOVERALLS=ON" COVERALLS=ON - TARGET="all test install ARGS=-v" EXTRA_VARS="-DLEATHERMAN_SHARED=ON" - TARGET="all test install ARGS=-v" EXTRA_VARS="-DLEATHERMAN_USE_LOCALES=OFF" - TARGET="all test install ARGS=-v" EXTRA_VARS="-DLEATHERMAN_GETTEXT=OFF" deploy: provider: releases api_key: secure: XARXGAo5DNbqu7/EVPlKocdAAdtVqui2yaJiqw8GVXMSsK5lxqkHNfm1UF204y9ONl7DTa1hzBS8VRLupfb2aIjIZWMM68tnWYbyJYNdRUevylPTK01rO9wpR8iVe7xFqQOlDXPrX0UVfKCvf+e1j+IleO5Eyjf1mTLIRR3fuOY= file: leatherman.tar.gz skip_cleanup: true on: repo: puppetlabs/leatherman tags: true condition: '"$DO_RELEASE" == "true"' leatherman-1.4.2+dfsg/CHANGELOG.md000064400000000000000000000323401332360634000163660ustar00rootroot00000000000000# Change Log All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). ## [1.4.0] ### Changed - Updated Catch library to 1.10.0. - Updated Boost.Nowide to ec9672b - Update Travis CI to use container-based builds ### Fixed - Builds with Xcode 9 - Allow Leatherman.execute calls to opt into allowing tasks to finish without reading stdin (i.e. don't fail when the pipe is closed) via the `allow_stdin_unread` option. This specifically supports pxp-agent's task execution where input may or may not be used. (LTH-149) ## [1.3.0] ### Added - A toPrettyJson routine to Leatherman.json\_container that pretty prints a valid JSON object. ## [1.2.2] ### Fixed - Allow Leatherman.execute calls to opt into allowing tasks to finish without reading stdin (i.e. don't fail when the pipe is closed) via the `allow_stdin_unread` option. This specifically supports pxp-agent's task execution where input may or may not be used. (LTH-149) ## [1.2.1] ### Fixed - Made Leatherman.curl's download_file response accessible, with results included when an error code is returned. 
## [1.2.0] ### Added - A URI parsing utility in Leatherman.util (LTH-143) ### Changed - Refactored Leatherman.curl's http_file_operation_exception to inherit from http_request_exception so that the three possible failure modes are distinguished by class ## [1.1.2] ### Fixed - Separated the possible failure modes for Leatherman.curl into three categories (LTH-142) * Curl setup errors * File operation errors (e.g. writing to disk during file download) * Server side errors (e.g. bad host) - Simplified the logic of Leatherman.curl's download_file method by abstracting out actions associated with the temporary file (creating, writing, removing) to a class that follows the RAII pattern. (LTH-142) ## [1.1.1] ### Fixed - Generate build artifact with GCC 5.2.0 on Windows. ## [1.1.0] ### Added - Execution with file redirection and `atomic_write_to_file` can specify the permissions of those files. (LTH-139) - Leatherman.curl added a download_file function for doing streaming file downloads. (LTH-140) ### Fixed - Fix redundant newlines when using `execute` that redirects output to files when not using the `trim` option. This combination now also ensures empty lines are not skipped. A side effect is that when not using `trim`, empty lines may appear when iterating over lines of output via `each_line` as well. (LTH-138) ## [1.0.0] Final tag for Leatherman 1.0.0, containing the same change set as 0.99.0. ## [0.99.0] This is a pre-release version for Leatherman 1.0.0, containing backwards-incompatible API changes. ### Changed - Remove Ruby bindings for Fixnum and Bignum, replace with Integer for Ruby 2.4 support (LTH-124) ## [0.12.3] This is a maintenance release to re-sync the code version with the tag, in order to make our internal automation happy ## [0.12.2] ### Added - Leatherman can now be built with DEP on Windows ## [0.12.1] ### Fixed - Locale files are installed relative to the Leatherman install root, taking into account support for relocatable packages (LTH-135) ### Added - Ruby API binding for rb_ll2inum (missing from 0.12.0) ## [0.12.0] ### Added - Support for finding locale files with a relocatable package, particularly on Windows (LTH-133) ## [0.11.2] ### Added - Ruby API binding for rb_ll2inum ## [0.11.1] ### Fixed - Circumvent a bug in Boost.Log's severity logger that prevented logging on AIX (LTH-128) ## [0.11.0] ### Added - Add an option to use thread-safe forking at the expense of failing to fork if maxed out on memory; more permanent fix for the Solaris deadlock issue addressed in 0.10.2 (LTH-126) ## [0.10.2] ### Fixed - Avoid deadlock on Solaris using vfork/exec in a multithreaded process with Leatherman.execution (LTH-125) ## [0.10.1] ### Fixed - Only apply large file support flags to Leatherman.execution (LTH-120) ## [0.10.0] ### Added - Solaris implementation of the `create_detached_process` execution option - execution of child processes in separate contracts (LTH-120) ### Changed - Renamed `create_new_process_group` execution option to `create_detached_process` to make the concept more broadly applicable (LTH-120) ## [0.9.4] ### Fixed - Handle null characters in JSON strings (LTH-116) - Explicitly pass release flag to pod2man ## [0.9.3] ### Fixed - Fix Boost.Log sink initialization with Boost 1.62 (LTH-115) ### Changed - Switch to compile-time unpacking vendored packages (LTH-117) ## [0.9.2] ### Changed - leatherman::execution can now be requested to convert Windows line endings to standard ones (LTH-114) ## [0.9.1] ### Fixed - Externalized some strings for localization that 
were missed (LTH-59) - Fixed consuming Leatherman without cflags (LTH-113) - Updated the logging backend to filter records based on log level; previously filtering was only applied when using Leatherman logging functions. ### Changed - Third-party libraries are now added as compressed files ## [0.9.0] ### Added - Add translation helper functions and plural format support (LTH-109) ## [0.8.1] ### Fixed - Fix compilation with curl 7.50.0 ## [0.8.0] ### Added - Added protect version number to libraries ### Fixed - Fix Leatherman cleanup of Ruby objects (FACT-1454) - Add inherit_locale option to execute (LTH-107) ## [0.7.5] ### Added - Added `leatherman::windows::file_util::get_programdata_dir` to properly get the ProgramData directory on Windows. ### Fixed - Changed the windows logging namespace to logging.windows. ## [0.7.4] ### Fixed - Leatherman.Ruby compatibility with Ruby 2.3. ## [0.7.3] ### Fixed - Fixed compilation with LEATHERMAN_USE_LOCALES=OFF. - Remove line numbers from .pot files generated via gettext. ### Added - Added LEATHERMAN_GETTEXT option to disable use of gettext. ## [0.7.2] ### Fixed - Fallback to multi-threaded apartments for COM on Microsoft Nano Server. ### Added - Add `Util::Timer::elapsed_milliseconds`. - Add context and plural support in Leatherman.Locale `translate` (and new `translate_c`) methods. ## [0.7.1] ### Fixed - Binary compatibility with 0.6.x has been restored ## [0.7.0] ### Fixed - `symbol_exports` helper no longer applies its macros to all targets ### Added - (LTH-97) Applications can now disable locale support in logging ## [0.6.3] ### Fixed - (LTH-96) Translate log message without substitutions - (LTH-95) Fix unit tests using shared libraries on Windows - Minor updates to tests and documentation ## [0.6.2] - 2016-04-20 ### Fixed - Runtime shared library load errors on AIX. ## [0.6.1] - 2016-04-19 ### Fixed - Missing include header in leatherman/ruby/api.h, needed on Mac OS X ## [0.6.0] - 2016-04-19 ### Fixed - Outputting WMI errors when l10n is disabled - Leatherman will no longer use installed leatherman headers when building itself ### Added - Leatherman now builds on Windows Nano Server - Ruby binding for `rb_last_status_set`, needed for Facter's execution API - The `result` struct in `execution` now contains the PID of the executed processes ## [0.5.1] - 2016-04-18 0.5.0 was incorrectly tagged, causing Travis and Appveyor to skip creating build artifacts. ## [0.5.0] - 2016-04-18 ### Fixed - Static dependency libraries will no longer be linked to Leatherman consumers when using a shared leatherman library - Interfacing with ruby APIs for 64-bit integers on Windows (See Added and Removed below for details) ### Added - Ability to spawn child processes in a new group on Windows - Ruby bindings for `is_bignum` - Ruby `num2size_t` help for consistent access to array/string sizes ### Removed - Windows 2003 / XP Support. This allows us to better take advantage of modern Windows APIs and features - Bindings to ruby `rb_num2long` and `rb_num2ulong`, as they were inconsistent across platforms ## [0.4.2] - 2016-03-07 ### Fixed - `find_package(Leatherman)` will now raise a CMake error if a consuming application requests locale support when leatherman was built without it. ### Added - A preprocessor definition `LEATHERMAN_USE_LOCALES` for consuming projects to know whether locale support is enabled. 
## [0.4.1] - 2016-03-02

### Fixed
- Install `generate_translations.cmake` for internationalization
- Fix builds on Mac OS X against static boost

## [0.4.0] - 2016-02-23

### Fixed
- Header search order when Leatherman is installed to a default system path
- Ruby string conversion when the Ruby string is in a non-unicode locale
- Link order when building a shared library on Windows

### Added
- i18n support using Boost::Locale and gettext .po files

## [0.3.7] - 2016-02-10

### Fixed
- Made the pod2man CMake macro available to downstream consumers.

## [0.3.6] - 2016-02-05

### Fixed
- Added version to Leatherman CMake config, so downstream projects can depend on a particular version.

### Added
- Added pod2man macro for generating man pages.

## [0.3.5] - 2016-01-14

### Fixed
- `leatherman.ruby` can now find a Ruby DLL on Windows when Leatherman is compiled as shared libraries (LTH-71)
- `leatherman.dynamic_library` debug logging when searching for a library will now correctly print the name of the library
- Leatherman unit tests will now run successfully under Cygwin

## [0.3.4] - 2015-12-29

### Fixed
- Fixed a compilation issue with the execution tests on OSX.

## [0.3.3] - 2015-12-23

### Changed
- The vendored `boost::nowide` has been updated to a version that supports C++11 iostream changes
- `LIB_SUFFIX` is now respected for installing to `lib32` or `lib64` if needed

### Fixed
- It is now possible to build Leatherman without curl support
- It is now possible to build Leatherman as a set of DLLs on Windows
- An order-dependent unit test issue has been resolved

### Known Issues
- Leatherman cannot load ruby when built as a DLL on Windows (LTH-71)

## [0.3.2] - 2015-12-16

### Fixed
- The `windows` library incorrectly used `target_link_libraries` instead of `add_leatherman_deps`

## [0.3.1] - 2015-12-16

### Fixed
- The key for publishing builds from travis was incorrectly encrypted. 
## [0.3.0] - 2015-12-16 ### Added - Option to build dynamic libraries - `leatherman_install` helper for installing targets consistently - leatherman.ruby - added rb\_num2long and rb\_cBignum to the API - add array\_len to query the length of a Ruby array - leatherman.execution - return exit\_code from execute commands, and switch to returning a struct with named members - add child process stdin support to execute - add execute() overloads for registering a callback for the PID (once known), and redirecting streams to files - leatherman.util - add scoped\_handle helper ### Changed - Updated cpplint to version `#409` - Use static libnowide by default - leatherman.curl - added const annotations to curl::response::header - leatherman.json\_container - removed unnecessary vector copying ### Fixed - Builds on AIX - Builds with Xcode 7 - Builds with GCC 5.2 - Fixed using as a stand-alone library - leatherman.curl - support redirects; added seek\_body to specify a seek function, and set\_body now requires specifying the http\_method - leatherman.execution - fixed occasionally skipping final output from stderr - protect against potential named pipe re-use - leatherman.logging - fix error message when requesting an invalid log\_level to correctly show the requested level - leatherman.json\_container - remove use of MemoryPoolAllocator with rapidjson, as it's buggy on Solaris SPARC - fixed freed memory read with getRaw ### Known Issues - Dynamic library builds fail on Windows because dllexports aren't declared ## [0.2.0] - 2015-09-09 ### Added - leatherman.curl - a C++ interface for libcurl - leatherman.dynamic\_library - cross-platform loading of dynamic libraries - leatherman.execution - cross-platform system invocation with input/output support - leatherman.file\_util - utilities for manipulating files, augmenting Boost.FileSystem - leatherman.json\_container - a simplified C++ interface for rapidjson - leatherman.ruby - support for embedding and working with the Ruby interpreter - leatherman.util - general C++ utilities - strings, augmenting Boost.Algorithms - time, augmenting Boost.Date\_time - RAII wrappers - environment variables - regex helpers, augmenting Boost.Regex - leatherman.windows - Windows-specific C++ utilities - process and user querying - registry queries - error wrapper - wmi queries - the rapidjson library - CMake utilities - get git revision - put binaries in one directory on Windows - link against CURL statically or dynamically ### Changed - Leatherman can now be installed to the system as a stand-alone library. - Logging can now include source line numbers, enabled with the CMake macro `leatherman_logging_line_numbers()`. - Logging on Windows now includes colored output ### Fixed - Now links the correct Boost optimized/debug libraries. - Builds on Solaris and FreeBSD - Remove CMake requirement that Ruby is installed. Leatherman.ruby tests will still expect Ruby and fail if missing. - CMake no longer errors if including header-only libraries such as Catch. 
## [0.1.0] - 2015-06-16 ### Added - leatherman.locale - set locale across platforms - leatherman.logging - logging based on Boost.Log - CMake utilities - compile flags - Coveralls.io setup - link Boost statically or dynamically - the Catch C++ testing framework - the Boost.nowide library for cross-platform UTF-8 io leatherman-1.4.2+dfsg/CMakeLists.txt000064400000000000000000000121521332360634000173140ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.2.2) project(leatherman VERSION 1.4.2) if (WIN32) link_libraries("-Wl,--nxcompat -Wl,--dynamicbase") endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" "${PROJECT_BINARY_DIR}/cmake") # Populate locale install location configure_file(cmake/leatherman.cmake.in "${PROJECT_BINARY_DIR}/cmake/leatherman.cmake" @ONLY) include(internal) # If we're the top-level project, we want to ensure the build type is # sane, and flag ourselves as such for later checks if ("${CMAKE_PROJECT_NAME}" STREQUAL "${PROJECT_NAME}") if (NOT CMAKE_BUILD_TYPE) message(STATUS "Defaulting to a release build.") set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." FORCE) endif() set(LEATHERMAN_TOPLEVEL TRUE) else() set(LEATHERMAN_TOPLEVEL FALSE) endif() # If we're the top-level project enable everything by default defoption(LEATHERMAN_DEFAULT_ENABLE "Should Leatherman libraries all be built by default" ${LEATHERMAN_TOPLEVEL}) defoption(LEATHERMAN_DEBUG "Enable verbose logging messages from leatherman macros" FALSE) defoption(LEATHERMAN_ENABLE_TESTING "Build the leatherman test binary" ${LEATHERMAN_DEFAULT_ENABLE}) defoption(LEATHERMAN_INSTALL "Install the leatherman libraries and headers" ${LEATHERMAN_DEFAULT_ENABLE}) defoption(LEATHERMAN_SHARED "Create shared libraries instead of static" FALSE) defoption(LEATHERMAN_USE_ICU "Set when Boost is built with ICU" FALSE) set(BUILDING_LEATHERMAN TRUE) #As with most things, we rely on the containing project to set up the #common flags if (LEATHERMAN_TOPLEVEL) include(options) include(cflags) endif() if (${LEATHERMAN_SHARED} AND (WIN32 OR ${CURL_STATIC})) set(MOCK_CURL FALSE) else() set(MOCK_CURL TRUE) endif() defoption(LEATHERMAN_MOCK_CURL "Use mock curl library for testing Leatherman.curl" ${MOCK_CURL}) add_definitions(${LEATHERMAN_DEFINITIONS}) if (LEATHERMAN_LOCALE_VAR AND LEATHERMAN_LOCALE_INSTALL) # Add an environment variable to look up install prefix at runtime. add_definitions(-DLEATHERMAN_LOCALE_VAR="${LEATHERMAN_LOCALE_VAR}" -DLEATHERMAN_LOCALE_INSTALL="${LEATHERMAN_LOCALE_INSTALL}") else() # Add install location instead. add_definitions(-DLEATHERMAN_LOCALE_INSTALL="${CMAKE_INSTALL_PREFIX}/share/locale") endif() file(GLOB_RECURSE ALL_LEATHERMAN_SOURCES */src/*.cc */inc/*.hpp) add_subdirectory(locales) add_leatherman_dir(nowide) add_leatherman_dir(util) add_leatherman_dir(locale) add_leatherman_dir(logging) add_leatherman_dir(json_container) add_leatherman_dir(file_util) add_leatherman_dir(curl) if (WIN32) add_leatherman_dir(windows) endif() add_leatherman_dir(dynamic_library) add_leatherman_dir(execution) add_leatherman_dir(ruby) # Ensure no LEATHERMAN_LIBS are in LEATHERMAN_DEPS, LEATHERMAN_LIBS should be declared in dependency # order above, and we don't want them to come after other dependencies. 
if (LEATHERMAN_LIBS)
    list(REMOVE_ITEM LEATHERMAN_DEPS ${LEATHERMAN_LIBS})
endif()

if (LEATHERMAN_LIBS OR LEATHERMAN_DEPS)
    if (LEATHERMAN_SHARED)
        # When using shared libraries, leave out dependencies as they're already handled in the shared libs.
        # Including deps after the shared libraries can lead to "multiple definition" errors.
        list(APPEND LEATHERMAN_LIBRARIES ${LEATHERMAN_LIBS})
    else()
        list(APPEND LEATHERMAN_LIBRARIES ${LEATHERMAN_LIBS} ${LEATHERMAN_DEPS})
    endif()
endif()

if (LEATHERMAN_INCLUDE_DIRS)
    list(REMOVE_DUPLICATES LEATHERMAN_INCLUDE_DIRS)
endif()

export_var(LEATHERMAN_INCLUDE_DIRS)
export_var(LEATHERMAN_LIBRARIES)

if(LEATHERMAN_ENABLE_TESTING)
    enable_testing()
    add_subdirectory(tests)

    add_cppcheck_dirs(${LEATHERMAN_CPPCHECK_DIRS})
    add_cpplint_files(${ALL_LEATHERMAN_SOURCES})

    # If we're toplevel we want to own these targets. If not we assume
    # that containing project will set them up for us.
    if (LEATHERMAN_TOPLEVEL)
        enable_cppcheck()
        enable_cpplint()
    endif()
endif()

# Install the cmake files we need for consumers
if (LEATHERMAN_INSTALL)
    set(CMAKE_FILES
        cmake/cflags.cmake
        cmake/GetGitRevisionDescription.cmake
        cmake/GetGitRevisionDescription.cmake.in
        cmake/pod2man.cmake
        cmake/options.cmake
        cmake/leatherman_config.cmake
        cmake/normalize_pot.cmake
        cmake/generate_translations.cmake
    )
    set(INSTALL_LOC "lib${LIB_SUFFIX}/cmake/leatherman")
    install(FILES ${CMAKE_FILES} "${PROJECT_BINARY_DIR}/cmake/leatherman.cmake" DESTINATION "${INSTALL_LOC}/cmake/")
    configure_file(LeathermanConfig.cmake.in "${PROJECT_BINARY_DIR}/LeathermanConfig.cmake" @ONLY)
    configure_file(LeathermanConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/LeathermanConfigVersion.cmake" @ONLY)
    install(FILES "${PROJECT_BINARY_DIR}/LeathermanConfig.cmake" "${PROJECT_BINARY_DIR}/LeathermanConfigVersion.cmake" DESTINATION ${INSTALL_LOC})
    install(EXPORT LeathermanLibraries DESTINATION ${INSTALL_LOC})
    install(FILES "scripts/cpplint.py" DESTINATION "${INSTALL_LOC}/scripts/")
endif()

leatherman-1.4.2+dfsg/CONTRIBUTING.md

# How to contribute

Third-party patches are essential for keeping leatherman great. We simply can't access the huge number of platforms and myriad configurations for running leatherman. We want to keep it as easy as possible to contribute changes that get things working in your environment. There are a few guidelines that we need contributors to follow so that we can have a chance of keeping on top of things.

## Getting Started

* Make sure you have a [Jira account](http://tickets.puppetlabs.com)
* Make sure you have a [GitHub account](https://github.com/signup/free)
* Submit a ticket for your issue, assuming one does not already exist.
  * Clearly describe the issue including steps to reproduce when it is a bug.
  * Make sure you fill in the earliest version that you know has the issue.
* Fork the repository on GitHub

## New Libraries

All new libraries should include a short section in the README describing how to set up and use the library.

## Making Changes

* Create a topic branch from where you want to base your work.
  * This is usually the master branch.
  * Only target release branches if you are certain your fix must be on that branch.
  * To quickly create a topic branch based on master: `git branch fix/master/my_contribution master`, then check out the new branch with `git checkout fix/master/my_contribution`. Please avoid working directly on the `master` branch.
* Make commits of logical units. 
* Check for unnecessary whitespace with `git diff --check` before committing. * If you have python 2 in your path you can run `make cpplint` to ensure your code formatting is clean. The linter runs as part of Travis CI and could fail the CI build. * If you have cppcheck in your path you can run `make cppcheck` to ensure your code passes static analysis. cppcheck runs as part of Travis CI and could fail the CI build. * Make sure your commit messages are in the proper format. ```` (LTH-1234) Make the example in CONTRIBUTING imperative and concrete Without this patch applied the example commit message in the CONTRIBUTING document is not a concrete example. This is a problem because the contributor is left to imagine what the commit message should look like based on a description rather than an example. This patch fixes the problem by making the example concrete and imperative. The first line is a real life imperative statement with a ticket number from our issue tracker. The body describes the behavior without the patch, why this is a problem, and how the patch fixes the problem when applied. ```` * Make sure you have added the necessary tests for your changes. * Run _all_ the tests to assure nothing else was accidentally broken. ## Making Trivial Changes ### Documentation For changes of a trivial nature to comments and documentation, it is not always necessary to create a new ticket in Jira. In this case, it is appropriate to start the first line of a commit with '(doc)' instead of a ticket number. ```` (doc) Add documentation commit example to CONTRIBUTING There is no example for contributing a documentation commit to the Leatherman repository. This is a problem because the contributor is left to assume how a commit of this nature may appear. The first line is a real life imperative statement with '(doc)' in place of what would have been the ticket number in a non-documentation related commit. The body describes the nature of the new documentation or comments added. ```` ## Submitting Changes * Sign the [Contributor License Agreement](http://links.puppetlabs.com/cla). * Push your changes to a topic branch in your fork of the repository. * Submit a pull request to the repository in the puppetlabs organization. * Update your ticket to mark that you have submitted code and are ready for it to be reviewed. * Include a link to the pull request in the ticket # Additional Resources * [More information on contributing](http://links.puppetlabs.com/contribute-to-puppet) * [Bug tracker (Jira)](https://tickets.puppetlabs.com/browse/LTH) * [Contributor License Agreement](http://links.puppetlabs.com/cla) * [General GitHub documentation](http://help.github.com/) * [GitHub pull request documentation](http://help.github.com/send-pull-requests/) * #puppet-dev IRC channel on freenode.org leatherman-1.4.2+dfsg/LICENSE000064400000000000000000000013111332360634000155540ustar00rootroot00000000000000 leatherman - A collection of C++ and CMake utility libraries. Copyright (C) 2015 Puppet Labs Inc Puppet Labs can be contacted at: info@puppetlabs.com Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. leatherman-1.4.2+dfsg/LeathermanConfig.cmake.in000064400000000000000000000043171332360634000213750ustar00rootroot00000000000000get_filename_component(current_directory ${CMAKE_CURRENT_LIST_FILE} DIRECTORY) list(APPEND CMAKE_MODULE_PATH "${current_directory}/cmake") include(leatherman_config) include(options) include("${current_directory}/LeathermanLibraries.cmake") get_filename_component(LEATHERMAN_PREFIX "${current_directory}/../../../" ABSOLUTE) @LEATHERMAN_COMPONENTS@ set(LEATHERMAN_HAVE_LOCALES @LEATHERMAN_USE_LOCALES@) if (LEATHERMAN_USE_LOCALES AND NOT LEATHERMAN_HAVE_LOCALES) message(SEND_ERROR "You requested locale support, but leatherman was built without it") endif() set(LEATHERMAN_SHARED @LEATHERMAN_SHARED@) debug("Selected components: ${Leatherman_FIND_COMPONENTS}") foreach(component ${Leatherman_FIND_COMPONENTS}) string(TOUPPER "${component}" id_upper) set(exclude_var "LEATHERMAN_EXCLUDE_${id_upper}") set(include_var "LEATHERMAN_${id_upper}_INCLUDE") set(lib_var "LEATHERMAN_${id_upper}_LIB") set(deps_var "LEATHERMAN_${id_upper}_DEPS") set(libs_var "LEATHERMAN_${id_upper}_LIBS") debug("Exclude variable ${exclude_var} is ${${exclude_var}}") if(${${exclude_var}}) debug("Excluding values for ${id_upper}") debug("* include is ${include_var}: ${${include_var}}") debug("* library is ${libs_var}: ${${libs_var}}") else() debug("Appending values for ${id_upper} to common vars") debug("* include is ${${include_var}}") debug("* library is ${${libs_var}}") list(APPEND LEATHERMAN_INCLUDE_DIRS ${${include_var}}) if (NOT "" STREQUAL "${${lib_var}}") # Prepend leatherman libraries, as later libs may depend on earlier libs. list(INSERT LEATHERMAN_LIBS 0 ${${lib_var}}) endif() if (${LEATHERMAN_SHARED}) # Created with shared libraries, ignore dependencies as they're compiled-in. 
set(${libs_var} ${${lib_var}}) else() append_new(LEATHERMAN_DEPS ${${deps_var}}) endif() endif() endforeach() if (LEATHERMAN_LIBS OR LEATHERMAN_DEPS) set(LEATHERMAN_LIBRARIES ${LEATHERMAN_LIBS} ${LEATHERMAN_DEPS}) endif() if (LEATHERMAN_INCLUDE_DIRS) list(REMOVE_DUPLICATES LEATHERMAN_INCLUDE_DIRS) endif() set(LEATHERMAN_MODULE_DIR "${current_directory}/cmake") leatherman-1.4.2+dfsg/LeathermanConfigVersion.cmake.in000064400000000000000000000005741332360634000227440ustar00rootroot00000000000000set(PACKAGE_VERSION "@PROJECT_VERSION@") # Check whether the requested PACKAGE_FIND_VERSION is compatible if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") set(PACKAGE_VERSION_COMPATIBLE FALSE) else() set(PACKAGE_VERSION_COMPATIBLE TRUE) if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") set(PACKAGE_VERSION_EXACT TRUE) endif() endif() leatherman-1.4.2+dfsg/MAINTAINERS000064400000000000000000000013341332360634000162510ustar00rootroot00000000000000{ "version": 1, "file_format": "This MAINTAINERS file format is described at http://pup.pt/maintainers", "issues": "https://tickets.puppet.com/browse/LTH", "internal_list": "https://groups.google.com/a/puppet.com/forum/?hl=en#!forum/discuss-leatherman-maintainers", "people": [ { "github": "MikaelSmith", "email": "michael.smith@puppet.com", "name": "Michael Smith" }, { "github": "branan", "email": "branan@puppet.com", "name": "Branan Riley" }, { "github": "Magisus", "email": "maggie@puppet.com", "name": "Maggie Dreyer" }, { "github": "ahenroid", "email": "andy.henroid@puppet.com", "name": "Andy Henroid" } ] } leatherman-1.4.2+dfsg/README.md000064400000000000000000000370261332360634000160420ustar00rootroot00000000000000# Leatherman - a C++ toolkit **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* - [Usage](#usage) - [Dependencies](#dependencies) - [As a Standalone Library](#as-a-standalone-library) - [Building Leatherman](#building-leatherman) - [Using Leatherman](#using-leatherman) - [As a Submodule](#as-a-submodule) - [Variables Set by Leatherman](#variables-set-by-leatherman) - [CMake Helpers Provided by Leatherman](#cmake-helpers-provided-by-leatherman) - [Internationalization (i18n)](#internationalization-i18n) - [Helper Functions](#helper-functions) - [Enabling i18n](#enabling-i18n) - [Extracting and Translating Text](#extracting-and-translating-text) - [Limitations](#limitations) - [Debugging](#debugging) - [Using Logging](#using-logging) - [Using Catch](#using-catch) - [Using Windows](#using-windows) - [Using JsonContainer](#using-jsoncontainer) - [Using curl](#using-curl) - [Extending Leatherman](#extending-leatherman) - [Typical Leatherman Directory Structure](#typical-leatherman-directory-structure) - [Sample Library CMakeLists.txt file](#sample-library-cmakeliststxt-file) - [Vendoring Other Libraries](#vendoring-other-libraries) - [How To Release](#how-to-release) ## Usage Leatherman can be used in one of two ways: It can be installed as a regular library, and included using the normal CMake `find_package` syntax, or it can be setup as a submodule. The recommended method is to install Leatherman and use it as a regular system library. Leatherman is broken up into a number of focused component libraries. Both methods of using Leatherman allow you to control which components are built and used. Library install locations can be controlled using the LIB_SUFFIX variable, which results in installing libraries to `lib${LIB_SUFFIX}`. 
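For orientation, the consumer-side workflow boils down to a short build file. The sketch below is illustrative only: the `find_package` call, component names, and `LEATHERMAN_*` variables are described in detail in the sections that follow, while the project name, target name, and source file are placeholders.

```
# Minimal consumer sketch (assumes Leatherman is installed in a system prefix,
# or in a directory added to CMAKE_PREFIX_PATH as described below).
cmake_minimum_required(VERSION 3.2.2)
project(myapp CXX)

# Request only the components this hypothetical application needs.
find_package(Leatherman COMPONENTS locale logging REQUIRED)

include_directories(${LEATHERMAN_INCLUDE_DIRS})
add_executable(myapp main.cc)
target_link_libraries(myapp ${LEATHERMAN_LIBRARIES})
```
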
### Dependencies * Boost, at least version 1.54 ### As a Standalone Library The recommended way to use Leatherman is as a library built and installed on your system. #### Building Leatherman Leatherman is built like any other cmake project: mkdir build cd build cmake .. make sudo make install By default, all of the component libraries are built when Leatherman is used standalone. To disable a component, you can set `LEATHERMAN_ENABLE_` to any of CMake's falsy values. #### Using Leatherman Leatherman's `make install` deploys a standard CMake config file to `lib/cmake/leatherman`. This allows the normal CMake `find_package` workflow to be used. find_package(Leatherman COMPONENTS foo bar baz REQUIRED) If Leatherman is not installed to a standard system prefix, or on Windows where there is no standard prefix, you can set `CMAKE_PREFIX_PATH` to the location of Leatherman's install. ### As a Submodule Leatherman can be included as a git submodule and added as a CMake subdirectory. Consider the following: CMakeLists.txt lib/ CMakeLists.txt vendor/ leatherman/ In this setup, your CMakeLists.txt would need to contain the following: ... add_subdirectory(vendor/leatherman) ... To enable individual Leatherman components, you must set `LEATHERMAN_ENABLE_`. Any libraries not explicitly enabled will not be built or available to the containing project. ... set(LEATHERMAN_ENABLE_LOCALE TRUE) add_subdirectory(vendor/leatherman) ... ### Variables Set by Leatherman Leatherman sets two top-level CMake variables: * `LEATHERMAN_INCLUDE_DIRS` The include paths of all enabled leatherman libraries * `LEATHERMAN_LIBRARIES` The library names of all enabled leatherman libraries, as well as their dependencies. In addition, each enabled library sets a number of library-specific variables: * `LEATHERMAN__INCLUDE` The include directory or directories for the given leatherman library. * `LEATHERMAN__LIB` The library name as used by CMake. In the case of header-only leatherman libraries, this will be set to the empty string. * `LEATHERMAN__DEPS` Any dependency libraries needed by the given library. This could include other leatherman libraries or 3rd-party libraries found via CMake. * `LEATHERMAN__LIBS` The contents of both `LEATHERMAN__LIB` and `LEATHERMAN__DEPS` ### CMake Helpers Provided by Leatherman In addition to the C++ library components, Leatherman provides a few CMake helpers. These will be automatically added to your `CMAKE_MODULE_PATH` when `find_package` is processed. * `options`: Common CMake options for leatherman features. Should almost always be used. * `cflags`: Sets a `LEATHERMAN_CXX_FLAGS` variable containing the Puppet Labs standard CXXFLAGS for your compiler and platform. * `leatherman`: Additional functionality provided by Leatherman for consumers. Includes: * Helpers for dealing with variables and scopes * Debugging macros * `cpplint` and `cppcheck` configuration * Logging configuration * Install command with cross-platform defaults * Symbol visibility configuration * `pod2man`: Adds a `pod2man` macro to generate man files from source. ### Internationalization (i18n) Leatherman and its components provide support for generating and using `gettext`-based message catalogs. #### Helper Functions Two helpers are provided for generating message catalogs: * `gettext_templates `: creates a `${PROJECT_NAME}.pot` target (used by `all`) that (re)generates the .pot file from specified source files. 
If the project is configured with `LEATHERMAN_LOCALES` containing a list of language codes, it will add a target `${PROJECT_NAME}-${LANG}.po` to create or update translation (.po) files matching those codes. Files are put in `dir`. To avoid `make clean` deleting these files, look at how the `locales` directory is structured. * `gettext_compile `: creates a `translation` target (also used by `all`) to generate the binary message catalogs (.mo files) and configure installing them to the specified install location (`inst`). `LEATHERMAN_LOCALES` expects a quoted semi-colon separated list, as in `LEATHERMAN_LOCALES="en;fr;ja"`. Normal use of cmake/make should ensure the translation files are up-to- date. Translations can be tested by setting the `LC_CTYPE` environment variable. #### Enabling i18n By default i18n support is disabled. To enable it, define `LEATHERMAN_I18N` when compiling your project. To do so, add these two lines to your projects CMakeLists.txt file below where you have `find_package(LEATHERMAN ...)` and also below where you do `include(cflags)`. ``` add_definitions(${LEATHERMAN_DEFINITIONS}) add_definitions(-DLEATHERMAN_I18N) ``` By default locale files are installed to `${CMAKE_INSTALL_PREFIX}/share/locale`. This behavior can be changed to use environment variables for the prefix instead by defining `LEATHERMAN_LOCALE_VAR` and `LEATHERMAN_LOCALE_INSTALL`. `LEATHERMAN_LOCALE_VAR` should refer to an environment variable pointing to the root of the Leatherman install, while `LEATHERMAN_LOCALE_INSTALL` should contain a path relative to that location, where locale files should be installed and searched for at run time. For example, if Leatherman is installed to `C:/tools`, and you would like to install translation files to `C:/languages/leatherman`, you can create an environment variable (e.g. `$LEATHERMAN_LOCATION`) containing `C:\tools`, then set `LEATHERMAN_LOCALE_VAR=LEATHERMAN_LOCATION` and `LEATHERMAN_LOCALE_INSTALL=../languages/leatherman`. Then locale files will be installed to `C:/tools/../languages/leatherman` and at runtime Leatherman will search for locale files there. To ensure that consuming projects also install their locale files to the right location, it is recommended to set `LEATHERMAN_LOCALE_INSTALL` for all projects attempting to use Leatherman's i18n tooling. #### Extracting and Translating Text The format strings in logging (the first argument) will automatically be extracted for the translation template file and translated. Substitution arguments will not, and must be explicitly translated. To translate strings outside of logging, use the `leatherman::locale::translate` and `leatherman::locale::format` helpers. Strings passed to the helpers will be extracted to .po files. There are several versions of these helpers: * Basic version (`translate`, `format`) for most standard translations. ``` translate("Apple"); ``` * Pluralized (`translate_n`, `format_n`) when translation depends on number of items. ``` // Note the parameter duplication: The first count value `2` selects the appropriate // translated message, and the second `2` fills in the `{1}` substitution token. format_n("{1} Apple", "{1} Apples", 2, 2); ``` * Prefixed-context (`translate_p`, `format_p`) when a word or phrase has multiple meanings. 
```
translate_p("Fruit", "Apple")
```

* Pluralized and prefixed-context (`translate_np`, `format_np`)

```
format_np("Fruit", "{1} Apple", "{1} Apples", 3, 3);
```

`leatherman::locale::format` is a replacement for [`boost::locale::format`](http://www.boost.org/doc/libs/1_58_0/libs/locale/doc/html/localized_text_formatting.html), which adds locale-aware formatting to `boost::format`, but requires different substitution tokens. To support transparently enabling `LEATHERMAN_I18N` for only some platforms in a project, `leatherman::locale::format` falls back to using `boost::format`, and will convert substitution tokens using the regex `{(\d+)}` to `%\1%`. To be safe, assume both formats are special when using `format`, and use `{N}` as the substitution token for your strings. If you need to support both modes and use advanced substitution strings, you'll have to use an `#ifdef LEATHERMAN_I18N` block to use the correct string.

To use `leatherman::locale::translate` or `leatherman::locale::format` in your project, add an include to the top of your cpp file:

```
#include <leatherman/locale/locale.hpp>
```

Next, if you would like to use any of the functions, you could do so by following this example:

```
std::cout << leatherman::locale::translate("This is translated") << std::endl;
std::cout << leatherman::locale::format("This is {1} translated message", 1) << std::endl;
```

Leatherman also provides format helpers with short names: _(), n_(), p_(), np_(). These reduce code disruption when adding i18n support, and naming is consistent with macros from other i18n libraries.

```
using namespace leatherman::locale;
std::cout << _("This is translated") << std::endl;
std::cout << _("This is {1} translated message", 1) << std::endl;
```

#### Limitations

Note that on Windows, when building Leatherman.Locale as a DLL and Boost.Locale statically, you can get some weird behavior from Boost.Locale. Avoid using it directly, and ensure all translation operations happen as part of the Leatherman.Locale DLL memory space (i.e. in source files).

Translation isn't supported on AIX or Solaris, as GCC on those platforms doesn't support `std::locale`. In fact std::locale is buggy, so avoid using `get_locale` as well. The CMake option `LEATHERMAN_USE_LOCALES` can be used to enable or disable building with Boost.Locale and using `std::locale`.

#### Debugging

If output strings are not being translated, [gettext's FAQ](https://www.gnu.org/software/gettext/FAQ.html#integrating_noop) has some suggestions for debugging.

### Using Logging

Each `.cc` file that uses logging (or includes a header which uses logging) needs to know its logging namespace. This can be set by defining `LEATHERMAN_LOGGING_NAMESPACE` to a string such as "leatherman.logging" or "puppetlabs.facter". Since typically a large number of files at once will need to use the same logging namespace, leatherman provides a CMake macro to set it globally. This can be used as follows:

    ...
    include(leatherman)
    leatherman_logging_namespace("logging.namespace")
    ...

Initializing logging via setup\_logging will configure the ostream for the default UTF-8 locale (or the specified locale).

### Using Catch

Since [Catch][1] is a testing-only utility, its include directory is excluded from LEATHERMAN\_INCLUDE\_DIRS. To use Catch, explicitly add

    include_directories(${LEATHERMAN_CATCH_INCLUDE})

to the CMakeLists.txt file of your testing directory.

### Using Windows

In order to use the Windows libraries, Logging must be set up. 
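Setting up logging, which the Windows note above depends on, can be done from a single translation unit. The following is a minimal sketch rather than canonical usage: it assumes the `LOG_*` macros, `setup_logging`, `set_level`, and the `log_level` enum from `leatherman/logging/logging.hpp`, and the exact overloads and substitution-token style may vary with your Leatherman version and `LEATHERMAN_I18N` setting.

```
// Illustrative sketch only; verify exact signatures against your Leatherman version.
// The namespace is normally set project-wide via the leatherman_logging_namespace()
// CMake macro shown above; defining it here keeps the example self-contained.
#define LEATHERMAN_LOGGING_NAMESPACE "example.app"
#include <leatherman/logging/logging.hpp>
#include <boost/nowide/iostream.hpp>

int main() {
    // Configure the output stream (UTF-8 locale by default) before emitting any messages.
    leatherman::logging::setup_logging(boost::nowide::cout);
    leatherman::logging::set_level(leatherman::logging::log_level::debug);

    LOG_INFO("starting up");
    LOG_DEBUG("loaded {1} components", 3);  // format-string substitution as described above
    return 0;
}
```

In a real consumer the `#define` would come from the CMake macro rather than the source file, and the log level would typically be driven by a command-line option.
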
### Using JsonContainer To use JsonContainer, you must enable [RapidJSON][2] that is included as a leatherman component. Please refer to the [JsonContainer documentation][3] for API details. ### Using curl To use the curl wrapper library, libcurl must be installed. On Ubuntu use the following: apt-get install libcurl4-openssl-dev On Windows, in Powershell, use: (New-Object net.webclient).DownloadFile("http://curl.haxx.se/download/curl-7.42.1.zip", "C:\tools\curl-7.42.1.zip") & 7za x "curl-7.42.1.zip" | FIND /V "ing " cd curl-7.42.1 mkdir -Path C:\tools\curl-7.42.1-x86_64_mingw-w64_4.8.4_win32_seh\include cp -r include\curl C:\tools\curl-7.42.1-x86_64_mingw-w64_4.8.4_win32_seh\include mkdir -Path C:\tools\curl-7.42.1-x86_64_mingw-w64_4.8.4_win32_seh\lib cp lib\libcurl.a C:\tools\curl-7.42.1-x86_64_mingw-w64_4.8.4_win32_seh\lib On Windows CMake must also be manually pointed to the correct directory by passing the argument `-DCMAKE_PREFIX_PATH="C:\tools\curl-7.42.1-x86_64_mingw-w64_4.8.4_win32_seh`. ## Extending Leatherman Adding a new library to leatherman is easy! * Add a new subdirectory with the name of your library * Add an appropriate `add_leatherman_dir` invocation to the top-level `CMakeLists.txt` * Fill in the headers, sources, and tests of your library. The typical directory structure is below. The `CmakeLists.txt` file for a library is used both at build time and during a `find_package` call for Leatherman. This allows library dependencies to be handled identically during both build and find operations. Because of this, certain build configuration settings might need to be gated on a check for `BUILDING_LEATHERMAN`. See the `logging` library for an example of how this is done. ### Typical Leatherman Directory Structure leatherman/ libname/ CMakeLists.txt src/ srcfile.cc inc/leatherman/ header.hpp tests/ testfile.cc ### Sample Library CMakeLists.txt file add_leatherman_library("src/srcfile.cc") add_leatherman_test("tests/testfile.cc") add_leatherman_headers("inc/leatherman") More complex libraries may have dependencies. See the `locale` library for a simple example of how dependencies are handled by leatherman libraries. ### Vendoring Other Libraries Sometimes it's necessary to vendor a 3rd-party library in Leatherman. In these cases the standard Leatherman macros probably won't help you, and you'll need to write a lower-level CMake file. This README can't cover all the possible situations here, but the `nowide` and `catch` CMake files are both solid examples. ## How To Release 1. Update [CHANGELOG.md](CHANGELOG.md) with release notes based on ``git log `git describe --abbrev=0 --tags`..HEAD`` 1. Update the version in the project declaration of [CMakeLists.txt](CMakeLists.txt) 1. Build with gettext to ensure translations are up-to-date 1. `git tag -s -m '' && git push refs/tags/` 1. 
Send out an announcement e-mail [1]: https://github.com/philsquared/Catch [2]: https://github.com/miloyip/rapidjson [3]: json_container/README.md leatherman-1.4.2+dfsg/appveyor.yml000064400000000000000000000033251332360634000171460ustar00rootroot00000000000000clone_depth: 10 environment: matrix: - shared: OFF - shared: ON init: - | choco install -y mingw-w64 -Version 5.2.0 -source https://www.myget.org/F/puppetlabs choco install -y cmake -Version 3.2.2 -source https://www.myget.org/F/puppetlabs choco install -y gettext -Version 0.19.6 -source https://www.myget.org/F/puppetlabs choco install -y pl-toolchain-x64 -Version 2015.12.01.1 -source https://www.myget.org/F/puppetlabs choco install -y pl-boost-x64 -Version 1.58.0.2 -source https://www.myget.org/F/puppetlabs choco install -y pl-openssl-x64 -Version 1.0.24.1 -source https://www.myget.org/F/puppetlabs choco install -y pl-curl-x64 -Version 7.46.0.1 -source https://www.myget.org/F/puppetlabs - ps: | $env:PATH = $env:PATH.Replace("Git\bin", "Git\cmd") $env:PATH = $env:PATH.Replace("Git\usr\bin", "Git\cmd") install: - SET PATH=C:\Ruby21-x64\bin;C:\tools\mingw64\bin;C:\Program Files\gettext-iconv;%PATH% build_script: - ps: | cmake -G "MinGW Makefiles" -DCMAKE_TOOLCHAIN_FILE="C:\tools\pl-build-tools\pl-build-toolchain.cmake" -DCMAKE_INSTALL_PREFIX=C:\tools\leatherman -DBOOST_STATIC=ON -DLEATHERMAN_SHARED="$env:shared" . mingw32-make -j2 test_script: - ps: | ctest -V 2>&1 | %{ if ($_ -is [System.Management.Automation.ErrorRecord]) { $_ | c++filt } else { $_ } } mingw32-make install 7z.exe a -t7z leatherman.7z C:\tools\leatherman\ artifacts: - path: leatherman.7z name: leatherman.7z deploy: description: Leatherman build from AppVeyor provider: GitHub auth_token: secure: pSPKogvXGsTpt4hZPOWEMPpiwcYCKpTuQODqmsdN34Av6nN640H0DPiK/sKwDhMP artifact: leatherman.7z on: appveyor_repo_tag: true shared: OFF leatherman-1.4.2+dfsg/catch/000075500000000000000000000000001332360634000156355ustar00rootroot00000000000000leatherman-1.4.2+dfsg/catch/CMakeLists.txt000064400000000000000000000001141332360634000203710ustar00rootroot00000000000000add_leatherman_vendored("Catch-1.10.0.zip" "Catch-1.10.0" "single_include") leatherman-1.4.2+dfsg/cmake/000075500000000000000000000000001332360634000156335ustar00rootroot00000000000000leatherman-1.4.2+dfsg/cmake/FindICU.cmake000064400000000000000000001117721332360634000200670ustar00rootroot00000000000000# This module can find the International Components for Unicode (ICU) libraries # # Requirements: # - CMake >= 2.8.3 (for new version of find_package_handle_standard_args) # # The following variables will be defined for your use: # - ICU_FOUND : were all of your specified components found? # - ICU_INCLUDE_DIRS : ICU include directory # - ICU_LIBRARIES : ICU libraries # - ICU_VERSION : complete version of ICU (x.y.z) # - ICU_VERSION_MAJOR : major version of ICU # - ICU_VERSION_MINOR : minor version of ICU # - ICU_VERSION_PATCH : patch version of ICU # - ICU__FOUND : were found? (FALSE for non specified component if it is not a dependency) # # For windows or non standard installation, define ICU_ROOT_DIR variable to point to the root installation of ICU. Two ways: # - run cmake with -DICU_ROOT_DIR= # - define an environment variable with the same name before running cmake # With cmake-gui, before pressing "Configure": # 1) Press "Add Entry" button # 2) Add a new entry defined as: # - Name: ICU_ROOT_DIR # - Type: choose PATH in the selection list # - Press "..." 
button and select the root installation of ICU # # Example Usage: # # 1. Copy this file in the root of your project source directory # 2. Then, tell CMake to search this non-standard module in your project directory by adding to your CMakeLists.txt: # set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}) # 3. Finally call find_package() once, here are some examples to pick from # # Require ICU 4.4 or later # find_package(ICU 4.4 REQUIRED) # # if(ICU_FOUND) # add_executable(myapp myapp.c) # include_directories(${ICU_INCLUDE_DIRS}) # target_link_libraries(myapp ${ICU_LIBRARIES}) # # with CMake >= 3.0.0, the last two lines can be replaced by the following # target_link_libraries(myapp ICU::ICU) # endif(ICU_FOUND) ########## ########## find_package(PkgConfig QUIET) ########## Private ########## if(NOT DEFINED ICU_PUBLIC_VAR_NS) set(ICU_PUBLIC_VAR_NS "ICU") # Prefix for all ICU relative public variables endif(NOT DEFINED ICU_PUBLIC_VAR_NS) if(NOT DEFINED ICU_PRIVATE_VAR_NS) set(ICU_PRIVATE_VAR_NS "_${ICU_PUBLIC_VAR_NS}") # Prefix for all ICU relative internal variables endif(NOT DEFINED ICU_PRIVATE_VAR_NS) if(NOT DEFINED PC_ICU_PRIVATE_VAR_NS) set(PC_ICU_PRIVATE_VAR_NS "_PC${ICU_PRIVATE_VAR_NS}") # Prefix for all pkg-config relative internal variables endif(NOT DEFINED PC_ICU_PRIVATE_VAR_NS) set(${ICU_PRIVATE_VAR_NS}_HINTS ) # # for future removal if(DEFINED ENV{ICU_ROOT}) list(APPEND ${ICU_PRIVATE_VAR_NS}_HINTS "$ENV{ICU_ROOT}") message(AUTHOR_WARNING "ENV{ICU_ROOT} is deprecated in favor of ENV{ICU_ROOT_DIR}") endif(DEFINED ENV{ICU_ROOT}) if (DEFINED ICU_ROOT) list(APPEND ${ICU_PRIVATE_VAR_NS}_HINTS "${ICU_ROOT}") message(AUTHOR_WARNING "ICU_ROOT is deprecated in favor of ICU_ROOT_DIR") endif(DEFINED ICU_ROOT) # if(DEFINED ENV{ICU_ROOT_DIR}) list(APPEND ${ICU_PRIVATE_VAR_NS}_HINTS "$ENV{ICU_ROOT_DIR}") endif(DEFINED ENV{ICU_ROOT_DIR}) if (DEFINED ICU_ROOT_DIR) list(APPEND ${ICU_PRIVATE_VAR_NS}_HINTS "${ICU_ROOT_DIR}") endif(DEFINED ICU_ROOT_DIR) set(${ICU_PRIVATE_VAR_NS}_COMPONENTS ) # ... 
macro(_icu_declare_component _NAME) list(APPEND ${ICU_PRIVATE_VAR_NS}_COMPONENTS ${_NAME}) set("${ICU_PRIVATE_VAR_NS}_COMPONENTS_${_NAME}" ${ARGN}) endmacro(_icu_declare_component) _icu_declare_component(data icudata) _icu_declare_component(uc icuuc) # Common and Data libraries _icu_declare_component(i18n icui18n icuin) # Internationalization library _icu_declare_component(io icuio ustdio) # Stream and I/O Library _icu_declare_component(le icule) # Layout library _icu_declare_component(lx iculx) # Paragraph Layout library ########## Public ########## set(${ICU_PUBLIC_VAR_NS}_FOUND FALSE) set(${ICU_PUBLIC_VAR_NS}_LIBRARIES ) set(${ICU_PUBLIC_VAR_NS}_INCLUDE_DIRS ) set(${ICU_PUBLIC_VAR_NS}_C_FLAGS "") set(${ICU_PUBLIC_VAR_NS}_CXX_FLAGS "") set(${ICU_PUBLIC_VAR_NS}_CPP_FLAGS "") set(${ICU_PUBLIC_VAR_NS}_C_SHARED_FLAGS "") set(${ICU_PUBLIC_VAR_NS}_CXX_SHARED_FLAGS "") set(${ICU_PUBLIC_VAR_NS}_CPP_SHARED_FLAGS "") foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT ${${ICU_PRIVATE_VAR_NS}_COMPONENTS}) string(TOUPPER "${${ICU_PRIVATE_VAR_NS}_COMPONENT}" ${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT) set("${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_FOUND" FALSE) # may be done in the _icu_declare_component macro endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT) # Check components if(NOT ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS) # uc required at least set(${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS uc) else(NOT ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS) list(APPEND ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS uc) list(REMOVE_DUPLICATES ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS) foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT ${${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS}) if(NOT DEFINED ${ICU_PRIVATE_VAR_NS}_COMPONENTS_${${ICU_PRIVATE_VAR_NS}_COMPONENT}) message(FATAL_ERROR "Unknown ICU component: ${${ICU_PRIVATE_VAR_NS}_COMPONENT}") endif(NOT DEFINED ${ICU_PRIVATE_VAR_NS}_COMPONENTS_${${ICU_PRIVATE_VAR_NS}_COMPONENT}) endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT) endif(NOT ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS) # if pkg-config is available check components dependencies and append `pkg-config icu- --variable=prefix` to hints if(PKG_CONFIG_FOUND) set(${ICU_PRIVATE_VAR_NS}_COMPONENTS_DUP ${${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS}) foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT ${${ICU_PRIVATE_VAR_NS}_COMPONENTS_DUP}) pkg_check_modules(${PC_ICU_PRIVATE_VAR_NS} "icu-${${ICU_PRIVATE_VAR_NS}_COMPONENT}" QUIET) if(${PC_ICU_PRIVATE_VAR_NS}_FOUND) list(APPEND ${ICU_PRIVATE_VAR_NS}_HINTS ${${PC_ICU_PRIVATE_VAR_NS}_PREFIX}) foreach(${PC_ICU_PRIVATE_VAR_NS}_LIBRARY ${${PC_ICU_PRIVATE_VAR_NS}_LIBRARIES}) string(REGEX REPLACE "^icu" "" ${PC_ICU_PRIVATE_VAR_NS}_STRIPPED_LIBRARY ${${PC_ICU_PRIVATE_VAR_NS}_LIBRARY}) if(NOT ${PC_ICU_PRIVATE_VAR_NS}_STRIPPED_LIBRARY STREQUAL "data") list(FIND ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS ${${PC_ICU_PRIVATE_VAR_NS}_STRIPPED_LIBRARY} ${ICU_PRIVATE_VAR_NS}_COMPONENT_INDEX) if(${ICU_PRIVATE_VAR_NS}_COMPONENT_INDEX EQUAL -1) message(WARNING "Missing component dependency: ${${PC_ICU_PRIVATE_VAR_NS}_STRIPPED_LIBRARY}. 
Add it to your find_package(ICU) line as COMPONENTS to fix this warning.") list(APPEND ${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS ${${PC_ICU_PRIVATE_VAR_NS}_STRIPPED_LIBRARY}) endif(${ICU_PRIVATE_VAR_NS}_COMPONENT_INDEX EQUAL -1) endif(NOT ${PC_ICU_PRIVATE_VAR_NS}_STRIPPED_LIBRARY STREQUAL "data") endforeach(${PC_ICU_PRIVATE_VAR_NS}_LIBRARY) endif(${PC_ICU_PRIVATE_VAR_NS}_FOUND) endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT) endif(PKG_CONFIG_FOUND) # list(APPEND ${ICU_PRIVATE_VAR_NS}_HINTS ENV ICU_ROOT_DIR) # message("${ICU_PRIVATE_VAR_NS}_HINTS = ${${ICU_PRIVATE_VAR_NS}_HINTS}") # Includes find_path( ${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR NAMES unicode/utypes.h utypes.h HINTS ${${ICU_PRIVATE_VAR_NS}_HINTS} PATH_SUFFIXES "include" DOC "Include directories for ICU" ) if(${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR) ########## ########## if(EXISTS "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/unicode/uvernum.h") # ICU >= 4.4 file(READ "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/unicode/uvernum.h" ${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS) elseif(EXISTS "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/unicode/uversion.h") # ICU [2;4.4[ file(READ "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/unicode/uversion.h" ${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS) elseif(EXISTS "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/unicode/utypes.h") # ICU [1.4;2[ file(READ "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/unicode/utypes.h" ${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS) elseif(EXISTS "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/utypes.h") # ICU 1.3 file(READ "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}/utypes.h" ${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS) else() message(FATAL_ERROR "ICU version header not found") endif() if(${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS MATCHES ".*# *define *ICU_VERSION *\"([0-9]+)\".*") # ICU 1.3 # [1.3;1.4[ as #define ICU_VERSION "3" (no patch version, ie all 1.3.X versions will be detected as 1.3.0) set(${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR "1") set(${ICU_PUBLIC_VAR_NS}_VERSION_MINOR "${CMAKE_MATCH_1}") set(${ICU_PUBLIC_VAR_NS}_VERSION_PATCH "0") elseif(${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS MATCHES ".*# *define *U_ICU_VERSION_MAJOR_NUM *([0-9]+).*") # # Since version 4.9.1, ICU release version numbering was totaly changed, see: # - http://site.icu-project.org/download/49 # - http://userguide.icu-project.org/design#TOC-Version-Numbers-in-ICU # set(${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR "${CMAKE_MATCH_1}") string(REGEX REPLACE ".*# *define *U_ICU_VERSION_MINOR_NUM *([0-9]+).*" "\\1" ${ICU_PUBLIC_VAR_NS}_VERSION_MINOR "${${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS}") string(REGEX REPLACE ".*# *define *U_ICU_VERSION_PATCHLEVEL_NUM *([0-9]+).*" "\\1" ${ICU_PUBLIC_VAR_NS}_VERSION_PATCH "${${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS}") elseif(${ICU_PRIVATE_VAR_NS}_VERSION_HEADER_CONTENTS MATCHES ".*# *define *U_ICU_VERSION *\"(([0-9]+)(\\.[0-9]+)*)\".*") # ICU [1.4;1.8[ # [1.4;1.8[ as #define U_ICU_VERSION "1.4.1.2" but it seems that some 1.4.[12](?:\.\d)? 
have releasing error and appears as 1.4.0 set(${ICU_PRIVATE_VAR_NS}_FULL_VERSION "${CMAKE_MATCH_1}") # copy CMAKE_MATCH_1, no longer valid on the following if if(${ICU_PRIVATE_VAR_NS}_FULL_VERSION MATCHES "^([0-9]+)\\.([0-9]+)$") set(${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR "${CMAKE_MATCH_1}") set(${ICU_PUBLIC_VAR_NS}_VERSION_MINOR "${CMAKE_MATCH_2}") set(${ICU_PUBLIC_VAR_NS}_VERSION_PATCH "0") elseif(${ICU_PRIVATE_VAR_NS}_FULL_VERSION MATCHES "^([0-9]+)\\.([0-9]+)\\.([0-9]+)") set(${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR "${CMAKE_MATCH_1}") set(${ICU_PUBLIC_VAR_NS}_VERSION_MINOR "${CMAKE_MATCH_2}") set(${ICU_PUBLIC_VAR_NS}_VERSION_PATCH "${CMAKE_MATCH_3}") endif() else() message(FATAL_ERROR "failed to detect ICU version") endif() set(${ICU_PUBLIC_VAR_NS}_VERSION "${${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR}.${${ICU_PUBLIC_VAR_NS}_VERSION_MINOR}.${${ICU_PUBLIC_VAR_NS}_VERSION_PATCH}") ########## ########## endif(${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR) # Check libraries if(MSVC) include(SelectLibraryConfigurations) endif(MSVC) foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT ${${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS}) string(TOUPPER "${${ICU_PRIVATE_VAR_NS}_COMPONENT}" ${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT) if(MSVC) set(${ICU_PRIVATE_VAR_NS}_POSSIBLE_RELEASE_NAMES ) set(${ICU_PRIVATE_VAR_NS}_POSSIBLE_DEBUG_NAMES ) foreach(${ICU_PRIVATE_VAR_NS}_BASE_NAME ${${ICU_PRIVATE_VAR_NS}_COMPONENTS_${${ICU_PRIVATE_VAR_NS}_COMPONENT}}) list(APPEND ${ICU_PRIVATE_VAR_NS}_POSSIBLE_RELEASE_NAMES "${${ICU_PRIVATE_VAR_NS}_BASE_NAME}") list(APPEND ${ICU_PRIVATE_VAR_NS}_POSSIBLE_DEBUG_NAMES "${${ICU_PRIVATE_VAR_NS}_BASE_NAME}d") list(APPEND ${ICU_PRIVATE_VAR_NS}_POSSIBLE_RELEASE_NAMES "${${ICU_PRIVATE_VAR_NS}_BASE_NAME}${${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR}${${ICU_PUBLIC_VAR_NS}_VERSION_MINOR}") list(APPEND ${ICU_PRIVATE_VAR_NS}_POSSIBLE_DEBUG_NAMES "${${ICU_PRIVATE_VAR_NS}_BASE_NAME}${${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR}${${ICU_PUBLIC_VAR_NS}_VERSION_MINOR}d") endforeach(${ICU_PRIVATE_VAR_NS}_BASE_NAME) find_library( ${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_RELEASE NAMES ${${ICU_PRIVATE_VAR_NS}_POSSIBLE_RELEASE_NAMES} HINTS ${${ICU_PRIVATE_VAR_NS}_HINTS} DOC "Release library for ICU ${${ICU_PRIVATE_VAR_NS}_COMPONENT} component" ) find_library( ${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_DEBUG NAMES ${${ICU_PRIVATE_VAR_NS}_POSSIBLE_DEBUG_NAMES} HINTS ${${ICU_PRIVATE_VAR_NS}_HINTS} DOC "Debug library for ICU ${${ICU_PRIVATE_VAR_NS}_COMPONENT} component" ) select_library_configurations("${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}") list(APPEND ${ICU_PUBLIC_VAR_NS}_LIBRARY ${${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY}) else(MSVC) find_library( ${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY NAMES ${${ICU_PRIVATE_VAR_NS}_COMPONENTS_${${ICU_PRIVATE_VAR_NS}_COMPONENT}} PATHS ${${ICU_PRIVATE_VAR_NS}_HINTS} DOC "Library for ICU ${${ICU_PRIVATE_VAR_NS}_COMPONENT} component" ) if(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY) set("${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_FOUND" TRUE) list(APPEND ${ICU_PUBLIC_VAR_NS}_LIBRARY ${${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY}) endif(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY) endif(MSVC) endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT) # Try to find out compiler flags find_program(${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE icu-config HINTS ${${ICU_PRIVATE_VAR_NS}_HINTS}) 
if(${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE) execute_process(COMMAND ${${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE} --cflags OUTPUT_VARIABLE ${ICU_PUBLIC_VAR_NS}_C_FLAGS OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND ${${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE} --cxxflags OUTPUT_VARIABLE ${ICU_PUBLIC_VAR_NS}_CXX_FLAGS OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND ${${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE} --cppflags OUTPUT_VARIABLE ${ICU_PUBLIC_VAR_NS}_CPP_FLAGS OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND ${${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE} --cflags-dynamic OUTPUT_VARIABLE ${ICU_PUBLIC_VAR_NS}_C_SHARED_FLAGS OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND ${${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE} --cxxflags-dynamic OUTPUT_VARIABLE ${ICU_PUBLIC_VAR_NS}_CXX_SHARED_FLAGS OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND ${${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE} --cppflags-dynamic OUTPUT_VARIABLE ${ICU_PUBLIC_VAR_NS}_CPP_SHARED_FLAGS OUTPUT_STRIP_TRAILING_WHITESPACE) endif(${ICU_PUBLIC_VAR_NS}_CONFIG_EXECUTABLE) # Check find_package arguments include(FindPackageHandleStandardArgs) if(${ICU_PUBLIC_VAR_NS}_FIND_REQUIRED AND NOT ${ICU_PUBLIC_VAR_NS}_FIND_QUIETLY) find_package_handle_standard_args( ${ICU_PUBLIC_VAR_NS} REQUIRED_VARS ${ICU_PUBLIC_VAR_NS}_LIBRARY ${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR VERSION_VAR ${ICU_PUBLIC_VAR_NS}_VERSION ) else(${ICU_PUBLIC_VAR_NS}_FIND_REQUIRED AND NOT ${ICU_PUBLIC_VAR_NS}_FIND_QUIETLY) find_package_handle_standard_args(${ICU_PUBLIC_VAR_NS} "Could NOT find ICU" ${ICU_PUBLIC_VAR_NS}_LIBRARY ${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR) endif(${ICU_PUBLIC_VAR_NS}_FIND_REQUIRED AND NOT ${ICU_PUBLIC_VAR_NS}_FIND_QUIETLY) if(${ICU_PUBLIC_VAR_NS}_FOUND) # # for compatibility with previous versions, alias old ICU_(MAJOR|MINOR|PATCH)_VERSION to ICU_VERSION_$1 set(${ICU_PUBLIC_VAR_NS}_MAJOR_VERSION ${${ICU_PUBLIC_VAR_NS}_VERSION_MAJOR}) set(${ICU_PUBLIC_VAR_NS}_MINOR_VERSION ${${ICU_PUBLIC_VAR_NS}_VERSION_MINOR}) set(${ICU_PUBLIC_VAR_NS}_PATCH_VERSION ${${ICU_PUBLIC_VAR_NS}_VERSION_PATCH}) # set(${ICU_PUBLIC_VAR_NS}_LIBRARIES ${${ICU_PUBLIC_VAR_NS}_LIBRARY}) set(${ICU_PUBLIC_VAR_NS}_INCLUDE_DIRS ${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}) if(NOT CMAKE_VERSION VERSION_LESS "3.0.0") if(NOT TARGET ICU::ICU) add_library(ICU::ICU INTERFACE IMPORTED) endif(NOT TARGET ICU::ICU) set_target_properties(ICU::ICU PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}") foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT ${${ICU_PUBLIC_VAR_NS}_FIND_COMPONENTS}) string(TOUPPER "${${ICU_PRIVATE_VAR_NS}_COMPONENT}" ${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT) add_library("ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" UNKNOWN IMPORTED) if(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_RELEASE) set_property(TARGET "ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) set_target_properties("ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" PROPERTIES IMPORTED_LOCATION_RELEASE "${${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_RELEASE}") endif(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_RELEASE) if(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_DEBUG) set_property(TARGET "ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG) set_target_properties("ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" PROPERTIES IMPORTED_LOCATION_DEBUG 
"${${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_DEBUG}") endif(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY_DEBUG) if(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY) set_target_properties("ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" PROPERTIES IMPORTED_LOCATION "${${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY}") endif(${ICU_PUBLIC_VAR_NS}_${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_LIBRARY) set_property(TARGET ICU::ICU APPEND PROPERTY INTERFACE_LINK_LIBRARIES "ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}") # set_target_properties("ICU::${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}" PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR}") endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT) endif(NOT CMAKE_VERSION VERSION_LESS "3.0.0") endif(${ICU_PUBLIC_VAR_NS}_FOUND) mark_as_advanced( ${ICU_PUBLIC_VAR_NS}_INCLUDE_DIR ${ICU_PUBLIC_VAR_NS}_LIBRARY ) ########## ########## ########## ########## ########## Private ########## function(_icu_extract_locale_from_rb _BUNDLE_SOURCE _RETURN_VAR_NAME) file(READ "${_BUNDLE_SOURCE}" _BUNDLE_CONTENTS) string(REGEX REPLACE "//[^\n]*\n" "" _BUNDLE_CONTENTS_WITHOUT_COMMENTS ${_BUNDLE_CONTENTS}) string(REGEX REPLACE "[ \t\n]" "" _BUNDLE_CONTENTS_WITHOUT_COMMENTS_AND_SPACES ${_BUNDLE_CONTENTS_WITHOUT_COMMENTS}) string(REGEX MATCH "^([a-zA-Z_-]+)(:table)?{" LOCALE_FOUND ${_BUNDLE_CONTENTS_WITHOUT_COMMENTS_AND_SPACES}) set("${_RETURN_VAR_NAME}" "${CMAKE_MATCH_1}" PARENT_SCOPE) endfunction(_icu_extract_locale_from_rb) ########## Public ########## # # Prototype: # icu_generate_resource_bundle([NAME ] [PACKAGE] [DESTINATION ] [FILES ]) # # Common arguments: # - NAME : name of output package and to create dummy targets # - FILES ... : list of resource bundles sources # - DEPENDS ... : required to package as library (shared or static), a list of cmake parent targets to link to # Note: only (PREVIOUSLY DECLARED) add_executable and add_library as dependencies # - DESTINATION : optional, directory where to install final binary file(s) # - FORMAT : optional, one of none (ICU4C binary format, default), java (plain java) or xliff (XML), see below # # Arguments depending on FORMAT: # - none (default): # * PACKAGE : if present, package all resource bundles together. 
Default is to stop after building individual *.res files # * TYPE : one of : # + common or archive (default) : archive all resource bundles into a single .dat file # + library or dll : assemble all resource bundles into a separate and loadable library (.dll/.so) # + static : integrate all resource bundles into the targets designated by the DEPENDS parameter (as a static library) # * NO_SHARED_FLAGS : only with TYPE in ['library', 'dll', 'static'], do not append ICU_C(XX)_SHARED_FLAGS to targets given as DEPENDS argument # - JAVA: # * BUNDLE : required, prefix for generated classnames # - XLIFF: # (none) # # # For an archive, the idea is to generate the following dependencies: # # root.txt => root.res \ # | # en.txt => en.res | # | => pkglist.txt => application.dat # fr.txt => fr.res | # | # and so on / # # Legend: 'A => B' means B depends on A # # Steps (correspond to arrows): # 1) genrb (from .txt to .res) # 2) generate a text file (pkglist.txt) with all .res files to put together # 3) build final archive (from *.res/pkglist.txt to .dat) # function(icu_generate_resource_bundle) ##### ##### find_program(${ICU_PUBLIC_VAR_NS}_GENRB_EXECUTABLE genrb HINTS ${${ICU_PRIVATE_VAR_NS}_HINTS}) find_program(${ICU_PUBLIC_VAR_NS}_PKGDATA_EXECUTABLE pkgdata HINTS ${${ICU_PRIVATE_VAR_NS}_HINTS}) if(NOT ${ICU_PUBLIC_VAR_NS}_GENRB_EXECUTABLE) message(FATAL_ERROR "genrb not found") endif(NOT ${ICU_PUBLIC_VAR_NS}_GENRB_EXECUTABLE) if(NOT ${ICU_PUBLIC_VAR_NS}_PKGDATA_EXECUTABLE) message(FATAL_ERROR "pkgdata not found") endif(NOT ${ICU_PUBLIC_VAR_NS}_PKGDATA_EXECUTABLE) ##### ##### ##### ##### set(TARGET_SEPARATOR "+") set(__FUNCTION__ "icu_generate_resource_bundle") set(PACKAGE_TARGET_PREFIX "ICU${TARGET_SEPARATOR}PKG") set(RESOURCE_TARGET_PREFIX "ICU${TARGET_SEPARATOR}RB") ##### ##### ##### ##### # filename extension of built resource bundle (without dot) set(BUNDLES__SUFFIX "res") set(BUNDLES_JAVA_SUFFIX "java") set(BUNDLES_XLIFF_SUFFIX "xlf") # alias: none (default) = common = archive ; dll = library ; static set(PKGDATA__ALIAS "") set(PKGDATA_COMMON_ALIAS "") set(PKGDATA_ARCHIVE_ALIAS "") set(PKGDATA_DLL_ALIAS "LIBRARY") set(PKGDATA_LIBRARY_ALIAS "LIBRARY") set(PKGDATA_STATIC_ALIAS "STATIC") # filename prefix of built package set(PKGDATA__PREFIX "") set(PKGDATA_LIBRARY_PREFIX "${CMAKE_SHARED_LIBRARY_PREFIX}") set(PKGDATA_STATIC_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}") # filename extension of built package (with dot) set(PKGDATA__SUFFIX ".dat") set(PKGDATA_LIBRARY_SUFFIX "${CMAKE_SHARED_LIBRARY_SUFFIX}") set(PKGDATA_STATIC_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}") # pkgdata option mode specific set(PKGDATA__OPTIONS "-m" "common") set(PKGDATA_STATIC_OPTIONS "-m" "static") set(PKGDATA_LIBRARY_OPTIONS "-m" "library") # cmake library type for output package set(PKGDATA_LIBRARY__TYPE "") set(PKGDATA_LIBRARY_STATIC_TYPE STATIC) set(PKGDATA_LIBRARY_LIBRARY_TYPE SHARED) ##### ##### include(CMakeParseArguments) cmake_parse_arguments( PARSED_ARGS # output variable name # options (true/false) (default value: false) "PACKAGE;NO_SHARED_FLAGS" # univalued parameters (default value: "") "NAME;DESTINATION;TYPE;FORMAT;BUNDLE" # multivalued parameters (default value: "") "FILES;DEPENDS" ${ARGN} ) # assert(${PARSED_ARGS_NAME} != "") if(NOT PARSED_ARGS_NAME) message(FATAL_ERROR "${__FUNCTION__}(): no name given, NAME parameter missing") endif(NOT PARSED_ARGS_NAME) # assert(length(PARSED_ARGS_FILES) > 0) list(LENGTH PARSED_ARGS_FILES PARSED_ARGS_FILES_LEN) if(PARSED_ARGS_FILES_LEN LESS 1) message(FATAL_ERROR "${__FUNCTION__}() expects
at least 1 resource bundle as FILES argument, 0 given") endif(PARSED_ARGS_FILES_LEN LESS 1) string(TOUPPER "${PARSED_ARGS_FORMAT}" UPPER_FORMAT) # assert(${UPPER_FORMAT} in ['', 'java', 'xlif']) if(NOT DEFINED BUNDLES_${UPPER_FORMAT}_SUFFIX) message(FATAL_ERROR "${__FUNCTION__}(): unknown FORMAT '${PARSED_ARGS_FORMAT}'") endif(NOT DEFINED BUNDLES_${UPPER_FORMAT}_SUFFIX) if(UPPER_FORMAT STREQUAL "JAVA") # assert(${PARSED_ARGS_BUNDLE} != "") if(NOT PARSED_ARGS_BUNDLE) message(FATAL_ERROR "${__FUNCTION__}(): java bundle name expected, BUNDLE parameter missing") endif(NOT PARSED_ARGS_BUNDLE) endif(UPPER_FORMAT STREQUAL "JAVA") if(PARSED_ARGS_PACKAGE) # assert(${PARSED_ARGS_FORMAT} == "") if(PARSED_ARGS_FORMAT) message(FATAL_ERROR "${__FUNCTION__}(): packaging is only supported for binary format, not xlif neither java outputs") endif(PARSED_ARGS_FORMAT) string(TOUPPER "${PARSED_ARGS_TYPE}" UPPER_MODE) # assert(${UPPER_MODE} in ['', 'common', 'archive', 'dll', library']) if(NOT DEFINED PKGDATA_${UPPER_MODE}_ALIAS) message(FATAL_ERROR "${__FUNCTION__}(): unknown TYPE '${PARSED_ARGS_TYPE}'") else(NOT DEFINED PKGDATA_${UPPER_MODE}_ALIAS) set(TYPE "${PKGDATA_${UPPER_MODE}_ALIAS}") endif(NOT DEFINED PKGDATA_${UPPER_MODE}_ALIAS) # Package name: strip file extension if present get_filename_component(PACKAGE_NAME_WE ${PARSED_ARGS_NAME} NAME_WE) # Target name to build package set(PACKAGE_TARGET_NAME "${PACKAGE_TARGET_PREFIX}${TARGET_SEPARATOR}${PACKAGE_NAME_WE}") # Target name to build intermediate list file set(PACKAGE_LIST_TARGET_NAME "${PACKAGE_TARGET_NAME}${TARGET_SEPARATOR}PKGLIST") # Directory (absolute) to set as "current directory" for genrb (does not include package directory, -p) # We make our "cook" there to prevent any conflict if(DEFINED CMAKE_PLATFORM_ROOT_BIN) # CMake < 2.8.10 set(RESOURCE_GENRB_CHDIR_DIR "${CMAKE_PLATFORM_ROOT_BIN}/${PACKAGE_TARGET_NAME}.dir/") else(DEFINED CMAKE_PLATFORM_ROOT_BIN) # CMake >= 2.8.10 set(RESOURCE_GENRB_CHDIR_DIR "${CMAKE_PLATFORM_INFO_DIR}/${PACKAGE_TARGET_NAME}.dir/") endif(DEFINED CMAKE_PLATFORM_ROOT_BIN) # Directory (absolute) where resource bundles are built: concatenation of RESOURCE_GENRB_CHDIR_DIR and package name set(RESOURCE_OUTPUT_DIR "${RESOURCE_GENRB_CHDIR_DIR}/${PACKAGE_NAME_WE}/") # Output (relative) path for built package if(MSVC AND TYPE STREQUAL PKGDATA_LIBRARY_ALIAS) set(PACKAGE_OUTPUT_PATH "${RESOURCE_GENRB_CHDIR_DIR}/${PACKAGE_NAME_WE}/${PKGDATA_${TYPE}_PREFIX}${PACKAGE_NAME_WE}${PKGDATA_${TYPE}_SUFFIX}") else(MSVC AND TYPE STREQUAL PKGDATA_LIBRARY_ALIAS) set(PACKAGE_OUTPUT_PATH "${RESOURCE_GENRB_CHDIR_DIR}/${PKGDATA_${TYPE}_PREFIX}${PACKAGE_NAME_WE}${PKGDATA_${TYPE}_SUFFIX}") endif(MSVC AND TYPE STREQUAL PKGDATA_LIBRARY_ALIAS) # Output (absolute) path for the list file set(PACKAGE_LIST_OUTPUT_PATH "${RESOURCE_GENRB_CHDIR_DIR}/pkglist.txt") file(MAKE_DIRECTORY "${RESOURCE_OUTPUT_DIR}") else(PARSED_ARGS_PACKAGE) set(RESOURCE_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/") # set(RESOURCE_GENRB_CHDIR_DIR "UNUSED") endif(PARSED_ARGS_PACKAGE) set(TARGET_RESOURCES ) set(COMPILED_RESOURCES_PATH ) set(COMPILED_RESOURCES_BASENAME ) foreach(RESOURCE_SOURCE ${PARSED_ARGS_FILES}) _icu_extract_locale_from_rb(${RESOURCE_SOURCE} RESOURCE_NAME_WE) get_filename_component(SOURCE_BASENAME ${RESOURCE_SOURCE} NAME) get_filename_component(ABSOLUTE_SOURCE ${RESOURCE_SOURCE} ABSOLUTE) if(UPPER_FORMAT STREQUAL "XLIFF") if(RESOURCE_NAME_WE STREQUAL "root") set(XLIFF_LANGUAGE "en") else(RESOURCE_NAME_WE STREQUAL "root") string(REGEX REPLACE "[^a-z].*$" "" 
XLIFF_LANGUAGE "${RESOURCE_NAME_WE}") endif(RESOURCE_NAME_WE STREQUAL "root") endif(UPPER_FORMAT STREQUAL "XLIFF") ##### ##### set(RESOURCE_TARGET_NAME "${RESOURCE_TARGET_PREFIX}${TARGET_SEPARATOR}${PARSED_ARGS_NAME}${TARGET_SEPARATOR}${RESOURCE_NAME_WE}") set(RESOURCE_OUTPUT__PATH "${RESOURCE_NAME_WE}.res") if(RESOURCE_NAME_WE STREQUAL "root") set(RESOURCE_OUTPUT_JAVA_PATH "${PARSED_ARGS_BUNDLE}.java") else(RESOURCE_NAME_WE STREQUAL "root") set(RESOURCE_OUTPUT_JAVA_PATH "${PARSED_ARGS_BUNDLE}_${RESOURCE_NAME_WE}.java") endif(RESOURCE_NAME_WE STREQUAL "root") set(RESOURCE_OUTPUT_XLIFF_PATH "${RESOURCE_NAME_WE}.xlf") set(GENRB__OPTIONS "") set(GENRB_JAVA_OPTIONS "-j" "-b" "${PARSED_ARGS_BUNDLE}") set(GENRB_XLIFF_OPTIONS "-x" "-l" "${XLIFF_LANGUAGE}") ##### ##### # build .res from .txt if(PARSED_ARGS_PACKAGE) add_custom_command( OUTPUT "${RESOURCE_OUTPUT_DIR}${RESOURCE_OUTPUT_${UPPER_FORMAT}_PATH}" COMMAND ${CMAKE_COMMAND} -E chdir ${RESOURCE_GENRB_CHDIR_DIR} ${${ICU_PUBLIC_VAR_NS}_GENRB_EXECUTABLE} ${GENRB_${UPPER_FORMAT}_OPTIONS} -d ${PACKAGE_NAME_WE} ${ABSOLUTE_SOURCE} DEPENDS ${RESOURCE_SOURCE} ) else(PARSED_ARGS_PACKAGE) add_custom_command( OUTPUT "${RESOURCE_OUTPUT_DIR}${RESOURCE_OUTPUT_${UPPER_FORMAT}_PATH}" COMMAND ${${ICU_PUBLIC_VAR_NS}_GENRB_EXECUTABLE} ${GENRB_${UPPER_FORMAT}_OPTIONS} -d ${RESOURCE_OUTPUT_DIR} ${ABSOLUTE_SOURCE} DEPENDS ${RESOURCE_SOURCE} ) endif(PARSED_ARGS_PACKAGE) # dummy target (ICU+RB+<name>+<locale>) for each locale to build the .res file from its .txt by the add_custom_command above add_custom_target( "${RESOURCE_TARGET_NAME}" ALL COMMENT "" DEPENDS "${RESOURCE_OUTPUT_DIR}${RESOURCE_OUTPUT_${UPPER_FORMAT}_PATH}" SOURCES ${RESOURCE_SOURCE} ) if(PARSED_ARGS_DESTINATION AND NOT PARSED_ARGS_PACKAGE) install(FILES "${RESOURCE_OUTPUT_DIR}${RESOURCE_OUTPUT_${UPPER_FORMAT}_PATH}" DESTINATION ${PARSED_ARGS_DESTINATION} PERMISSIONS OWNER_READ GROUP_READ WORLD_READ) endif(PARSED_ARGS_DESTINATION AND NOT PARSED_ARGS_PACKAGE) list(APPEND TARGET_RESOURCES "${RESOURCE_TARGET_NAME}") list(APPEND COMPILED_RESOURCES_PATH "${RESOURCE_OUTPUT_DIR}${RESOURCE_OUTPUT_${UPPER_FORMAT}_PATH}") list(APPEND COMPILED_RESOURCES_BASENAME "${RESOURCE_NAME_WE}.${BUNDLES_${UPPER_FORMAT}_SUFFIX}") endforeach(RESOURCE_SOURCE) # convert semicolon separated list to a space separated list # NOTE: if the pkglist.txt file starts (or ends?)
with a whitespace, pkgdata add an undefined symbol (named _) for it string(REPLACE ";" " " COMPILED_RESOURCES_BASENAME "${COMPILED_RESOURCES_BASENAME}") if(PARSED_ARGS_PACKAGE) # create a text file (pkglist.txt) with the list of the *.res to package together add_custom_command( OUTPUT "${PACKAGE_LIST_OUTPUT_PATH}" COMMAND ${CMAKE_COMMAND} -E echo "${COMPILED_RESOURCES_BASENAME}" > "${PACKAGE_LIST_OUTPUT_PATH}" DEPENDS ${COMPILED_RESOURCES_PATH} ) # run pkgdata from pkglist.txt add_custom_command( OUTPUT "${PACKAGE_OUTPUT_PATH}" COMMAND ${CMAKE_COMMAND} -E chdir ${RESOURCE_GENRB_CHDIR_DIR} ${${ICU_PUBLIC_VAR_NS}_PKGDATA_EXECUTABLE} -F ${PKGDATA_${TYPE}_OPTIONS} -s ${PACKAGE_NAME_WE} -p ${PACKAGE_NAME_WE} ${PACKAGE_LIST_OUTPUT_PATH} DEPENDS "${PACKAGE_LIST_OUTPUT_PATH}" VERBATIM ) if(PKGDATA_LIBRARY_${TYPE}_TYPE) # assert(${PARSED_ARGS_DEPENDS} != "") if(NOT PARSED_ARGS_DEPENDS) message(FATAL_ERROR "${__FUNCTION__}(): static and library mode imply a list of targets to link to, DEPENDS parameter missing") endif(NOT PARSED_ARGS_DEPENDS) add_library(${PACKAGE_TARGET_NAME} ${PKGDATA_LIBRARY_${TYPE}_TYPE} IMPORTED) if(MSVC) string(REGEX REPLACE "${PKGDATA_LIBRARY_SUFFIX}\$" "${CMAKE_IMPORT_LIBRARY_SUFFIX}" PACKAGE_OUTPUT_LIB "${PACKAGE_OUTPUT_PATH}") set_target_properties(${PACKAGE_TARGET_NAME} PROPERTIES IMPORTED_LOCATION ${PACKAGE_OUTPUT_PATH} IMPORTED_IMPLIB ${PACKAGE_OUTPUT_LIB}) else(MSVC) set_target_properties(${PACKAGE_TARGET_NAME} PROPERTIES IMPORTED_LOCATION ${PACKAGE_OUTPUT_PATH}) endif(MSVC) foreach(DEPENDENCY ${PARSED_ARGS_DEPENDS}) target_link_libraries(${DEPENDENCY} ${PACKAGE_TARGET_NAME}) if(NOT PARSED_ARGS_NO_SHARED_FLAGS) get_property(ENABLED_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) list(LENGTH "${ENABLED_LANGUAGES}" ENABLED_LANGUAGES_LENGTH) if(ENABLED_LANGUAGES_LENGTH GREATER 1) message(WARNING "Project has more than one language enabled, skip automatic shared flags appending") else(ENABLED_LANGUAGES_LENGTH GREATER 1) set_property(TARGET "${DEPENDENCY}" APPEND PROPERTY COMPILE_FLAGS "${${ICU_PUBLIC_VAR_NS}_${ENABLED_LANGUAGES}_SHARED_FLAGS}") endif(ENABLED_LANGUAGES_LENGTH GREATER 1) endif(NOT PARSED_ARGS_NO_SHARED_FLAGS) endforeach(DEPENDENCY) # http://www.mail-archive.com/cmake-commits@cmake.org/msg01135.html set(PACKAGE_INTERMEDIATE_TARGET_NAME "${PACKAGE_TARGET_NAME}${TARGET_SEPARATOR}DUMMY") # dummy intermediate target (ICU+PKG++DUMMY) to link the package to the produced library by running pkgdata (see add_custom_command above) add_custom_target( ${PACKAGE_INTERMEDIATE_TARGET_NAME} COMMENT "" DEPENDS "${PACKAGE_OUTPUT_PATH}" ) add_dependencies("${PACKAGE_TARGET_NAME}" "${PACKAGE_INTERMEDIATE_TARGET_NAME}") else(PKGDATA_LIBRARY_${TYPE}_TYPE) # dummy target (ICU+PKG+) to run pkgdata (see add_custom_command above) add_custom_target( "${PACKAGE_TARGET_NAME}" ALL COMMENT "" DEPENDS "${PACKAGE_OUTPUT_PATH}" ) endif(PKGDATA_LIBRARY_${TYPE}_TYPE) # dummy target (ICU+PKG++PKGLIST) to build the file pkglist.txt add_custom_target( "${PACKAGE_LIST_TARGET_NAME}" ALL COMMENT "" DEPENDS "${PACKAGE_LIST_OUTPUT_PATH}" ) # package => pkglist.txt add_dependencies("${PACKAGE_TARGET_NAME}" "${PACKAGE_LIST_TARGET_NAME}") # pkglist.txt => *.res add_dependencies("${PACKAGE_LIST_TARGET_NAME}" ${TARGET_RESOURCES}) if(PARSED_ARGS_DESTINATION) install(FILES "${PACKAGE_OUTPUT_PATH}" DESTINATION ${PARSED_ARGS_DESTINATION} PERMISSIONS OWNER_READ GROUP_READ WORLD_READ) endif(PARSED_ARGS_DESTINATION) endif(PARSED_ARGS_PACKAGE) endfunction(icu_generate_resource_bundle) ########## ########## ########## 
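# Illustrative usage of icu_generate_resource_bundle() (not part of this module;
# the target and file names below are hypothetical). Packaging several bundle
# sources into a shared library that is linked into a previously declared
# executable might look like:
#
#   add_executable(myapp main.cc)
#   icu_generate_resource_bundle(
#       NAME myresources
#       PACKAGE
#       TYPE library
#       FILES root.txt en.txt fr.txt
#       DEPENDS myapp
#       DESTINATION lib/icu
#   )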
########## if(${ICU_PUBLIC_VAR_NS}_DEBUG) function(icudebug _VARNAME) if(DEFINED ${ICU_PUBLIC_VAR_NS}_${_VARNAME}) message("${ICU_PUBLIC_VAR_NS}_${_VARNAME} = ${${ICU_PUBLIC_VAR_NS}_${_VARNAME}}") else(DEFINED ${ICU_PUBLIC_VAR_NS}_${_VARNAME}) message("${ICU_PUBLIC_VAR_NS}_${_VARNAME} = ") endif(DEFINED ${ICU_PUBLIC_VAR_NS}_${_VARNAME}) endfunction(icudebug) # IN (args) icudebug("FIND_COMPONENTS") icudebug("FIND_REQUIRED") icudebug("FIND_QUIETLY") icudebug("FIND_VERSION") # OUT # Found icudebug("FOUND") # Flags icudebug("C_FLAGS") icudebug("CPP_FLAGS") icudebug("CXX_FLAGS") icudebug("C_SHARED_FLAGS") icudebug("CPP_SHARED_FLAGS") icudebug("CXX_SHARED_FLAGS") # Linking icudebug("INCLUDE_DIRS") icudebug("LIBRARIES") # Version icudebug("VERSION_MAJOR") icudebug("VERSION_MINOR") icudebug("VERSION_PATCH") icudebug("VERSION") # _(FOUND|LIBRARY) set(${ICU_PRIVATE_VAR_NS}_COMPONENT_VARIABLES "FOUND" "LIBRARY" "LIBRARY_RELEASE" "LIBRARY_DEBUG") foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT ${${ICU_PRIVATE_VAR_NS}_COMPONENTS}) string(TOUPPER "${${ICU_PRIVATE_VAR_NS}_COMPONENT}" ${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT) foreach(${ICU_PRIVATE_VAR_NS}_COMPONENT_VARIABLE ${${ICU_PRIVATE_VAR_NS}_COMPONENT_VARIABLES}) icudebug("${${ICU_PRIVATE_VAR_NS}_UPPER_COMPONENT}_${${ICU_PRIVATE_VAR_NS}_COMPONENT_VARIABLE}") endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT_VARIABLE) endforeach(${ICU_PRIVATE_VAR_NS}_COMPONENT) endif(${ICU_PUBLIC_VAR_NS}_DEBUG) ########## ########## leatherman-1.4.2+dfsg/cmake/GetGitRevisionDescription.cmake000064400000000000000000000100261332360634000237420ustar00rootroot00000000000000# - Returns a version string from Git # # These functions force a re-configure on each git commit so that you can # trust the values of the variables in your build system. # # get_git_head_revision( [ ...]) # # Returns the refspec and sha hash of the current head revision # # git_describe( [ ...]) # # Returns the results of git describe on the source tree, and adjusting # the output so that it tests false if an error occurs. # # git_get_exact_tag( [ ...]) # # Returns the results of git describe --exact-match on the source tree, # and adjusting the output so that it tests false if there was no exact # matching tag. # # Requires CMake 2.6 or newer (uses the 'function' command) # # Original Author: # 2009-2010 Ryan Pavlik # http://academic.cleardefinition.com # Iowa State University HCI Graduate Program/VRAC # # Copyright Iowa State University 2009-2010. # Distributed under the Boost Software License, Version 1.0. 
# (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) if(__get_git_revision_description) return() endif() set(__get_git_revision_description YES) # We must run the following at "include" time, not at function call time, # to find the path to this module rather than the path to a calling list file get_filename_component(_gitdescmoddir ${CMAKE_CURRENT_LIST_FILE} PATH) function(get_git_head_revision _refspecvar _hashvar) set(GIT_PARENT_DIR "${CMAKE_CURRENT_SOURCE_DIR}") set(GIT_DIR "${GIT_PARENT_DIR}/.git") while(NOT EXISTS "${GIT_DIR}") # .git dir not found, search parent directories set(GIT_PREVIOUS_PARENT "${GIT_PARENT_DIR}") get_filename_component(GIT_PARENT_DIR ${GIT_PARENT_DIR} PATH) if(GIT_PARENT_DIR STREQUAL GIT_PREVIOUS_PARENT) # We have reached the root directory, we are not in git set(${_refspecvar} "GITDIR-NOTFOUND" PARENT_SCOPE) set(${_hashvar} "GITDIR-NOTFOUND" PARENT_SCOPE) return() endif() set(GIT_DIR "${GIT_PARENT_DIR}/.git") endwhile() # check if this is a submodule if(NOT IS_DIRECTORY ${GIT_DIR}) file(READ ${GIT_DIR} submodule) string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" GIT_DIR_RELATIVE ${submodule}) get_filename_component(SUBMODULE_DIR ${GIT_DIR} PATH) get_filename_component(GIT_DIR ${SUBMODULE_DIR}/${GIT_DIR_RELATIVE} ABSOLUTE) endif() set(GIT_DATA "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/git-data") if(NOT EXISTS "${GIT_DATA}") file(MAKE_DIRECTORY "${GIT_DATA}") endif() if(NOT EXISTS "${GIT_DIR}/HEAD") return() endif() set(HEAD_FILE "${GIT_DATA}/HEAD") configure_file("${GIT_DIR}/HEAD" "${HEAD_FILE}" COPYONLY) configure_file("${_gitdescmoddir}/GetGitRevisionDescription.cmake.in" "${GIT_DATA}/grabRef.cmake" @ONLY) include("${GIT_DATA}/grabRef.cmake") set(${_refspecvar} "${HEAD_REF}" PARENT_SCOPE) set(${_hashvar} "${HEAD_HASH}" PARENT_SCOPE) endfunction() function(git_describe _var) if(NOT GIT_FOUND) find_package(Git QUIET) endif() get_git_head_revision(refspec hash) if(NOT GIT_FOUND) set(${_var} "GIT-NOTFOUND" PARENT_SCOPE) return() endif() if(NOT hash) set(${_var} "HEAD-HASH-NOTFOUND" PARENT_SCOPE) return() endif() # TODO sanitize #if((${ARGN}" MATCHES "&&") OR # (ARGN MATCHES "||") OR # (ARGN MATCHES "\\;")) # message("Please report the following error to the project!") # message(FATAL_ERROR "Looks like someone's doing something nefarious with git_describe! Passed arguments ${ARGN}") #endif() #message(STATUS "Arguments to execute_process: ${ARGN}") execute_process(COMMAND "${GIT_EXECUTABLE}" describe ${hash} ${ARGN} WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" RESULT_VARIABLE res OUTPUT_VARIABLE out ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) if(NOT res EQUAL 0) set(out "${out}-${res}-NOTFOUND") endif() set(${_var} "${out}" PARENT_SCOPE) endfunction() function(git_get_exact_tag _var) git_describe(out --exact-match ${ARGN}) set(${_var} "${out}" PARENT_SCOPE) endfunction() leatherman-1.4.2+dfsg/cmake/GetGitRevisionDescription.cmake.in000064400000000000000000000022621332360634000243520ustar00rootroot00000000000000# # Internal file for GetGitRevisionDescription.cmake # # Requires CMake 2.6 or newer (uses the 'function' command) # # Original Author: # 2009-2010 Ryan Pavlik # http://academic.cleardefinition.com # Iowa State University HCI Graduate Program/VRAC # # Copyright Iowa State University 2009-2010. # Distributed under the Boost Software License, Version 1.0. 
# (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) set(HEAD_HASH) file(READ "@HEAD_FILE@" HEAD_CONTENTS LIMIT 1024) string(STRIP "${HEAD_CONTENTS}" HEAD_CONTENTS) if(HEAD_CONTENTS MATCHES "ref") # named branch string(REPLACE "ref: " "" HEAD_REF "${HEAD_CONTENTS}") if(EXISTS "@GIT_DIR@/${HEAD_REF}") configure_file("@GIT_DIR@/${HEAD_REF}" "@GIT_DATA@/head-ref" COPYONLY) elseif(EXISTS "@GIT_DIR@/logs/${HEAD_REF}") configure_file("@GIT_DIR@/logs/${HEAD_REF}" "@GIT_DATA@/head-ref" COPYONLY) set(HEAD_HASH "${HEAD_REF}") endif() else() # detached HEAD configure_file("@GIT_DIR@/HEAD" "@GIT_DATA@/head-ref" COPYONLY) endif() if(NOT HEAD_HASH) file(READ "@GIT_DATA@/head-ref" HEAD_HASH LIMIT 1024) string(STRIP "${HEAD_HASH}" HEAD_HASH) endif() leatherman-1.4.2+dfsg/cmake/cflags.cmake000064400000000000000000000135571332360634000201070ustar00rootroot00000000000000# Set compiler-specific flags # Each of our project dirs sets CMAKE_CXX_FLAGS based on these. We do # not set CMAKE_CXX_FLAGS globally because gtest is not warning-clean. if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "\\w*Clang") set(LEATHERMAN_CXX_FLAGS "-std=c++11 -Wall -Wextra -Werror -Wno-unused-parameter -Wno-tautological-constant-out-of-range-compare ${CMAKE_CXX_FLAGS}") # Clang warns that 'register' is deprecated; 'register' is used throughout boost, so it can't be an error yet. # The warning flag is different on different clang versions so we need to extract the clang version. # And the Mavericks version of clang report its version in its own special way (at least on 10.9.5) - yay EXECUTE_PROCESS( COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE clang_full_version_string ) if ("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") string (REGEX REPLACE ".*based on LLVM ([0-9]+\\.[0-9]+).*" "\\1" CLANG_VERSION_STRING ${clang_full_version_string}) # Clang's output changed in Xcode 7. 
if (NOT ${clang_full_version_string}) string(REGEX REPLACE "Apple LLVM version ([0-9]+\\.[0-9]+).*" "\\1" CLANG_VERSION_STRING ${clang_full_version_string}) endif() else() string (REGEX REPLACE ".*clang version ([0-9]+\\.[0-9]+).*" "\\1" CLANG_VERSION_STRING ${clang_full_version_string}) endif() MESSAGE( STATUS "CLANG_VERSION_STRING: " ${CLANG_VERSION_STRING} ) # Now based on clang version set the appropriate warning flag if ("${CLANG_VERSION_STRING}" VERSION_GREATER "3.4") set(LEATHERMAN_CXX_FLAGS "-Wno-deprecated-register ${LEATHERMAN_CXX_FLAGS}") else() set(LEATHERMAN_CXX_FLAGS "-Wno-deprecated ${LEATHERMAN_CXX_FLAGS}") endif() if ("${CLANG_VERSION_STRING}" VERSION_GREATER "6.9") set(LEATHERMAN_CXX_FLAGS "-Wno-unused-local-typedef ${LEATHERMAN_CXX_FLAGS}") endif() # FreeBSD needs -fPIC if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") set(LEATHERMAN_LIBRARY_FLAGS "-fPIC") endif() elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") # maybe-uninitialized is a relatively new GCC warning that Boost 1.57 violates; disable it for now until it's available in Clang as well # it's also sometimes wrong set(CMAKE_CXX_FLAGS "-Wno-maybe-uninitialized ${CMAKE_CXX_FLAGS}") # missing-field-initializers is disabled because GCC can't make up their mind how to treat C++11 initializers set(LEATHERMAN_CXX_FLAGS "-std=c++11 -Wall -Werror -Wno-unused-parameter -Wno-unused-local-typedefs -Wno-unknown-pragmas -Wno-missing-field-initializers ${CMAKE_CXX_FLAGS}") if (NOT "${CMAKE_SYSTEM_NAME}" MATCHES "SunOS") set(LEATHERMAN_CXX_FLAGS "-Wextra ${LEATHERMAN_CXX_FLAGS}") endif() # On Windows with GCC 5.2.0, disable deprecated declarations because it causes warnings with Boost's use of auto_ptr if (WIN32) set(LEATHERMAN_CXX_FLAGS "-Wno-deprecated-declarations ${LEATHERMAN_CXX_FLAGS}") endif() # On unix systems we want to be sure to specify -fPIC for libraries if (NOT WIN32) set(LEATHERMAN_LIBRARY_FLAGS "-fPIC -nostdlib -nodefaultlibs") endif() elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") #set(LEATHERMAN_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Wall") endif() # Add code coverage if (COVERALLS) set(LEATHERMAN_CXX_FLAGS "-fprofile-arcs -ftest-coverage ${LEATHERMAN_CXX_FLAGS}") set(CMAKE_SHARED_LINKER_FLAGS "--coverage ${CMAKE_SHARED_LINKER_FLAGS}") set(CMAKE_EXE_LINKER_FLAGS "--coverage ${CMAKE_EXE_LINKER_FLAGS}") endif() if (WIN32) # Update standard link libraries to explicitly exclude kernel32. It isn't necessary, and when compiling with # MinGW makes the executable unusable on Microsoft Nano Server due to including __C_specific_handler. SET(CMAKE_C_STANDARD_LIBRARIES "-luser32 -lgdi32 -lwinspool -lshell32 -lole32 -loleaut32 -luuid -lcomdlg32 -ladvapi32" CACHE STRING "Standard C link libraries." FORCE) SET(CMAKE_CXX_STANDARD_LIBRARIES "-luser32 -lgdi32 -lwinspool -lshell32 -lole32 -loleaut32 -luuid -lcomdlg32 -ladvapi32" CACHE STRING "Standard C++ link libraries." FORCE) # We currently support Windows Vista and later APIs, see # http://msdn.microsoft.com/en-us/library/windows/desktop/aa383745(v=vs.85).aspx for version strings. list(APPEND LEATHERMAN_DEFINITIONS -DWINVER=0x0600 -D_WIN32_WINNT=0x0600) # The GetUserNameEx function requires the application have a defined security level. # We define security sufficient to get the current user's info. # Also force use of UNICODE APIs, following the pattern outlined at http://utf8everywhere.org/. 
list(APPEND LEATHERMAN_DEFINITIONS -DUNICODE -D_UNICODE -DSECURITY_WIN32) endif() # Enforce UTF-8 in Leatherman.Logging; disable deprecated names in Boost.System to avoid warnings on Windows. list(APPEND LEATHERMAN_DEFINITIONS -DBOOST_LOG_WITHOUT_WCHAR_T -DBOOST_SYSTEM_NO_DEPRECATED) # Set project name for locale customization. Also set build directory for testing. list(APPEND LEATHERMAN_DEFINITIONS -DPROJECT_NAME="${CMAKE_PROJECT_NAME}" -DPROJECT_DIR="${PROJECT_BINARY_DIR}") if (NOT BOOST_STATIC) # Boost.Log requires that BOOST_LOG_DYN_LINK is set when using dynamic linking. We set ALL for consistency. list(APPEND LEATHERMAN_DEFINITIONS -DBOOST_ALL_DYN_LINK) endif() set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin) if (WIN32) # On Windows, DLL paths aren't hardcoded in the executable. We place all the executables and libraries # in the same directory to avoid having to setup the DLL search path in the dev environment. set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin) else() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib) endif() if ((LEATHERMAN_TOPLEVEL OR LEATHERMAN_HAVE_LOCALES) AND LEATHERMAN_USE_LOCALES) list(APPEND LEATHERMAN_DEFINITIONS -DLEATHERMAN_USE_LOCALES) endif() leatherman-1.4.2+dfsg/cmake/generate_translations.cmake000064400000000000000000000011331332360634000232260ustar00rootroot00000000000000# Generates or updates translation files from a pot file using msginit or msgmerge. # # Usage: # cmake -DPOT_FILE= \ # -DLANG_FILE= \ # -DLANG= \ # -DMSGMERGE_EXE= \ # -DMSGINIT_EXE= \ # -P generate_translations.cmake if (EXISTS ${LANG_FILE}) message(STATUS "Updating ${LANG_FILE}") execute_process(COMMAND ${MSGMERGE_EXE} -U ${LANG_FILE} ${POT_FILE}) else() execute_process(COMMAND ${MSGINIT_EXE} --no-translator -l ${LANG}.UTF-8 -o ${LANG_FILE} -i ${POT_FILE}) endif() leatherman-1.4.2+dfsg/cmake/internal.cmake000064400000000000000000000212351332360634000204540ustar00rootroot00000000000000# This file contains the macros used to add and manage leatherman # libraries. If you are adding a new library to leatherman, this is # probably the place to go for documentation. If you're just using # Leatherman, you should check out the README for information on its # interface. include(leatherman) # contains some helpers we use #### # Macros for use by leatherman libraries # # These are the API that libraries use to build themselves as # "standard" leatherman components" #### # Usage: add_leatherman_deps(${DEP1_LIB} ${DEP2_LIB}) # # Append to the LEATHERMAN__DEPS variable. macro(add_leatherman_deps) list(APPEND ${deps_var} ${ARGV}) export_var(${deps_var}) endmacro() # Usage: add_leatherman_includes(${DIR1} ${DIR2}) # # Append to the LEATHERMAN__INCLUDE variable macro(add_leatherman_includes) list(APPEND ${include_var} ${ARGV}) list(REMOVE_DUPLICATES ${include_var}) export_var(${include_var}) endmacro() # Usage: leatherman_dependency("libname") # # Automatically handle include directories and library linking for the # given leatherman library. # # Will throw a fatal error if the dependency cannot be found. 
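#
# Illustrative example (the component name is only an example): from a
# component's CMakeLists.txt, pulling in the already-enabled "logging" library
# adds its include directories and link dependencies to the current context:
#
#   leatherman_dependency(logging)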
macro(leatherman_dependency library) string(MAKE_C_IDENTIFIER "${library}" id) string(TOUPPER "${id}" name) set(option "LEATHERMAN_USE_${name}") set(dep_lib "LEATHERMAN_${name}_LIB") set(dep_deps "LEATHERMAN_${name}_DEPS") set(dep_include "LEATHERMAN_${name}_INCLUDE") if(${${option}}) debug("Found ${library} as ${name}, using it in current context") if ((NOT "" STREQUAL "${${dep_deps}}") AND (NOT LEATHERMAN_SHARED)) debug("Adding ${${dep_deps}} to deps for ${dirname}") append_new(${deps_var} ${${dep_deps}}) export_var(${deps_var}) endif() if (NOT "" STREQUAL "${${dep_lib}}") debug("Adding ${${dep_lib}} to deps for ${dirname}") list(FIND ${deps_var} ${${dep_lib}} found) if (${found} EQUAL -1) list(APPEND ${deps_var} ${${dep_lib}}) endif() export_var(${deps_var}) endif() if (NOT "" STREQUAL "${${dep_include}}") debug("Adding ${${dep_include}} to include directories for ${dirname}") list(APPEND ${include_var} ${${dep_include}}) list(REMOVE_DUPLICATES ${include_var}) export_var(${include_var}) endif() else() message(FATAL_ERROR "${library} not found as a dependency for ${dirname}") endif() endmacro() # Usage: add_leatherman_library(${SOURCES} [OPTS] [EXPORTS HEADER]) # # Creates a static CMake library built from the provided sources. Sets # LEATHERMAN__LIB to the name of this library. # # This macro directly calls add_library, so any add_library options # can be passed along with the sources. # # If the EXPORTS keyword is given, the string following it describes # the location to put an export header using the symbol_exports # helper. # # This macro cannot be invoked multiple times macro(add_leatherman_library) include_directories(BEFORE ${${include_var}}) set(LIBRARY_ARGS ${ARGV}) list(FIND LIBRARY_ARGS EXPORTS EXPORTS_IDX) if (NOT ${EXPORTS_IDX} EQUAL -1) list(REMOVE_AT LIBRARY_ARGS ${EXPORTS_IDX}) list(GET LIBRARY_ARGS ${EXPORTS_IDX} EXPORT_HEADER) list(REMOVE_AT LIBRARY_ARGS ${EXPORTS_IDX}) endif() if(LEATHERMAN_SHARED) add_library(${libname} SHARED ${LIBRARY_ARGS}) target_link_libraries(${libname} PRIVATE ${${deps_var}}) else() add_library(${libname} STATIC ${LIBRARY_ARGS}) endif() set_target_properties(${libname} PROPERTIES COMPILE_FLAGS "${LEATHERMAN_CXX_FLAGS} ${LEATHERMAN_LIBRARY_FLAGS}" VERSION ${PROJECT_VERSION}) if(LEATHERMAN_INSTALL) leatherman_install(${libname} EXPORT LeathermanLibraries) endif() if (EXPORT_HEADER) symbol_exports(${libname} ${EXPORT_HEADER}) endif() set(${lib_var} "${libname}" PARENT_SCOPE) endmacro() # Usage: add_leatherman_test(${SOURCES} [OPTS]) # # Adds the listed files to the set which will be built for the # leatherman unit test executable. macro(add_leatherman_test) foreach(FILE ${ARGV}) if (IS_ABSOLUTE FILE) list(APPEND LEATHERMAN_TEST_SRCS "${FILE}") else() list(APPEND LEATHERMAN_TEST_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/${FILE}") endif() endforeach() export_var(LEATHERMAN_TEST_SRCS) endmacro() # Usage: add_leatherman_headers(${DIRECTORIES}) # # Adds the listed directories to the set which will be installed to # $PREFIX/include macro(add_leatherman_headers) if(LEATHERMAN_INSTALL) foreach(DIR ${ARGV}) install(DIRECTORY "${DIR}" DESTINATION include) endforeach() endif() endmacro() # Usage: add_leatherman_vendored("pkg.zip" "abcdef..." "include" [SOURCE_DIR]) # # Unpacks a vendored package and installs the headers to include/leatherman/vendor # Optionally, a variable can be passed as the last argument that's set to the # unpacked location, in case it's needed for compiling a simple library. 
macro(add_leatherman_vendored pkg md5 header_path) unpack_vendored(${pkg} ${md5} SOURCE_DIR) set(include_dir "${SOURCE_DIR}/${header_path}") add_leatherman_includes(${include_dir}) if (LEATHERMAN_INSTALL) install(DIRECTORY "${include_dir}/" DESTINATION "include/leatherman/vendor") endif() add_custom_target(${dirname} DEPENDS ${pkg}) if (ARGV4) set(${ARGV4} ${SOURCE_DIR}) endif() endmacro() #### # Macros for use in the top-level leatherman CMakeLists.txt # # These macros are used to build up the variables which are passed # into whatever project is including leatherman #### # Usage: add_leatherman_dir(subdir [EXCLUDE_FROM_VARS]) # # Creates all of the CMake variables intended to be used by consumers # of leatherman, including the ENABLE flag. # # If the enable flag is set, also sets up the variables used by the # library API macros and adds the named subdirectory to the CMake # project. # # If EXCLUDE_FROM_VARS is present, this library will not be added to # the LEATHERMAN_LIBRARIES and LEATHERMAN_INCLUDE variables. The # LEATHERMAN__ variables will still be set. macro(add_leatherman_dir dir) debug("Setting up leatherman library for ${dir}") string(MAKE_C_IDENTIFIER "${dir}" id) string(TOUPPER "${id}" id_upper) set(dirname "${dir}") # Used by other macros to know our human-readable name set(option "LEATHERMAN_USE_${id_upper}") set(include_dir "${CMAKE_CURRENT_SOURCE_DIR}/${dir}/inc") set(libname "leatherman_${id}") defoption(${option} "Should ${dir} be built and used?" ${LEATHERMAN_DEFAULT_ENABLE}) if (${${option}}) set(include_var "LEATHERMAN_${id_upper}_INCLUDE") set(lib_var "LEATHERMAN_${id_upper}_LIB") set(deps_var "LEATHERMAN_${id_upper}_DEPS") set(${include_var} ${include_dir}) set(${lib_var} "") # if library is built, this will be set automatically # By adding the subdirectory after setting all variables, but # before exporting, we give the library an opportunity to # munge them (for example, to add vendor dirs) add_subdirectory("${dir}") if (LEATHERMAN_INSTALL) if ("${ARGV1}" STREQUAL EXCLUDE_FROM_VARS) set(COMPONENT_STRING "leatherman_component(${id} EXCLUDE_FROM_VARS)") else() set(COMPONENT_STRING "leatherman_component(${id})") endif() install(FILES "${dir}/CMakeLists.txt" DESTINATION "lib${LIB_SUFFIX}/cmake/leatherman" RENAME "${id}.cmake") set(LEATHERMAN_COMPONENTS "${LEATHERMAN_COMPONENTS}\n${COMPONENT_STRING}") endif() # We set this one afterwards because it doesn't need # overriding set(libs_var "LEATHERMAN_${id_upper}_LIBS") set(${libs_var} ${${lib_var}} ${${deps_var}}) if(NOT "${ARGV1}" STREQUAL EXCLUDE_FROM_VARS) debug("Appending values for ${id_upper} to common vars") list(APPEND LEATHERMAN_INCLUDE_DIRS ${${include_var}}) if (NOT "" STREQUAL "${${lib_var}}") # Prepend leatherman libraries, as later libs may depend on earlier libs. list(INSERT LEATHERMAN_LIBS 0 ${${lib_var}}) endif() append_new(LEATHERMAN_DEPS ${${deps_var}}) else() debug("Excluding values for ${id_upper} from common vars") endif() export_var(${include_var}) export_var(${lib_var}) export_var(${libs_var}) export_var(${deps_var}) # Enable cppcheck on this library list(APPEND LEATHERMAN_CPPCHECK_DIRS "${CMAKE_SOURCE_DIR}/${dir}") endif() endmacro(add_leatherman_dir) leatherman-1.4.2+dfsg/cmake/leatherman.cmake.in000064400000000000000000000347341332360634000213750ustar00rootroot00000000000000# This file contains utilities used by both leatherman and consuming # projects. # Save the directory containing other cmake script files. 
# If we're top-level, this file is generated and dropped # in a different directory from the other script files. if(LEATHERMAN_TOPLEVEL) set(LEATHERMAN_CMAKE_DIR ${CMAKE_SOURCE_DIR}/cmake) else() set(LEATHERMAN_CMAKE_DIR ${CMAKE_CURRENT_LIST_DIR}) endif() # Usage: leatherman_logging_namespace("namespace") # # Sets the LEATHERMAN_LOGGING_NAMESPACE preprocessor definition to the # value passed as "namespace". macro(leatherman_logging_namespace namespace) add_definitions("-DLEATHERMAN_LOGGING_NAMESPACE=\"${namespace}\"") endmacro() # Usage: leatherman_logging_line_numbers() # # Sets the LEATHERMAN_LOGGING_LINE_NUMBERS preprocessor definition. macro(leatherman_logging_line_numbers) add_definitions("-DLEATHERMAN_LOGGING_LINE_NUMBERS") endmacro() # Usage: debug("Something cool is happening") # # Print message if LEATHERMAN_DEBUG is set. Used to introspect macro # logic. macro(debug str) if (LEATHERMAN_DEBUG) message(STATUS ${str}) endif() endmacro(debug) # Usage: export_var("foobar") # # Sets variable "foobar" in the parent scope to the same value as # "foobar" in the invoking scope. Remember that a macro does not # create a new scope, but a function does. macro(export_var varname) if (NOT "${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_SOURCE_DIR}") debug("Exporting ${varname}") set(${varname} ${${varname}} PARENT_SCOPE) else() debug("Skipping export of ${varname} because I'm top-level") endif() debug("It's value is: ${${varname}}") endmacro(export_var) # Usage: defoption(VARNAME "Documentation String" ${DEFAULT_VALUE}") # # Define an option that will only be set to DEFAULT_VALUE if it does # not already exist in this scope. If the variable is available in the # scope, the option will keep the current value. This works around a # weird CMake behavior where set(OPTION_VAR TRUE) does not cause # option() to ignore its default. macro(defoption name doc default) if(DEFINED ${name}) debug("${name} is already set, using it") set(enabled ${${name}}) else() debug("${name} unset, using default") set(enabled ${default}) endif() option(${name} ${doc} ${enabled}) endmacro() # Usage: leatherman_install(TARGETS) # # Installs targets using common cross-platform configuration. # On Windows shared libraries go in bin, import and archive libraries # go in lib. On Linux shared libraries go in lib. Binaries go in bin. # # Also always drop the prefix; give the target its expected name. # We often have binaries and related dynamic libraries, and this # simplifies giving them different but related names, such as # `facter` and `libfacter`. 
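#
# Illustrative example (target names are hypothetical):
#
#   add_executable(foo main.cc)
#   add_library(libfoo SHARED foo.cc)
#   leatherman_install(foo libfoo)
#
# installs the executable to bin and the shared library to lib${LIB_SUFFIX}
# (bin on Windows for the DLL), without CMake's default "lib" prefix being
# prepended to the library name.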
macro(leatherman_install) install(TARGETS ${ARGV} RUNTIME DESTINATION bin LIBRARY DESTINATION lib${LIB_SUFFIX} ARCHIVE DESTINATION lib${LIB_SUFFIX}) foreach(ARG ${ARGV}) if (TARGET ${ARG}) set_target_properties(${ARG} PROPERTIES PREFIX "" IMPORT_PREFIX "") endif() endforeach() endmacro() # Usage: add_cppcheck_dirs(dir1 dir2) # # Add the listed directories to the set that cppcheck will be run # against macro(add_cppcheck_dirs) list(APPEND CPPCHECK_DIRS ${ARGV}) export_var(CPPCHECK_DIRS) endmacro() # Usage: add_cpplint_files(file1 file2) # # Add the listed files to the set that cpplint will be run against macro(add_cpplint_files) list(APPEND CPPLINT_FILES ${ARGV}) export_var(CPPLINT_FILES) endmacro() # Usage: enable_cppcheck() # # Create the cppcheck custom target with all the directories specified # in previous calls to `add_cppcheck_dirs` macro(enable_cppcheck) add_custom_target(cppcheck COMMAND cppcheck --enable=warning,performance --error-exitcode=2 --quiet --inline-suppr ${CPPCHECK_DIRS}) endmacro() # We set this here so that enable_cpplint() can find it set(LEATHERMAN_CPPLINT_PATH "${LEATHERMAN_CMAKE_DIR}/../scripts/cpplint.py") # Usage: enable_cpplint() # # Create the cpplint custom target with all the specified in previous # calls to `add_cpplint_files` macro(enable_cpplint) include(FindPythonInterp) if (NOT PYTHONINTERP_FOUND) message(STATUS "Python not found; 'cpplint' target will not be available") else() set(CPPLINT_FILTER "-build/c++11" # , , etc... "-whitespace/indent" # We use 4 space indentation "-build/include" # Why? "-build/namespaces" # What's a namespace to do "-legal/copyright" # Not yet "-runtime/references" # Not sure about this religion "-readability/streams" # What? "-readability/namespace" # Ignore nested namespace comment formatting "-whitespace/braces" # Is there a k&r setting? "-whitespace/line_length" # Well yeah, but ... not just now "-runtime/arrays" # Sizing an array with a 'const int' doesn't make it variable sized "-readability/todo" # Seriously? todo comments need to identify an owner? pffft "-whitespace/empty_loop_body" # Can't handle do { ... } while(expr); "-runtime/int" # Some C types are needed for library interop "-runtime/explicit" # Using implicit conversion from string to regex for regex calls. "-build/header_guard" # Disable header guards (cpplint doesn't yet support enforcing #pragma once) "-runtime/indentation_namespace" # Our namespace indentation is not consistent "-readability/inheritance" # virtual/override sometimes used together "-whitespace/operators" # Expects spaces around perfect forwarding (&&) ) set(CPPLINT_ARGS "--extensions=cc,cpp,hpp,h") if (CPPLINT_FILTER) string(REPLACE ";" "," CPPLINT_FILTER "${CPPLINT_FILTER}") set(CPPLINT_ARGS "${CPPLINT_ARGS};--filter=${CPPLINT_FILTER}") endif() if (MSVC) set(CPPLINT_ARGS "${CPPLINT_ARGS};--output=vs7") endif() add_custom_target(cpplint COMMAND ${PYTHON_EXECUTABLE} ${LEATHERMAN_CPPLINT_PATH} ${CPPLINT_ARGS} ${CPPLINT_FILES} VERBATIM ) endif() endmacro() # Usage: gettext_templates(dir) # # Create templates for gettext in `dir` from the source files specified as additional arguments. # Creates a custom target `translation`. macro(gettext_templates dir) # Don't even try to find gettext on AIX or Solaris, we don't want it. 
if (LEATHERMAN_USE_LOCALES AND LEATHERMAN_GETTEXT) find_program(XGETTEXT_EXE xgettext) endif() if (XGETTEXT_EXE) set(TRANSLATION_DIR "${dir}") file(MAKE_DIRECTORY ${TRANSLATION_DIR}) set(ALL_PROJECT_SOURCES ${ARGN}) set(lang_template ${TRANSLATION_DIR}/${PROJECT_NAME}.pot) add_custom_command(OUTPUT ${lang_template} COMMAND ${XGETTEXT_EXE} --sort-by-file --copyright-holder "Puppet \\" --package-name=${PROJECT_NAME} --package-version=${PROJECT_VERSION} --msgid-bugs-address "docs@puppet.com" -d ${PROJECT_NAME} -o ${lang_template} --keyword=LOG_DEBUG:1,\\"debug\\" --keyword=LOG_INFO:1,\\"info\\" --keyword=LOG_WARNING:1,\\"warning\\" --keyword=LOG_ERROR:1,\\"error\\" --keyword=LOG_FATAL:1,\\"fatal\\" --keyword=log:2,\\"log\\" --keyword=translate:1 --keyword=translate_n:1,2 --keyword=translate_p:1c,2 --keyword=translate_np:1c,2,3 --keyword=format:1 --keyword=format_n:1,2 --keyword=format_p:1c,2 --keyword=format_np:1c,2,3 --keyword=_:1 --keyword=n_:1,2 --keyword=p_:1c,2 --keyword=np_:1c,2,3 --add-location=file --add-comments=LOCALE ${ALL_PROJECT_SOURCES} COMMAND ${CMAKE_COMMAND} -DPOT_FILE=${lang_template} -DSOURCE_DIR=${CMAKE_SOURCE_DIR} -P ${LEATHERMAN_CMAKE_DIR}/normalize_pot.cmake DEPENDS ${ALL_PROJECT_SOURCES}) add_custom_target(${PROJECT_NAME}.pot ALL DEPENDS ${lang_template}) find_program(MSGINIT_EXE msginit) find_program(MSGMERGE_EXE msgmerge) if (MSGINIT_EXE AND MSGMERGE_EXE) foreach(lang ${LEATHERMAN_LOCALES}) set(lang_file ${TRANSLATION_DIR}/${lang}.po) add_custom_command(OUTPUT ${lang_file} COMMAND ${CMAKE_COMMAND} -DPOT_FILE=${lang_template} -DLANG_FILE=${lang_file} -DLANG=${lang} -DMSGMERGE_EXE=${MSGMERGE_EXE} -DMSGINIT_EXE=${MSGINIT_EXE} -P ${LEATHERMAN_CMAKE_DIR}/generate_translations.cmake DEPENDS ${lang_template}) add_custom_target(${PROJECT_NAME}-${lang}.pot ALL DEPENDS ${lang_file}) endforeach() endif() else() message(STATUS "Could not find gettext executables, skipping gettext_templates.") endif() endmacro() # Usage: gettext_compile(dir inst) # # Compile gettext .po files into .mo files and configure installing to inst # Creates a custom target `translations`. # # Does nothing if msgfmt (part of gettext) isn't found. Sets GETTEXT_ENABLED # to ON if we can compile .mo files, otherwise sets to OFF. This variable can # be used to disable functionality (such as testing) that requires gettext # translation files. macro(gettext_compile dir inst) # Don't even try to find gettext on AIX or Solaris, we don't want it. if (LEATHERMAN_USE_LOCALES AND LEATHERMAN_GETTEXT) find_program(MSGFMT_EXE msgfmt) endif() if (MSGFMT_EXE) file(GLOB TRANSLATIONS ${dir}/*.po) if (NOT TARGET translations) add_custom_target(translations ALL) endif() # Add LEATHERMAN_LOCALES, as they may not have been generated yet. 
foreach(locale ${LEATHERMAN_LOCALES}) set(fpath ${dir}/${locale}.po) list(FIND TRANSLATIONS ${fpath} FOUND) if (${FOUND} EQUAL -1) list(APPEND TRANSLATIONS ${fpath}) endif() endforeach() foreach(fpath ${TRANSLATIONS}) get_filename_component(lang ${fpath} NAME_WE) set(mo ${CMAKE_BINARY_DIR}/${lang}/LC_MESSAGES/${PROJECT_NAME}.mo) add_custom_command(OUTPUT ${mo} COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_BINARY_DIR}/${lang}/LC_MESSAGES COMMAND ${MSGFMT_EXE} -c -v -o ${mo} ${fpath} 2>&1 DEPENDS ${fpath}) add_custom_target(${lang}-${PROJECT_NAME} DEPENDS ${mo}) add_dependencies(translations ${lang}-${PROJECT_NAME}) if(LEATHERMAN_LOCALE_INSTALL) install(FILES ${mo} DESTINATION "@CMAKE_INSTALL_PREFIX@/${LEATHERMAN_LOCALE_INSTALL}/${lang}/LC_MESSAGES") else() install(FILES ${mo} DESTINATION "@CMAKE_INSTALL_PREFIX@/share/locale/${lang}/LC_MESSAGES") endif() endforeach() set(GETTEXT_ENABLED ON) else() message(STATUS "Could not find gettext executables, skipping gettext_compile.") set(GETTEXT_ENABLED OFF) endif() endmacro() include(GetGitRevisionDescription) # Usage: get_commit_string(VARNAME) # # Sets VARNAME to the git commit revision string, i.e. (commit SHA1) function(get_commit_string varname) get_git_head_revision(GIT_REFSPEC GIT_SHA1) debug("Git SHA1 is ${GIT_SHA1}") if ("${GIT_SHA1}" STREQUAL "" OR "${GIT_SHA1}" STREQUAL "GITDIR-NOTFOUND") set(${varname} "" PARENT_SCOPE) else() set(${varname} " (commit ${GIT_SHA1})" PARENT_SCOPE) endif() endfunction() include(GenerateExportHeader) # Usage: symbol_exports(TARGET HEADER) # # Generate the export header for restricting symbols exported from the library, # and configure the compiler. Restricting symbols has several advantages, noted # at https://gcc.gnu.org/wiki/Visibility. macro(symbol_exports target header) generate_export_header(${target} EXPORT_FILE_NAME "${header}") # Export on Apple resulted in issues finding symbols from library dependencies # that we haven't solved. For now avoid the problem. # AIX doesn't support inline headers, and CMake warns if you try to apply the # option on static libraries. get_target_property(target_type ${target} TYPE) if ((NOT APPLE) AND (NOT CMAKE_SYSTEM_NAME MATCHES "AIX") AND (${target_type} STREQUAL SHARED_LIBRARY)) set_target_properties(${target} PROPERTIES VISIBILITY_INLINES_HIDDEN ON) endif() # If the target name is not a C identifier, generate_export_header will # convert it to one. Fix the define to do the same. string(MAKE_C_IDENTIFIER ${target} target_c_name) string(TOLOWER ${target_c_name} target_name_lower) target_compile_definitions(${target} PRIVATE "-D${target_name_lower}_EXPORTS") endmacro() # Usage: append_new(VARNAME VAR1 VAR2 ...) # # Append ARGN items to VARNAME list if not already present. Accounts for # optimized/debug flags. function(append_new varname) set(prefix "") foreach(DEP ${ARGN}) if ((${DEP} STREQUAL optimized) OR (${DEP} STREQUAL debug)) set(prefix ${DEP}) else() list(FIND ${varname} ${DEP} found) if (${found} EQUAL -1) if (prefix) list(APPEND ${varname} ${prefix}) endif() list(APPEND ${varname} ${DEP}) endif() set(prefix "") endif() endforeach() set(${varname} ${${varname}} PARENT_SCOPE) endfunction() # Usage: unpack_vendored("pkg.zip" "pkg" SOURCE_DIR) # # Unpacks a compressed pkg.zip in the vendor directory to # ${PROJECT_BINARY_DIR}/src/pkg and saves the unpacked location to a variable. 
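#
# Illustrative example (the archive name and variable are hypothetical):
#
#   unpack_vendored("catch.tar.gz" "catch" CATCH_SOURCE_DIR)
#
# extracts ${PROJECT_SOURCE_DIR}/vendor/catch.tar.gz into
# ${PROJECT_BINARY_DIR}/src/catch and sets CATCH_SOURCE_DIR to that directory.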
macro(unpack_vendored pkg extracted_dir dir) set(pkgfile ${PROJECT_SOURCE_DIR}/vendor/${pkg}) set(${dir} ${PROJECT_BINARY_DIR}/src/${extracted_dir}) message(STATUS "Unpacking ${pkgfile} into ${${dir}}") file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/src) execute_process( COMMAND ${CMAKE_COMMAND} -E tar xzf ${PROJECT_SOURCE_DIR}/vendor/${pkg} WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/src ) endmacro(unpack_vendored) leatherman-1.4.2+dfsg/cmake/leatherman_config.cmake000064400000000000000000000046721332360634000223130ustar00rootroot00000000000000include(leatherman) # Usage: add_leatherman_deps(${DEP1_LIB} ${DEP2_LIB}) # # Append to the LEATHERMAN__DEPS variable. macro(add_leatherman_deps) list(APPEND ${deps_var} ${ARGV}) endmacro() # Usage: add_leatherman_includes(${DIR1} ${DIR2}) # # Append to the LEATHERMAN__INCLUDE variable macro(add_leatherman_includes) list(APPEND ${include_var} ${ARGV}) list(REMOVE_DUPLICATES ${include_var}) endmacro() # Usage: leatherman_dependency("libname") # # Automatically handle include directories and library linking for the # given leatherman library. # # Will throw a fatal error if the dependency cannot be found. macro(leatherman_dependency library) string(MAKE_C_IDENTIFIER "${library}" lib) string(TOUPPER "${lib}" name) set(option "LEATHERMAN_USE_${name}") set(dep_lib "LEATHERMAN_${name}_LIB") set(dep_deps "LEATHERMAN_${name}_DEPS") set(dep_include "LEATHERMAN_${name}_INCLUDE") if (NOT "" STREQUAL "${${dep_deps}}") debug("Adding ${${dep_deps}} to deps for ${id_upper}") list(APPEND ${deps_var} ${${dep_deps}}) endif() if (NOT "" STREQUAL "${${dep_lib}}") debug("Adding ${${dep_lib}} to deps for ${id_upper}") list(APPEND ${deps_var} ${${dep_lib}}) endif() if (NOT "" STREQUAL "${${dep_include}}") debug("Adding ${${dep_include}} to include directories for ${id_upper}") list(APPEND ${include_var} ${${dep_include}}) list(REMOVE_DUPLICATES ${include_var}) endif() endmacro() macro(add_leatherman_library) set(${lib_var} "${libname}") endmacro() macro(add_leatherman_headers) endmacro() macro(add_leatherman_test) endmacro() macro(add_leatherman_vendored pkg md5 header_path) add_leatherman_includes("${LEATHERMAN_PREFIX}/include/leatherman/vendor") endmacro() macro(leatherman_component id) string(TOUPPER "${id}" id_upper) set(include_var "LEATHERMAN_${id_upper}_INCLUDE") set(lib_var "LEATHERMAN_${id_upper}_LIB") set(deps_var "LEATHERMAN_${id_upper}_DEPS") set(include_dir "${LEATHERMAN_PREFIX}/include") set(libname "leatherman_${id}") set(${include_var} ${include_dir}) set(${lib_var} "") include("${current_directory}/${id}.cmake") set(libs_var "LEATHERMAN_${id_upper}_LIBS") set(${libs_var} ${${lib_var}} ${${deps_var}}) if("${ARGV1}" STREQUAL EXCLUDE_FROM_VARS) set(exclude_var "LEATHERMAN_EXCLUDE_${id_upper}") set(${exclude_var} TRUE) endif() endmacro() leatherman-1.4.2+dfsg/cmake/normalize_pot.cmake000064400000000000000000000012101332360634000215110ustar00rootroot00000000000000# Normalizes a pot file generated by xgettext, so we can compare new files # to see if they've changed. Normalization removes creation date, and sets # the charset to UTF-8. 
# # Usage: # cmake -DPOT_FILE= \ # -DSOURCE_DIR= \ # -P normalize_pot.cmake file(READ ${POT_FILE} FILE_CONTENT) string(REPLACE "charset=CHARSET" "charset=UTF-8" FILE_CONTENT "${FILE_CONTENT}") string(REGEX REPLACE "POT-Creation-Date: [^\\]*" "POT-Creation-Date: " FILE_CONTENT "${FILE_CONTENT}") string(REPLACE "${SOURCE_DIR}/" "" FILE_CONTENT "${FILE_CONTENT}") file(WRITE ${POT_FILE} "${FILE_CONTENT}") leatherman-1.4.2+dfsg/cmake/options.cmake000064400000000000000000000017071332360634000203350ustar00rootroot00000000000000include(leatherman) defoption(COVERALLS "Generate code coverage using Coveralls.io" OFF) defoption(BOOST_STATIC "Use Boost's static libraries" OFF) defoption(CURL_STATIC "Use curl's static libraries" OFF) set(CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE ON CACHE BOOL "Prepend project includes before system includes") set(LIB_SUFFIX "" CACHE STRING "Library install suffix") # Solaris and AIX have poor support for std::locale and boost::locale # with GCC. Don't use them by default. if (CMAKE_SYSTEM_NAME MATCHES "AIX" OR CMAKE_SYSTEM_NAME MATCHES "SunOS") set(USE_BOOST_LOCALE FALSE) else() set(USE_BOOST_LOCALE TRUE) endif() defoption(LEATHERMAN_USE_LOCALES "Use locales for internationalization" ${USE_BOOST_LOCALE}) # Provided so it can be disabled temporarily when we don't have gettext built. defoption(LEATHERMAN_GETTEXT "Support localization with gettext" ON) # Map our boost option to the for-realsies one set(Boost_USE_STATIC_LIBS ${BOOST_STATIC}) leatherman-1.4.2+dfsg/cmake/pod2man.cmake000064400000000000000000000024421332360634000201770ustar00rootroot00000000000000# Taken from https://github.com/tarantool/tarantool # Generate man pages of the project by using the POD header # written in the tool source code. To use it - include this # file in CMakeLists.txt and invoke # pod2man(
) find_program(POD2MAN pod2man) if(NOT POD2MAN) message(STATUS "Could not find pod2man - man pages disabled") endif(NOT POD2MAN) macro(pod2man PODFILE MANFILE SECTION OUTPATH CENTER) if(NOT EXISTS ${PODFILE}) message(FATAL ERROR "Could not find pod file ${PODFILE} to generate man page") endif(NOT EXISTS ${PODFILE}) if(POD2MAN) set(OUTPATH_NEW "${PROJECT_BINARY_DIR}/${OUTPATH}") add_custom_command( OUTPUT ${OUTPATH_NEW}/${MANFILE}.${SECTION} COMMAND ${POD2MAN} --section ${SECTION} --center ${CENTER} --release "\"\"" --name ${MANFILE} ${PODFILE} ${OUTPATH_NEW}/${MANFILE}.${SECTION} ) set(MANPAGE_TARGET "man-${MANFILE}") add_custom_target(${MANPAGE_TARGET} ALL DEPENDS ${OUTPATH_NEW}/${MANFILE}.${SECTION} ) install( FILES ${OUTPATH_NEW}/${MANFILE}.${SECTION} DESTINATION ${OUTPATH}/man${SECTION} ) endif() endmacro(pod2man PODFILE MANFILE SECTION OUTPATH CENTER) leatherman-1.4.2+dfsg/curl/000075500000000000000000000000001332360634000155205ustar00rootroot00000000000000leatherman-1.4.2+dfsg/curl/CMakeLists.txt000064400000000000000000000025661332360634000202710ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS regex system filesystem) add_leatherman_deps("${Boost_LIBRARIES}") add_leatherman_includes("${Boost_INCLUDE_DIRS}") if (BUILDING_LEATHERMAN AND LEATHERMAN_MOCK_CURL) # Create a mock curl library; it needs to be separate to allow for dllimport in the client source # Do it first to avoid symbol_exports defined later. add_subdirectory(tests) export_var(LEATHERMAN_INT_CURL_LIBS) export_var(LEATHERMAN_TEST_CURL_LIB) endif() find_package(CURL REQUIRED) if (CURL_STATIC) add_definitions(-DCURL_STATICLIB) if (WIN32) # Linking statically on Windows requires some extra libraries. set(CURL_DEPS wldap32.lib ws2_32.lib) endif() endif() add_leatherman_includes("${CURL_INCLUDE_DIRS}") leatherman_dependency(locale) leatherman_dependency(logging) leatherman_dependency(util) leatherman_dependency(file_util) add_leatherman_deps(${CURL_LIBRARIES} ${CURL_DEPS}) if (BUILDING_LEATHERMAN) leatherman_logging_namespace("leatherman.curl") leatherman_logging_line_numbers() endif() add_leatherman_library(src/client.cc src/request.cc src/response.cc EXPORTS "${CMAKE_CURRENT_LIST_DIR}/inc/leatherman/curl/export.h") add_leatherman_headers(inc/leatherman) if (BUILDING_LEATHERMAN AND LEATHERMAN_MOCK_CURL) add_leatherman_test(tests/client_test.cc tests/request_test.cc tests/response_test.cc) endif() leatherman-1.4.2+dfsg/curl/inc/000075500000000000000000000000001332360634000162715ustar00rootroot00000000000000leatherman-1.4.2+dfsg/curl/inc/leatherman/000075500000000000000000000000001332360634000204115ustar00rootroot00000000000000leatherman-1.4.2+dfsg/curl/inc/leatherman/curl/000075500000000000000000000000001332360634000213565ustar00rootroot00000000000000leatherman-1.4.2+dfsg/curl/inc/leatherman/curl/client.hpp000064400000000000000000000350271332360634000233540ustar00rootroot00000000000000/** * @file * Declares the HTTP client. */ #pragma once #include #include #include "request.hpp" #include "response.hpp" #include #include #include #include #include "export.h" namespace leatherman { namespace curl { /** * Resource for a cURL handle. */ struct LEATHERMAN_CURL_EXPORT curl_handle : util::scoped_resource { /** * Constructs a cURL handle. */ curl_handle(); private: static void cleanup(CURL* curl); }; /** * Resource for a cURL linked-list. */ struct LEATHERMAN_CURL_EXPORT curl_list : util::scoped_resource { /** * Constructs a curl_list. */ curl_list(); /** * Appends the given string onto the list. 
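* (Illustrative note: client::set_headers builds the request header list this
* way, appending "Name: value" strings that are then handed to libcurl via
* CURLOPT_HTTPHEADER.)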
* @param value The string to append onto the list. */ void append(std::string const& value); private: static void cleanup(curl_slist* list); }; /** * Resource for a cURL escaped string. */ struct LEATHERMAN_CURL_EXPORT curl_escaped_string : util::scoped_resource { /** * Constructs a cURL escaped string. * @param handle The cURL handle to use to perform the escape. * @param str The string to escape. */ curl_escaped_string(curl_handle const& handle, std::string const& str); private: static void cleanup(char const* str); }; /** * Resource for a temporary file used during download */ struct LEATHERMAN_CURL_NO_EXPORT download_temp_file { /** * Constructs a temporary file that will be used to store the downloaded file's * contents. * @param req The HTTP request. * @param file_path The file path that this temporary file's contents will be written to. * @param perms The (optional) permissions of the downloaded file. */ download_temp_file(request const& req, std::string const& file_path, boost::optional perms); ~download_temp_file(); /* * Returns the underlying file pointer. */ FILE* get_fp(); /* * Writes the temporary file's contents to its file path. */ void write(); /* * Writes the temporary file's contents to the body of the * given response. * @param response The HTTP response to write the contents to */ void write(response& res); private: void close_fp(); void cleanup(); FILE* _fp; request _req; std::string _file_path; boost::filesystem::path _temp_path; }; /** * The exception for HTTP. */ struct LEATHERMAN_CURL_EXPORT http_exception : std::runtime_error { /** * Constructs an http_exception. * @param message The exception message. */ http_exception(std::string const& message) : runtime_error(message) { } }; /** * The exception for HTTP requests. */ struct LEATHERMAN_CURL_EXPORT http_request_exception : http_exception { /** * Constructs an http_request_exception. * @param req The HTTP request that caused the exception. * @param message The exception message. */ http_request_exception(request req, std::string const &message) : http_exception(message), _req(std::move(req)) { } /** * Gets the request associated with the exception * @return Returns the request associated with the exception. */ request const& req() const { return _req; } private: request _req; }; /** * The exception for curl_easy_setopt errors. */ struct LEATHERMAN_CURL_EXPORT http_curl_setup_exception : http_request_exception { /** * Constructs an http_curl_setup_exception. * @param req The HTTP request that caused the exception. * @param message The exception message. * @param curl_opt The CURL option that failed. */ http_curl_setup_exception(request req, CURLoption curl_opt, std::string const &message) : http_request_exception(req, message), _curl_opt(std::move(curl_opt)) { } /** * Gets the CURL option associated with the exception * @return Returns the CURL option associated with the exception. */ CURLoption const& curl_opt() const { return _curl_opt; } private: CURLoption _curl_opt; }; /** * The exception for HTTP file download server-side errors. */ struct LEATHERMAN_CURL_EXPORT http_file_download_exception : http_request_exception { /** * Constructs an http_file_download_exception. * @param request The request that caused the exception * @param file_path The file that was meant to be downloaded * @param message The exception message. 
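* (Illustrative note: this exception is thrown by client::download_file when
* the transfer fails on the server or network side, as opposed to a local file
* operation error; req() and file_path() identify the affected download.)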
*/ http_file_download_exception(request req, std::string file_path, std::string const &message) : http_request_exception(req, message), _file_path(std::move(file_path)) { } /** * Gets the file_path associated with the exception * @return Returns the file_path associated with the exception. */ std::string const& file_path() const { return _file_path; } private: std::string _file_path; }; /** * The exception for HTTP file download file operation errors. */ struct LEATHERMAN_CURL_EXPORT http_file_operation_exception : http_request_exception { /** * Constructs an http_file_operation_exception. * @param request The request that caused the exception * @param file_path The file that was meant to be downloaded * @param message The exception message. */ http_file_operation_exception(request req, std::string file_path, std::string const &message) : http_file_operation_exception(req, file_path, "", message) { } /** * Constructs an http_file_operation_exception. * @param request The request that caused the exception * @param file_path The file that was meant to be downloaded * @param temp_path The path to the temporary file that wasn't successfully cleaned up. * @param message The exception message. */ http_file_operation_exception(request req, std::string file_path, std::string temp_path, std::string const &message) : http_request_exception(req, message), _file_path(file_path), _temp_path(std::move(temp_path)) { } /** * Gets the file_path associated with the exception * @return Returns the file_path associated with the exception. */ std::string const& file_path() const { return _file_path; } /** * Gets the temp_path associated with the exception * @return Returns the temp_path associated with the exception. */ std::string const& temp_path() const { return _temp_path; } private: std::string _file_path; std::string _temp_path; }; /** * Implements a client for HTTP. * Note: this class is not thread-safe. */ struct LEATHERMAN_CURL_EXPORT client { /** * Constructs an HTTP client. */ client(); /** * Moves the given client into this client. * @param other The client to move into this client. */ client(client&& other); /** * Moves the given client into this client. * @param other The client to move into this client. * @return Returns this client. */ client& operator=(client&& other); /** * Performs a GET with the given request. * @param req The HTTP request to perform. * @return Returns the HTTP response. */ response get(request const& req); /** * Performs a POST with the given request. * @param req The HTTP request to perform. * @return Returns the HTTP response. */ response post(request const& req); /** * Performs a PUT with the given request. * @param req The HTTP request to perform. * @return Returns the HTTP response. */ response put(request const& req); /** * Downloads the file from the specified url. * Throws http_file_download_exception if anything goes wrong. * @param req The HTTP request to perform. * @param file_path The file that the downloaded contents will be written to. * @param perms The file permissions to apply when writing to file_path. * On Windows this only toggles read-only. */ void download_file(request const& req, std::string const& file_path, boost::optional perms = {}); /** * Downloads the file from the specified url. * Throws http_file_download_exception if anything goes wrong. * @param req The HTTP request to perform. * @param file_path The file that the downloaded contents will be written to. * @param response The HTTP response. 
The body will only be included if the response status is >= 400. * @param perms The file permissions to apply when writing to file_path. * On Windows this only toggles read-only. */ void download_file(request const& req, std::string const& file_path, response& res, boost::optional perms = {}); /** * Sets the path to the CA certificate file. * @param cert_file The path to the CA certificate file. */ void set_ca_cert(std::string const& cert_file); /** * Set client SSL certificate and key. * @param client_cert The path to the client's certificate file. * @param client_key The path to the client's key file. */ void set_client_cert(std::string const& client_cert, std::string const& client_key); /** * Set proxy information. * @param proxy String with following components [scheme]://[hostname]:[port]. * (see more: https://curl.haxx.se/libcurl/c/CURLOPT_PROXY.html) */ void set_proxy(std::string const& proxy); /** * Set and limit what protocols curl will support * @param client_protocols bitmask of CURLPROTO_* * (see more: http://curl.haxx.se/libcurl/c/CURLOPT_PROTOCOLS.html) */ void set_supported_protocols(long client_protocols); private: client(client const&) = delete; client& operator=(client const&) = delete; enum struct http_method { get, put, post }; struct context { context(request const& req, response& res) : req(req), res(res), read_offset(0) { } request const& req; response& res; size_t read_offset; curl_list request_headers; std::string response_buffer; }; std::string _ca_cert; std::string _client_cert; std::string _client_key; std::string _proxy; long _client_protocols = CURLPROTO_ALL; response perform(http_method method, request const& req); void download_file_helper(request const& req, std::string const& file_path, boost::optional res = {}, boost::optional perms = {}); LEATHERMAN_CURL_NO_EXPORT void set_method(context& ctx, http_method method); LEATHERMAN_CURL_NO_EXPORT void set_url(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_headers(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_cookies(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_body(context& ctx, http_method method); LEATHERMAN_CURL_NO_EXPORT void set_timeouts(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_header_write_callbacks(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_write_callbacks(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_write_callbacks(context& ctx, FILE* fp); LEATHERMAN_CURL_NO_EXPORT void set_client_info(context &ctx); LEATHERMAN_CURL_NO_EXPORT void set_ca_info(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_client_protocols(context& ctx); LEATHERMAN_CURL_NO_EXPORT void set_proxy_info(context& ctx); template LEATHERMAN_CURL_NO_EXPORT void curl_easy_setopt_maybe( context &ctx, CURLoption option, ParamType const& param ) { auto result = curl_easy_setopt(_handle, option, param); if (result != CURLE_OK) { throw http_curl_setup_exception(ctx.req, option, leatherman::locale::_("Failed setting up libcurl. 
Reason: {1}", curl_easy_strerror(result))); } } static size_t read_body(char* buffer, size_t size, size_t count, void* ptr); static int seek_body(void* ptr, curl_off_t offset, int origin); static size_t write_header(char* buffer, size_t size, size_t count, void* ptr); static size_t write_body(char* buffer, size_t size, size_t count, void* ptr); static size_t write_file(char *buffer, size_t size, size_t count, void* ptr); static int debug(CURL* handle, curl_infotype type, char* data, size_t size, void* ptr); curl_handle _handle; protected: /** * Returns a reference to a cURL handle resource used in the request. * This is primarily exposed for testing. * @return Returns a const reference to the cURL handle resource. */ curl_handle const& get_handle(); }; }} // namespace leatherman::curl leatherman-1.4.2+dfsg/curl/inc/leatherman/curl/request.hpp000064400000000000000000000075761332360634000235760ustar00rootroot00000000000000/** * @file * Declares the HTTP request. */ #pragma once #include #include #include #include "export.h" namespace leatherman { namespace curl { /** * Implements the HTTP request. */ struct LEATHERMAN_CURL_EXPORT request { /** * Constructs a HTTP request. * @param url The URL for the request. */ explicit request(std::string url); /** * Gets the URL for the request. * @return Returns the URL for the request. */ std::string const& url() const; /** * Adds a header to the request. * @param name The header name. * @param value The header value. */ void add_header(std::string name, std::string value); /** * Enumerates each header in the request. * @param callback The function to call for each header in the request. */ void each_header(std::function callback) const; /** * Gets a header by name. * @param name The header name to get. * @return Returns a pointer to the header's value or nullptr if the header is not present. */ std::string* header(std::string const& name); /** * Removes a header from the request. * @param name The name of the header to remove. */ void remove_header(std::string const& name); /** * Adds a cookie to the request. * @param name The cookie name. * @param value The cookie value. */ void add_cookie(std::string name, std::string value); /** * Enumerates each cookie in the request. * @param callback The function to call for each cookie in the request. */ void each_cookie(std::function callback) const; /** * Gets a cookie by name. * @param name The cookie name to get. * @return Returns a pointer to the cookie's value or nullptr if the cookie is not present. */ std::string* cookie(std::string const& name); /** * Removes a cookie from the request. * @param name The name of the cookie to remove. */ void remove_cookie(std::string const& name); /** * Sets the body of the request. * @param body The body of the request. * @param content_type The type of content (sets the Content-Type header). */ void body(std::string body, std::string content_type); /** * Gets the body of the request. * The type of the content is represented by the Content-Type header. * @return Returns the body of the request. */ std::string const& body() const; /** * Gets the overall request timeout, in milliseconds. * @return Returns the overall request timeout, in milliseconds. */ long timeout() const; /** * Sets the overall request timeout, in milliseconds. * @param value The timeout value, in milliseconds. */ void timeout(long value); /** * Gets the timeout for connecting to the remote host, in milliseconds. * @return Returns the connection timeout, in milliseconds. 
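* (Illustrative example: req.connection_timeout(5000) allows up to five seconds
* to establish the connection, while req.timeout(30000) bounds the request as a
* whole; both values default to 0, leaving the limits to libcurl's defaults.)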
*/ long connection_timeout() const; /** * Sets the timeout for connecting to the remote host, in milliseconds. * @param value The timeout value, in milliseconds. */ void connection_timeout(long value); private: std::string _url; std::string _body; long _timeout; long _connection_timeout; std::map _headers; std::map _cookies; }; }} // namespace leatherman::curl leatherman-1.4.2+dfsg/curl/inc/leatherman/curl/response.hpp000064400000000000000000000040571332360634000237330ustar00rootroot00000000000000/** * @file * Declares the HTTP response. */ #pragma once #include #include #include #include "export.h" namespace leatherman { namespace curl { /** * Implements the HTTP response. */ struct LEATHERMAN_CURL_EXPORT response { /** * Constructs a HTTP response. */ response(); /** * Adds a header to the response. * @param name The header name. * @param value The header value. */ void add_header(std::string name, std::string value); /** * Enumerates each header in the response. * @param callback The function to call for each header in the response. */ void each_header(std::function callback) const; /** * Gets a header by name. * @param name The header name to get. * @return Returns a pointer to the header's value or nullptr if the header is not present. */ const std::string* header(std::string const& name) const; /** * Removes a header from the response. * @param name The name of the header to remove. */ void remove_header(std::string const& name); /** * Sets the body of the response. * @param body The body of the response. */ void body(std::string body); /** * Gets the body of the response. * @return Returns the body of the response. */ std::string const& body() const; /** * Sets the status code of the response. * @param code The status code of the response. */ void status_code(int code); /** * Gets the status code of the response. * @return Returns the status code of the response. 
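* (Illustrative example: a caller might treat res.status_code() >= 400 as a
* failed request and inspect res.body() for the server's error message.)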
*/ int status_code() const; private: int _status_code; std::string _body; std::map _headers; }; }} // namespace leatherman::curl leatherman-1.4.2+dfsg/curl/src/000075500000000000000000000000001332360634000163075ustar00rootroot00000000000000leatherman-1.4.2+dfsg/curl/src/client.cc000064400000000000000000000434551332360634000201070ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; namespace fs = boost::filesystem; namespace leatherman { namespace curl { // Helper for globally initializing curl struct curl_init_helper { curl_init_helper() { _result = curl_global_init(CURL_GLOBAL_DEFAULT); } ~curl_init_helper() { if (_result == CURLE_OK) { curl_global_cleanup(); } } CURLcode result() const { return _result; } private: CURLcode _result; }; curl_handle::curl_handle() : scoped_resource(nullptr, cleanup) { // Perform initialization static curl_init_helper init_helper; if (init_helper.result() != CURLE_OK) { throw http_exception(curl_easy_strerror(init_helper.result())); } _resource = curl_easy_init(); } void curl_handle::cleanup(CURL* curl) { if (curl) { curl_easy_cleanup(curl); } } curl_list::curl_list() : scoped_resource(nullptr, cleanup) { } void curl_list::append(string const& value) { _resource = curl_slist_append(_resource, value.c_str()); } void curl_list::cleanup(curl_slist* list) { if (list) { curl_slist_free_all(list); } } curl_escaped_string::curl_escaped_string(curl_handle const& handle, string const& str) : scoped_resource(nullptr, cleanup) { _resource = curl_easy_escape(handle, str.c_str(), str.size()); if (!_resource) { throw http_exception(_("curl_easy_escape failed to escape string.")); } } void curl_escaped_string::cleanup(char const* str) { if (str) { curl_free(const_cast(str)); } } static std::string make_file_err_msg(std::string const& reason) { return _("File operation error: {1}", reason); } download_temp_file::download_temp_file(request const& req, std::string const& file_path, boost::optional perms) : _req(req), _file_path(file_path) { try { _temp_path = fs::path(file_path).parent_path() / fs::unique_path("temp_file_%%%%-%%%%-%%%%-%%%%"); _fp = boost::nowide::fopen(_temp_path.string().c_str(), "wb"); if (!_fp) { throw http_file_operation_exception(_req, _file_path, make_file_err_msg(_("failed to open temporary file for writing"))); } if (!perms) { return; } boost::system::error_code ec; fs::permissions(_temp_path.string(), *perms, ec); if (ec) { cleanup(); throw http_file_operation_exception(_req, _file_path, make_file_err_msg(_("failed to modify permissions of temporary file"))); } } catch (fs::filesystem_error& e) { throw http_file_operation_exception(_req, _file_path, make_file_err_msg(e.what())); } } download_temp_file::~download_temp_file() { cleanup(); } FILE* download_temp_file::get_fp() { return _fp; } void download_temp_file::write() { LOG_DEBUG("Download completed, now writing result to file {1}", _file_path); close_fp(); boost::system::error_code ec; fs::rename(_temp_path, _file_path, ec); if (ec) { LOG_WARNING("Failed to write the results of the temporary file to the actual file {1}", _file_path); throw http_file_operation_exception(_req, _file_path, make_file_err_msg(_("failed to move over the temporary file's downloaded contents"))); } } void download_temp_file::write(response& res) { LOG_DEBUG("Writing the temp file's contents to the response body"); close_fp(); 
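// Read the downloaded contents back from the temporary file and surface them
// as the response body, so callers can inspect the server's error output.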
string res_body; if (!leatherman::file_util::read(_temp_path.string(), res_body)) { LOG_WARNING("Failed to write the contents of the temporary file to the response body."); throw http_file_operation_exception(_req, _file_path, make_file_err_msg(_("failed to write the temporary file's contents to the response body"))); } res.body(res_body); } void download_temp_file::close_fp() { fclose(_fp); _fp = nullptr; } void download_temp_file::cleanup() { if (_fp) { fclose(_fp); } boost::system::error_code ec; fs::remove(_temp_path, ec); if (ec) { LOG_WARNING("Failed to properly clean-up the temporary file {1}", _temp_path); } } client::client() { if (!_handle) { throw http_exception(_("failed to create cURL handle.")); } } client::client(client && other) { *this = move(other); } client &client::operator=(client && other) { _handle = move(other._handle); return *this; } response client::get(request const& req) { return perform(http_method::get, req); } response client::post(request const& req) { return perform(http_method::post, req); } response client::put(request const& req) { return perform(http_method::put, req); } response client::perform(http_method method, request const& req) { response res; context ctx(req, res); // Reset the options curl_easy_reset(_handle); // Set common options curl_easy_setopt_maybe(ctx, CURLOPT_NOPROGRESS, 1); curl_easy_setopt_maybe(ctx, CURLOPT_FOLLOWLOCATION, 1); // Set tracing from libcurl if enabled (we don't care if this fails) if (LOG_IS_DEBUG_ENABLED()) { curl_easy_setopt(_handle, CURLOPT_DEBUGFUNCTION, debug); curl_easy_setopt(_handle, CURLOPT_VERBOSE, 1); } // Setup the request set_method(ctx, method); set_url(ctx); set_headers(ctx); set_cookies(ctx); set_body(ctx, method); set_timeouts(ctx); set_write_callbacks(ctx); set_ca_info(ctx); set_client_info(ctx); set_client_protocols(ctx); set_proxy_info(ctx); // Perform the request auto result = curl_easy_perform(_handle); if (result != CURLE_OK) { throw http_request_exception(req, curl_easy_strerror(result)); } LOG_DEBUG("request completed (status {1}).", res.status_code()); // Set the body of the response res.body(move(ctx.response_buffer)); return res; } void client::download_file(request const& req, std::string const& file_path, boost::optional perms) { download_file_helper(req, file_path, {}, perms); } void client::download_file(request const& req, std::string const& file_path, response& res, boost::optional perms) { download_file_helper(req, file_path, res, perms); } void client::download_file_helper(request const& req, std::string const& file_path, boost::optional res, boost::optional perms) { response _res; context ctx(req, _res); // Reset the options curl_easy_reset(_handle); char errbuf[CURL_ERROR_SIZE] = { '\0' }; download_temp_file temp_file(req, file_path, perms); curl_easy_setopt_maybe(ctx, CURLOPT_NOPROGRESS, 1); // Setup the remaining request set_url(ctx); set_headers(ctx); set_timeouts(ctx); set_write_callbacks(ctx, temp_file.get_fp()); set_ca_info(ctx); set_client_info(ctx); set_client_protocols(ctx); set_proxy_info(ctx); // More detailed error messages curl_easy_setopt_maybe(ctx, CURLOPT_ERRORBUFFER, errbuf); // Perform the request auto result = curl_easy_perform(_handle); if (result == CURLE_WRITE_ERROR) { throw http_file_operation_exception(req, file_path, make_file_err_msg(_("failed to write to the temporary file during download"))); } else if (result != CURLE_OK) { throw http_file_download_exception(req, file_path, _("File download server side error: {1}", errbuf)); } // Check the status code. 
If 400+, fill in the response LOG_DEBUG("request completed (status {1}).", _res.status_code()); if (_res.status_code() >= 400 && res) { temp_file.write(_res); } else { temp_file.write(); } if (res) { (*res) = move(_res); } } void client::set_ca_cert(string const& cert_file) { _ca_cert = cert_file; } void client::set_proxy(string const& proxy) { _proxy = proxy; } void client::set_client_cert(string const& client_cert, string const& client_key) { _client_cert = client_cert; _client_key = client_key; } void client::set_supported_protocols(long client_protocols) { _client_protocols = client_protocols; } void client::set_method(context& ctx, http_method method) { switch (method) { case http_method::get: // Unnecessary since we're resetting the handle before calling this function return; case http_method::post: { curl_easy_setopt_maybe(ctx, CURLOPT_POST, 1); break; } case http_method::put: { curl_easy_setopt_maybe(ctx, CURLOPT_UPLOAD, 1); break; } default: throw http_request_exception(ctx.req, _("unexpected HTTP method specified.")); } } void client::set_url(context& ctx) { // TODO: support an easy interface for setting escaped query parameters curl_easy_setopt_maybe(ctx, CURLOPT_URL, ctx.req.url().c_str()); LOG_DEBUG("requesting {1}.", ctx.req.url()); } void client::set_headers(context& ctx) { ctx.req.each_header([&](string const& name, string const& value) { ctx.request_headers.append(name + ": " + value); return true; }); curl_easy_setopt_maybe(ctx, CURLOPT_HTTPHEADER, static_cast(ctx.request_headers)); } void client::set_cookies(context& ctx) { ostringstream cookies; ctx.req.each_cookie([&](string const& name, string const& value) { if (cookies.tellp() != 0) { cookies << "; "; } cookies << name << "=" << value; return true; }); curl_easy_setopt_maybe(ctx, CURLOPT_COOKIE, cookies.str().c_str()); } void client::set_body(context& ctx, http_method method) { curl_easy_setopt_maybe(ctx, CURLOPT_READFUNCTION, read_body); curl_easy_setopt_maybe(ctx, CURLOPT_READDATA, &ctx); curl_easy_setopt_maybe(ctx, CURLOPT_SEEKFUNCTION, seek_body); curl_easy_setopt_maybe(ctx, CURLOPT_SEEKDATA, &ctx); switch (method) { case http_method::post: { curl_easy_setopt_maybe(ctx, CURLOPT_POSTFIELDSIZE_LARGE, ctx.req.body().size()); break; } case http_method::put: { curl_easy_setopt_maybe(ctx, CURLOPT_INFILESIZE_LARGE, ctx.req.body().size()); break; } default: break; } } void client::set_timeouts(context& ctx) { curl_easy_setopt_maybe(ctx, CURLOPT_CONNECTTIMEOUT_MS, ctx.req.connection_timeout()); curl_easy_setopt_maybe(ctx, CURLOPT_TIMEOUT_MS, ctx.req.timeout()); } void client::set_header_write_callbacks(context& ctx) { curl_easy_setopt_maybe(ctx, CURLOPT_HEADERFUNCTION, write_header); curl_easy_setopt_maybe(ctx, CURLOPT_HEADERDATA, &ctx); } void client::set_write_callbacks(context& ctx) { set_header_write_callbacks(ctx); curl_easy_setopt_maybe(ctx, CURLOPT_WRITEFUNCTION, write_body); curl_easy_setopt_maybe(ctx, CURLOPT_WRITEDATA, &ctx); } void client::set_write_callbacks(context& ctx, FILE* fp) { set_header_write_callbacks(ctx); curl_easy_setopt_maybe(ctx, CURLOPT_WRITEFUNCTION, write_file); curl_easy_setopt_maybe(ctx, CURLOPT_WRITEDATA, fp); } void client::set_ca_info(context& ctx){ if (_ca_cert == "") { return; } curl_easy_setopt_maybe(ctx, CURLOPT_CAINFO, _ca_cert.c_str()); } void client::set_client_info(context &ctx) { if (_client_cert == "" || _client_key == "") { return; } curl_easy_setopt_maybe(ctx, CURLOPT_SSLCERT, _client_cert.c_str()); curl_easy_setopt_maybe(ctx, CURLOPT_SSLKEY, _client_key.c_str()); } void 
client::set_proxy_info(context &ctx) { if (_proxy == "") { return; } curl_easy_setopt_maybe(ctx, CURLOPT_PROXY, _proxy.c_str()); } void client::set_client_protocols(context& ctx) { curl_easy_setopt_maybe(ctx, CURLOPT_PROTOCOLS, _client_protocols); } size_t client::read_body(char* buffer, size_t size, size_t count, void* ptr) { auto ctx = reinterpret_cast(ptr); size_t requested = size * count; auto const& body = ctx->req.body(); if (requested > (body.size() - ctx->read_offset)) { requested = (body.size() - ctx->read_offset); } if (requested > 0) { memcpy(buffer, body.c_str() + ctx->read_offset, requested); ctx->read_offset += requested; } return requested; } int client::seek_body(void* ptr, curl_off_t offset, int origin) { auto ctx = reinterpret_cast(ptr); // Only setting offset from the beginning is supported and the CURL docs // claim this is the only way this gets called if (origin != SEEK_SET) { return CURL_SEEKFUNC_FAIL; } // Since we only support an absolute offset, we should not support // negative offsets to prevent reading data from before the buffer if (offset < 0) { return CURL_SEEKFUNC_FAIL; } ctx->read_offset = offset; return CURL_SEEKFUNC_OK; } size_t client::write_header(char* buffer, size_t size, size_t count, void* ptr) { size_t written = size * count; boost::string_ref input(buffer, written); auto ctx = reinterpret_cast(ptr); // If the header starts with "HTTP/", then we have the response status if (input.starts_with("HTTP/")) { // Reset the response buffer ctx->response_buffer.clear(); // Parse out the error code static boost::regex regex("HTTP/\\d\\.\\d (\\d\\d\\d).*"); int status_code = 0; if (util::re_search(input.to_string(), regex, &status_code)) { ctx->res.status_code(status_code); } return written; } else if (input == "\r\n") { // Ignore the response delimiter return written; } auto pos = input.find_first_of(':'); if (pos == boost::string_ref::npos) { LOG_WARNING("unexpected HTTP response header: {1}.", input); return written; } auto name = input.substr(0, pos).to_string(); auto value = input.substr(pos + 1).to_string(); boost::trim(name); boost::trim(value); // If this is the "Content-Length" header, reserve the response buffer as an optimization if (name == "Content-Length") { try { ctx->response_buffer.reserve(stoi(value)); } catch (logic_error&) { } } ctx->res.add_header(move(name), move(value)); return written; } size_t client::write_body(char* buffer, size_t size, size_t count, void* ptr) { size_t written = size * count; auto ctx = reinterpret_cast(ptr); if (written > 0) { ctx->response_buffer.append(buffer, written); } return written; } size_t client::write_file(char *buffer, size_t size, size_t count, void* ptr) { return fwrite(buffer, size, count, reinterpret_cast(ptr)); } int client::debug(CURL* handle, curl_infotype type, char* data, size_t size, void* ptr) { if (type > CURLINFO_DATA_OUT) { return 0; } string str(data, size); boost::trim(str); if (str.empty()) { return 0; } // Only log cURL's text to debug if (type == CURLINFO_TEXT) { LOG_DEBUG(str); return 0; } else if (!LOG_IS_TRACE_ENABLED()) { return 0; } ostringstream header; if (type == CURLINFO_HEADER_IN) { header << "[response headers: " << size << " bytes]\n"; } else if (type == CURLINFO_HEADER_OUT) { header << "[request headers: " << size << " bytes]\n"; } else if (type == CURLINFO_DATA_IN) { header << "[response body: " << size << " bytes]\n"; } else if (type == CURLINFO_DATA_OUT) { header << "[request body: " << size << " bytes]\n"; } LOG_TRACE("{1}{2}", header.str(), str); return 0; } 
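// Illustrative usage sketch (not part of the original source; the URL below is
// a placeholder): a typical caller constructs a client, issues a request and
// inspects the response.
//
//     client cli;
//     request req("https://example.com/api");
//     req.add_header("Accept", "application/json");
//     auto res = cli.get(req);
//     if (res.status_code() >= 400) {
//         // handle the failure, e.g. log res.body()
//     }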
curl_handle const& client::get_handle() { return _handle; } }} // leatherman::curl leatherman-1.4.2+dfsg/curl/src/request.cc000064400000000000000000000045541332360634000203160ustar00rootroot00000000000000#include using namespace std; namespace leatherman { namespace curl { request::request(string url) : _url(move(url)), _timeout(0), _connection_timeout(0) { } string const& request::url() const { return _url; } void request::add_header(string name, string value) { _headers.emplace(make_pair(move(name), move(value))); } void request::each_header(function callback) const { if (!callback) { return; } for (auto const& kvp : _headers) { if (!callback(kvp.first, kvp.second)) { return; } } } string* request::header(string const& name) { auto header = _headers.find(name); if (header == _headers.end()) { return nullptr; } return &header->second; } void request::remove_header(string const& name) { _headers.erase(name); } void request::add_cookie(string name, string value) { _cookies.emplace(make_pair(move(name), move(value))); } void request::each_cookie(function callback) const { if (!callback) { return; } for (auto const& kvp : _cookies) { if (!callback(kvp.first, kvp.second)) { return; } } } string* request::cookie(string const& name) { auto cookie = _cookies.find(name); if (cookie == _cookies.end()) { return nullptr; } return &cookie->second; } void request::remove_cookie(string const& name) { _cookies.erase(name); } void request::body(string body, string content_type) { _body = move(body); add_header("Content-Type", move(content_type)); } string const& request::body() const { return _body; } long request::timeout() const { return _timeout; } void request::timeout(long value) { _timeout = value < 0 ? 0 : value; } long request::connection_timeout() const { return _connection_timeout; } void request::connection_timeout(long value) { _connection_timeout = value < 0 ? 
0 : value; } }} // leatherman::curl leatherman-1.4.2+dfsg/curl/src/response.cc000064400000000000000000000023661332360634000204630ustar00rootroot00000000000000#include using namespace std; namespace leatherman { namespace curl { response::response() : _status_code(0) { } void response::add_header(string name, string value) { _headers.emplace(make_pair(move(name), move(value))); } void response::each_header(function callback) const { if (!callback) { return; } for (auto const& kvp : _headers) { if (!callback(kvp.first, kvp.second)) { return; } } } const string* response::header(string const& name) const { auto header = _headers.find(name); if (header == _headers.end()) { return nullptr; } return &header->second; } void response::remove_header(string const& name) { _headers.erase(name); } void response::body(string body) { _body = move(body); } string const& response::body() const { return _body; } int response::status_code() const { return _status_code; } void response::status_code(int status) { _status_code = status; } }} // leatherman::curl leatherman-1.4.2+dfsg/curl/tests/000075500000000000000000000000001332360634000166625ustar00rootroot00000000000000leatherman-1.4.2+dfsg/curl/tests/CMakeLists.txt000064400000000000000000000012231332360634000214200ustar00rootroot00000000000000find_package(CURL REQUIRED) include_directories(BEFORE ${CURL_INCLUDE_DIRS}) if (CURL_STATIC) set(CURL_LINK STATIC) else() set(CURL_LINK SHARED) endif() add_library(mock_curl ${CURL_LINK} mock_curl.cc) set_target_properties(mock_curl PROPERTIES COMPILE_FLAGS "${LEATHERMAN_CXX_FLAGS}") if (WIN32) symbol_exports(mock_curl "${CMAKE_CURRENT_LIST_DIR}/export.h") endif() # Namespacing curl libs so we don't pollute the global namespace # Needed to properly link libmock_curl in tests set(LEATHERMAN_INT_CURL_LIBS ${CURL_LIBRARIES}) export_var(LEATHERMAN_INT_CURL_LIBS) set(LEATHERMAN_TEST_CURL_LIB mock_curl) export_var(LEATHERMAN_TEST_CURL_LIB) leatherman-1.4.2+dfsg/curl/tests/client_test.cc000064400000000000000000000642331332360634000215160ustar00rootroot00000000000000#include #include "mock_curl.hpp" #include "fixtures.hpp" #include #include #include #include #include #include #include #include #include #include using namespace std; namespace fs = boost::filesystem; namespace nw = boost::nowide; using namespace leatherman::curl; #define REQUIRE_THROWS_AS_WITH(expression, exception_type, msg_matcher) {\ try {\ expression;\ REQUIRE(false);\ } catch (exception_type& e) {\ REQUIRE_THAT(e.what(), msg_matcher);\ } catch (exception& e) {\ REQUIRE(false);\ }\ } // TODO: Move non-test code to "fixtures.hpp" and "fixtures.cc". fs::path find_matching_file(const boost::regex& re) { auto file = find_if( fs::recursive_directory_iterator(fs::current_path()), fs::recursive_directory_iterator(), [re](const fs::path& f) { return boost::regex_match(f.filename().string(), re); }); // throw exception, as this means that the matching file does not exist. 
if (file == fs::recursive_directory_iterator()) { throw std::runtime_error("matching file not found"); } return *file; } void remove_temp_file() { auto temp_path = find_matching_file(boost::regex(TEMP_FILE_REGEX)); fs::remove(temp_path); } struct mock_client : client { curl_handle const& get_handle() { return client::get_handle(); } }; TEST_CASE("curl::client HTTP methods") { mock_client test_client; request test_request {"http://valid.com/"}; SECTION("GET succeeds on a given URL") { auto resp = test_client.get(test_request); REQUIRE(resp.status_code() == 200); } SECTION("POST succeeds on a given URL") { auto resp = test_client.post(test_request); REQUIRE(resp.status_code() == 200); } SECTION("PUT succeeds on a given URL") { auto resp = test_client.put(test_request); REQUIRE(resp.status_code() == 200); } SECTION("Request returns status code 404 on invalid URL") { request invalid_test_request {"http://invalid.com/"}; auto resp = test_client.get(invalid_test_request); REQUIRE(resp.status_code() == 404); } } TEST_CASE("curl::client HTTP request setup") { mock_client test_client; request test_request {"http://valid.com"}; SECTION("HTTP method is set to GET given a GET request") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->method == curl_impl::http_method::get); } SECTION("HTTP method is set to POST given a POST request") { auto resp = test_client.post(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->method == curl_impl::http_method::post); } SECTION("HTTP method is set to PUT given a PUT request") { auto resp = test_client.put(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->method == curl_impl::http_method::put); } SECTION("cURL should receive the URL specified in the request") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->request_url == "http://valid.com"); } } TEST_CASE("curl::client header and body writing and reading") { mock_client test_client; /* * Header writing and reading tests */ SECTION("Custom request headers should be honored in the request to the server") { request test_request {"http://valid.com"}; test_request.add_header("header_name", "header_value"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->header); REQUIRE(test_impl->header->data == string("header_name: header_value")); } SECTION("The header response delimiter should be ignored") { request test_request {"http://response-delimiter.com/"}; auto resp = test_client.get(test_request); int headers = 0; resp.each_header([&](string const& name, string const& value) { ++headers; return true; }); REQUIRE(headers == 0); } SECTION("Non-standard response header should be parsed for name and value") { request test_request {"http://nonstd-header.com/"}; auto resp = test_client.get(test_request); REQUIRE(resp.header("nonstd_header_name")); REQUIRE(*(resp.header("nonstd_header_name")) == "nonstd_header_value"); } SECTION("Invalid headers should not be parsed or returned in the response") { request test_request {"http://invalid-header.com/"}; auto resp = test_client.get(test_request); int headers = 0; resp.each_header([&](string const& name, string const& value) { 
++headers; return true; }); REQUIRE(headers == 0); } /* * Body writing and reading tests */ SECTION("Request body should be settable and readable") { request test_request {"http://valid.com"}; test_request.body("Hello, I am a request body!", "message"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->read_buffer == "Hello, I am a request body!"); } SECTION("Response body should be what is in the data part of the cURL response") { CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); test_impl->resp_body = "Hello, I am a response body!"; request test_request {"http://valid.com"}; auto resp = test_client.get(test_request); REQUIRE(resp.body() == "Hello, I am a response body!"); } } TEST_CASE("curl::client cookies") { mock_client test_client; request test_request {"http://valid.com"}; SECTION("There should be no cookies in the request by default") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->cookie == ""); } SECTION("Cookies should be present in the request when added") { test_request.add_cookie("cookie_name", "cookie_val"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->cookie == "cookie_name=cookie_val"); } SECTION("Cookies should be removable from the request") { test_request.add_cookie("cookie_0", "cookie_val_0"); test_request.add_cookie("cookie_1", "cookie_val_1"); test_request.remove_cookie("cookie_1"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->cookie == "cookie_0=cookie_val_0"); } SECTION("cURL should receieve cookies specified in the request") { test_request.add_cookie("cookie_0", "cookie_val_0"); test_request.add_cookie("cookie_1", "cookie_val_1"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->cookie == "cookie_0=cookie_val_0; cookie_1=cookie_val_1"); } } TEST_CASE("curl::client CA bundle and SSL setup") { mock_client test_client; request test_request {"http://valid.com"}; SECTION("Path to CA certificate should be unspecified by default") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->cacert == ""); } SECTION("cURL should receive the path to the CA certificate specified in the request") { test_client.set_ca_cert("cacert"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->cacert == "cacert"); } SECTION("Proxy should be unspecified by default") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->proxy == ""); } SECTION("cURL should receive the proxy specified in the request") { test_client.set_proxy("proxy"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->proxy == "proxy"); } SECTION("Client cert name should be unspecified by default") { auto resp = test_client.get(test_request); CURL* const& handle = 
test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->client_cert == ""); } SECTION("cURL should receive the client cert name specified in the request") { test_client.set_client_cert("cert", "key"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->client_cert == "cert"); } SECTION("Private keyfile name should be unspecified by default") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->client_key == ""); } SECTION("cURL should receive the private keyfile name specified in the request") { test_client.set_client_cert("cert", "key"); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->client_key == "key"); } SECTION("cURL should make an HTTP request with the specified HTTP protocol") { test_client.set_supported_protocols(CURLPROTO_HTTP); auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->protocols == CURLPROTO_HTTP); } SECTION("cURL defaults to all protocols if no protocols are specified") { auto resp = test_client.get(test_request); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); REQUIRE(test_impl->protocols == CURLPROTO_ALL); } } TEST_CASE("curl::client errors") { mock_client test_client; request test_request {"http://valid.com/"}; CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); /* * Note: we do not currently test the case where cURL errors * on curl_global_init, as the global init is done as part of * static initialization in the cURL helper, and there is * currently no way to force it to be reinitialized. 
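* (The curl_fail_init RAII helper defined in mock_curl can still be used to
* simulate curl_easy_init failures on a per-test basis, as the first section
* below does.)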
*/ SECTION("client fails to initialize a libcurl easy session") { curl_fail_init mock_error {easy_init_error}; REQUIRE_THROWS_AS(mock_client(), http_exception); } SECTION("client fails to perform a cURL request") { test_impl->test_failure_mode = curl_impl::error_mode::easy_perform_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_request_exception); } SECTION("client fails to set HTTP method to POST") { test_impl->test_failure_mode = curl_impl::error_mode::http_post_error; REQUIRE_THROWS_AS(test_client.post(test_request), http_curl_setup_exception); } SECTION("client fails to set HTTP method to PUT") { test_impl->test_failure_mode = curl_impl::error_mode::http_put_error; REQUIRE_THROWS_AS(test_client.put(test_request), http_curl_setup_exception); } SECTION("client fails to set the request URL") { test_impl->test_failure_mode = curl_impl::error_mode::set_url_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the request headers") { test_impl->test_failure_mode = curl_impl::error_mode::set_header_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set cookies in the request") { test_impl->test_failure_mode = curl_impl::error_mode::set_cookie_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the header callback function") { test_impl->test_failure_mode = curl_impl::error_mode::header_function_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the header write location") { test_impl->test_failure_mode = curl_impl::error_mode::header_context_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the body writing callback function") { test_impl->test_failure_mode = curl_impl::error_mode::write_body_function_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the body write location") { test_impl->test_failure_mode = curl_impl::error_mode::write_body_context_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the read_body callback function") { test_impl->test_failure_mode = curl_impl::error_mode::read_body_function_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the read_body data source") { test_impl->test_failure_mode = curl_impl::error_mode::read_body_context_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the connection timeout") { test_impl->test_failure_mode = curl_impl::error_mode::connect_timeout_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set the request timeout") { test_impl->test_failure_mode = curl_impl::error_mode::request_timeout_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set certificate authority info") { test_client.set_ca_cert("certfile"); test_impl->test_failure_mode = curl_impl::error_mode::ca_bundle_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set SSL cert info") { test_client.set_client_cert("cert", "key"); test_impl->test_failure_mode = curl_impl::error_mode::ssl_cert_error; 
REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to set SSL key info") { test_client.set_client_cert("cert", "key"); test_impl->test_failure_mode = curl_impl::error_mode::ssl_key_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } SECTION("client fails to make http call with https protocol only enabled") { test_client.set_supported_protocols(CURLPROTO_HTTPS); test_impl->test_failure_mode = curl_impl::error_mode::protocol_error; REQUIRE_THROWS_AS(test_client.get(test_request), http_curl_setup_exception); } } TEST_CASE("curl::client download_file") { mock_client test_client; temp_directory temp_dir; fs::path temp_dir_path = fs::path(temp_dir.get_dir_name()); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); std::string url = "https://download.com"; SECTION("when a response is not passed in") { SECTION("successfully downloads the file to the specified location") { std::string ca_file = "ca"; std::string cert_file = "client"; std::string key_file = "key"; test_client.set_ca_cert(ca_file); test_client.set_client_cert(cert_file, key_file); test_client.set_supported_protocols(CURLPROTO_HTTPS); std::string file_path = (temp_dir_path / "test_file").string(); std::string token = "token"; long connect_timeout = 300000; request req(url); req.add_header("X-Authentication", token); req.connection_timeout(connect_timeout); test_client.download_file(req, file_path); // ensure that the correct curl parameters were used. REQUIRE(test_impl->cacert == ca_file); REQUIRE(test_impl->client_cert == cert_file); REQUIRE(test_impl->client_key == key_file); REQUIRE(test_impl->protocols == CURLPROTO_HTTPS); REQUIRE(test_impl->connect_timeout == connect_timeout); REQUIRE(std::string(test_impl->header->data) == ("X-Authentication: " + token)); if (test_impl->header->next) { FAIL("X-Authentication should be the only header"); } // now check that the file was actually downloaded and written with the right // contents. REQUIRE(fs::exists(file_path)); nw::ifstream in(file_path); stringstream stream; stream << in.rdbuf(); REQUIRE(stream.str() == "successfully downloaded file"); } #ifndef _WIN32 SECTION("sets permissions if requested") { auto file_path = (temp_dir_path / "other_test_file").string(); request req(url); auto perms = boost::filesystem::owner_read | boost::filesystem::owner_write; test_client.download_file(req, file_path, perms); REQUIRE(fs::exists(file_path)); REQUIRE(fs::status(file_path).permissions() == perms); } #endif SECTION("downloads the response body for a 400+ status") { std::string url = "https://download_trigger_404.com"; auto file_path = (temp_dir_path / "404_test_file").string(); request req(url); test_client.download_file(req, file_path); // now check that the file was actually downloaded and written with the right // contents. 
REQUIRE(fs::exists(file_path)); nw::ifstream in(file_path); stringstream stream; stream << in.rdbuf(); REQUIRE(stream.str() == "Not found"); } } SECTION("when a response is passed in") { SECTION("successfully downloads the file to the specified location, and includes the response") { std::string ca_file = "ca"; std::string cert_file = "client"; std::string key_file = "key"; test_client.set_ca_cert(ca_file); test_client.set_client_cert(cert_file, key_file); test_client.set_supported_protocols(CURLPROTO_HTTPS); std::string file_path = (temp_dir_path / "test_file").string(); std::string token = "token"; long connect_timeout = 300000; request req(url); req.add_header("X-Authentication", token); req.connection_timeout(connect_timeout); response res; test_client.download_file(req, file_path, res); // ensure that the correct curl parameters were used. REQUIRE(test_impl->cacert == ca_file); REQUIRE(test_impl->client_cert == cert_file); REQUIRE(test_impl->client_key == key_file); REQUIRE(test_impl->protocols == CURLPROTO_HTTPS); REQUIRE(test_impl->connect_timeout == connect_timeout); REQUIRE(std::string(test_impl->header->data) == ("X-Authentication: " + token)); if (test_impl->header->next) { FAIL("X-Authentication should be the only header"); } // now check that the file was actually downloaded and written with the right // contents. REQUIRE(fs::exists(file_path)); nw::ifstream in(file_path); stringstream stream; stream << in.rdbuf(); REQUIRE(stream.str() == "successfully downloaded file"); // now check the response REQUIRE(res.status_code() == 200); REQUIRE(res.body().empty()); } #ifndef _WIN32 SECTION("sets permissions if requested") { auto file_path = (temp_dir_path / "other_test_file").string(); request req(url); response res; auto perms = boost::filesystem::owner_read | boost::filesystem::owner_write; test_client.download_file(req, file_path, res, perms); REQUIRE(fs::exists(file_path)); REQUIRE(fs::status(file_path).permissions() == perms); } #endif SECTION("does not download anything for a 400+ status") { std::string url = "https://download_trigger_404.com"; auto file_path = (temp_dir_path / "404_test_file").string(); request req(url); response res; test_client.download_file(req, file_path, res); REQUIRE(res.status_code() == 404); REQUIRE(res.body() == "Not found"); // check that the file was not downloaded REQUIRE(!fs::exists(file_path)); } } } TEST_CASE("curl::client download_file errors") { mock_client test_client; temp_directory temp_dir; fs::path temp_dir_path = fs::path(temp_dir.get_dir_name()); CURL* const& handle = test_client.get_handle(); auto test_impl = reinterpret_cast(handle); SECTION("when fopen fails, an http_file_operation_exception is thrown") { fs::path parent_dir = temp_dir_path / "parent"; std::string file_path = (parent_dir / "child").string(); request req(""); REQUIRE_THROWS_AS_WITH( test_client.download_file(req, file_path), http_file_operation_exception, Catch::Equals("File operation error: failed to open temporary file for writing")); } SECTION("when curl_easy_setopt fails, an http_curl_setup_exception is thrown and the temporary file is removed") { request req(""); std::string file_path = (temp_dir_path / "file").string(); test_impl->test_failure_mode = curl_impl::error_mode::set_url_error; REQUIRE_THROWS_AS(test_client.download_file(req, file_path), http_curl_setup_exception); // Ensure that the temp file was removed REQUIRE(fs::is_empty(temp_dir_path)); } SECTION("when curl_easy_perform fails due to a CURLE_WRITE_ERROR, but the temporary file is removed, an 
http_file_operation_exception is thrown") { std::string file_path = (temp_dir_path / "file").string(); request req(""); test_impl->test_failure_mode = curl_impl::error_mode::easy_perform_write_error; REQUIRE_THROWS_AS_WITH( test_client.download_file(req, file_path), http_file_operation_exception, Catch::StartsWith("File operation error: failed to write to the temporary file during download")); } SECTION("when curl_easy_perform fails for reasons other than a CURLE_WRITE_ERROR, but the temporary file is removed, only the errbuf message is contained in the thrown http_file_download_exception") { std::string file_path = (temp_dir_path / "file").string(); request req(""); test_impl->test_failure_mode = curl_impl::error_mode::easy_perform_error; REQUIRE_THROWS_AS_WITH( test_client.download_file(req, file_path), http_file_download_exception, Catch::Equals("File download server side error: easy perform failed")); // Ensure that the temp file was removed REQUIRE(fs::is_empty(temp_dir_path)); } SECTION("when renaming the temporary file to the user-provided file path fails, an http_file_operation_exception is thrown") { std::string file_path = (temp_dir_path / "file").string(); request req("https://download.com"); test_impl->trigger_external_failure = remove_temp_file; REQUIRE_THROWS_AS_WITH( test_client.download_file(req, file_path), http_file_operation_exception, Catch::StartsWith("File operation error: failed to move over the temporary file's downloaded contents")); } SECTION("when writing the temporary file's contents to the response body fails, an http_file_operation_exception is thrown") { std::string file_path = (temp_dir_path / "file").string(); request req("https://download_trigger_404.com"); test_impl->trigger_external_failure = remove_temp_file; response res; REQUIRE_THROWS_AS_WITH( test_client.download_file(req, file_path, res), http_file_operation_exception, Catch::StartsWith("File operation error: failed to write the temporary file's contents to the response body")); } } leatherman-1.4.2+dfsg/curl/tests/fixtures.cc000064400000000000000000000012211332360634000210360ustar00rootroot00000000000000#include "fixtures.hpp" #include #include #include #include #include namespace fs = boost::filesystem; temp_directory::temp_directory() { auto unique_path = unique_fixture_path(); dir_name = unique_path.string(); fs::::create_directory(unique_path); } temp_directory::~temp_directory() { fs::::remove_all(dir_name); } std::string const& temp_directory::get_dir_name() const { return dir_name; } fs::::path unique_fixture_path() { return fs::::unique_path("file_util_fixture_%%%%-%%%%-%%%%-%%%%"); } leatherman-1.4.2+dfsg/curl/tests/fixtures.hpp000064400000000000000000000013401332360634000212420ustar00rootroot00000000000000#pragma once #include #include #include /** * Class to create a temporary directory with a unique name * and destroy it once it is no longer needed. * * This was taken directly from file_util/tests -- might be * worthwhile to include a testutils directory for common test * code in leatherman? * */ class temp_directory { public: temp_directory(); ~temp_directory(); std::string const& get_dir_name() const; private: std::string dir_name; }; /** Generates a unique string for use as a file path. 
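* (Illustrative note: the implementation builds the path from a pattern whose
* %%%% placeholders are replaced with random hex characters by
* boost::filesystem::unique_path.)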
*/ boost::filesystem::path unique_fixture_path(); boost::regex TEMP_FILE_REGEX("\\Atemp_file.*"); boost::regex TEMP_DIR_REGEX("\\Afile_util_fixture_.*"); leatherman-1.4.2+dfsg/curl/tests/mock_curl.cc000064400000000000000000000357571332360634000211700ustar00rootroot00000000000000#define BUILDING_LIBCURL #include #include #include #include #include #include "mock_curl.hpp" using namespace std; /* * Global error_mode struct implementation. This is needed to test * cURL global and easy initialization, before the client object exists. */ static error_mode test_failure_mode = success; curl_fail_init::curl_fail_init(error_mode mode) { test_failure_mode = mode; } curl_fail_init::~curl_fail_init() { test_failure_mode = success; } /* * libcurl implementations below. We are mocking necessary methods * of the CURL API to ensure that our wrapper is making the calls * to libcurl that we expect. */ /* * Sets up the program environment that libcurl needs. The mock * implementation simply returns successfully, unless we are * specifically testing global initialization failure. */ CURLcode curl_global_init(long flags) { if (test_failure_mode == global_init_error) { return CURLE_FAILED_INIT; } else { return CURLE_OK; } } /* * Reclaim memory obtained from a libcurl call by deleting * a mock curl object. */ void curl_free(void *p) { delete reinterpret_cast(p); } /* * End a libcurl easy handle. The mock implementation simply * calls curl_free to delete the mock curl object argument. */ void curl_easy_cleanup(CURL * handle) { curl_free(handle); } /* * Mock implementation of curl_easy_escape which simply returns a * nullptr. URL encoding the given string is not necessary for * testing. */ char *curl_easy_escape(CURL * curl, const char * string, int length) { return nullptr; } /* * Start a libcurl easy session. The mock implementation simply returns * a new mock curl object, unless we are specifcally testing easy_init * exception handling, in which case we return nullptr. */ CURL *curl_easy_init() { if (test_failure_mode == easy_init_error) { return nullptr; } else { return reinterpret_cast(new curl_impl()); } } /* * Set options for an easy curl handle. We use this method in the mock * implementation to ensure that the correct CURL API calls are being * made. Given a particular CURL option, we store received data from * the varargs parameter in the mock curl object. The data is * then verified in the tests. * * Each option has the potential to fail while being set, which is * covered by a suite of exception tests. If the given test error * option is set, we'll return CURLE_COULDNT_CONNECT for the current * option. */ #pragma clang diagnostic push // This function signature is required to mock curl, so disable a warning generated from it. #pragma clang diagnostic ignored "-Wvarargs" CURLcode curl_easy_setopt(CURL *handle, CURLoption option, ...) { auto h = reinterpret_cast(handle); va_list vl; #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wvarargs" va_start(vl, option); #pragma clang diagnostic pop switch (option) { case CURLOPT_HEADERFUNCTION: // Set client::write_header as the function to be called as soon as mock curl has received header data. if (h->test_failure_mode == curl_impl::error_mode::header_function_error) { va_end(vl); return CURLE_UNKNOWN_OPTION; } h->write_header = va_arg(vl, size_t (*)(char*, size_t, size_t, void*)); break; case CURLOPT_HEADERDATA: // Pointer to the context to write the header part of the received data to. 
if (h->test_failure_mode == curl_impl::error_mode::header_context_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->header_context = va_arg(vl, void*); break; case CURLOPT_WRITEFUNCTION: // Set client::write_body as the function to be called as soon as mock curl has received data. if (h->test_failure_mode == curl_impl::error_mode::write_body_function_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->write_body = va_arg(vl, size_t (*)(char*, size_t, size_t, void*)); break; case CURLOPT_WRITEDATA: // Pointer to the context to write the body part of the received data to. if (h->test_failure_mode == curl_impl::error_mode::write_body_context_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->body_context = va_arg(vl, void*); break; case CURLOPT_READFUNCTION: // Set client::read_body as the function to be called for mock curl to read the request body. if (h->test_failure_mode == curl_impl::error_mode::read_body_function_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->read_function = va_arg(vl, size_t (*)(char*, size_t, size_t, void*)); break; case CURLOPT_READDATA: // Pointer to the context to read the request body from by the READFUNCTION callback. if (h->test_failure_mode == curl_impl::error_mode::read_body_context_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->read_data = va_arg(vl, void*); break; case CURLOPT_URL: // Set the mock curl URL as the URL specified in the request. if (h->test_failure_mode == curl_impl::error_mode::set_url_error) { va_end(vl); return CURLE_OUT_OF_MEMORY; } h->request_url = va_arg(vl, char*); break; case CURLOPT_POST: // Set the mock curl HTTP method as POST if (h->test_failure_mode == curl_impl::error_mode::http_post_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->method = curl_impl::http_method::post; break; case CURLOPT_UPLOAD: case CURLOPT_PUT: // Set the mock curl HTTP method as PUT if (h->test_failure_mode == curl_impl::error_mode::http_put_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->method = curl_impl::http_method::put; break; case CURLOPT_HTTPHEADER: // Set the mock curl list of custom headers to that which was passed in the request. if (h->test_failure_mode == curl_impl::error_mode::set_header_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->header = va_arg(vl, curl_slist*); break; case CURLOPT_COOKIE: // Set the mock curl Cookie header to that which was passed in the request. if (h->test_failure_mode == curl_impl::error_mode::set_cookie_error) { va_end(vl); return CURLE_OUT_OF_MEMORY; } h->cookie = va_arg(vl, char*); break; case CURLOPT_CAINFO: // Set the mock curl Certificate Authority path to that which was passed in the request. if (h->test_failure_mode == curl_impl::error_mode::ca_bundle_error) { va_end(vl); return CURLE_OUT_OF_MEMORY; } h->cacert = va_arg(vl, char*); break; case CURLOPT_SSLCERT: // Set the mock curl SSL client cert name to that which was passed in the request. if (h->test_failure_mode == curl_impl::error_mode::ssl_cert_error) { va_end(vl); return CURLE_OUT_OF_MEMORY; } h->client_cert = va_arg(vl, char*); break; case CURLOPT_PROXY: // Set the mock curl proxy to that which was passed in the request. if (h->test_failure_mode == curl_impl::error_mode::proxy_error) { va_end(vl); return CURLE_OUT_OF_MEMORY; } h->proxy = va_arg(vl, char*); break; case CURLOPT_SSLKEY: // Set the mock curl private keyfile name to that which was passed in the request. 
if (h->test_failure_mode == curl_impl::error_mode::ssl_key_error) { va_end(vl); return CURLE_OUT_OF_MEMORY; } h->client_key = va_arg(vl, char*); break; case CURLOPT_CONNECTTIMEOUT_MS: if (h->test_failure_mode == curl_impl::error_mode::connect_timeout_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->connect_timeout = va_arg(vl, long); break; case CURLOPT_TIMEOUT_MS: if (h->test_failure_mode == curl_impl::error_mode::request_timeout_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } break; case CURLOPT_PROTOCOLS: if (h->test_failure_mode == curl_impl::error_mode::protocol_error) { va_end(vl); return CURLE_COULDNT_CONNECT; } h->protocols = va_arg(vl, long); break; case CURLOPT_ERRORBUFFER: h->errbuf = va_arg(vl, char*); break; default: break; } va_end(vl); return CURLE_OK; } /* * Perform a cURL transfer. the mock implementation uses this method to * write the response header and body. */ CURLcode curl_easy_perform(CURL * easy_handle) { auto h = reinterpret_cast(easy_handle); if (h->test_failure_mode == curl_impl::error_mode::easy_perform_write_error) { return CURLE_WRITE_ERROR; } if (h->test_failure_mode == curl_impl::error_mode::easy_perform_error) { if (h->errbuf) { strcpy(h->errbuf, "easy perform failed"); } return CURLE_COULDNT_CONNECT; } /* * Fill the read buffer in multiple chunks to better simulate real libcurl. */ if (h->read_function) { size_t bytes_returned; char buf[10] = {}; while ((bytes_returned = h->read_function(buf, 1, 10, h->read_data))) { h->read_buffer.append(buf, bytes_returned); } } static const array VALID_URLS{{ "http://valid.com/", "https://download.com", "https://remove_temp_file.com" }}; /* * If we pass 'valid.com' in the test, return HTTP status 200. Otherwise, return status 404. */ if (h->write_header) { bool is_valid_url = find(VALID_URLS.begin(), VALID_URLS.end(), h->request_url) != VALID_URLS.end(); if (is_valid_url) { string header_content = "HTTP/1.1 200 OK\n" "Connection: keep-alive\n" "Date: Thu, 16 Jul 2015 18:41:08 GMT\n" "Content-Type: text/html;charset=UTF-8\n" "Server: Jetty(7.x.y-SNAPSHOT)\n" "Via: 1.1 vegur"; h->write_header(&header_content[0], 1, header_content.size(), h->header_context); } else if (h->request_url == "http://nonstd-header.com/") { string header_content = "nonstd_header_name:nonstd_header_value"; h->write_header(&header_content[0], 1, header_content.size(), h->header_context); } else if (h->request_url == "http://response-delimiter.com/") { string header_content = "\r\n"; h->write_header(&header_content[0], 1, header_content.size(), h->header_context); } else if (h->request_url == "http://invalid-header.com/") { string header_content = "This is an invalid header"; h->write_header(&header_content[0], 1, header_content.size(), h->header_context); } else { string header_content = "HTTP/1.1 404 NOT FOUND\n" "Connection: keep-alive\n" "Date: Thu, 16 Jul 2015 18:41:08 GMT\n" "Content-Type: text/html;charset=UTF-8\n" "Server: Jetty(7.x.y-SNAPSHOT)\n" "Via: 1.1 vegur"; h->write_header(&header_content[0], 1, header_content.size(), h->header_context); } } /* * For file download. It is OK if exception is thrown if write_body is not set, * that means something went wrong in our code's setup so we want our test to * fail. */ if (h->request_url == "https://download.com" || h->request_url == "https://download_trigger_404.com") { string download_msg = (h->request_url == "https://download.com") ? 
"successfully downloaded file" : "Not found"; h->write_body(const_cast(download_msg.c_str()), 1, reinterpret_cast(download_msg.size()), h->body_context); if (h->trigger_external_failure) { #ifdef _WIN32 fclose(reinterpret_cast(h->body_context)); #endif h->trigger_external_failure(); } return CURLE_OK; } /* * We set resp_body internally in write_body tests. */ if (h->write_body) { h->write_body(&h->resp_body[0], 1, h->resp_body.size(), h->body_context); } return CURLE_OK; } /* * Unimplemented, as resetting options is not necessary for testing. */ void curl_easy_reset(CURL *handle) { } /* * We throw CURLE_FAILED_INIT to test cURL handle initialization, * and CURLE_COULDNT_CONNECT for all other possible errors. */ const char *curl_easy_strerror(CURLcode errornum) { switch (errornum) { case CURLE_OK: break; case CURLE_FAILED_INIT: return "cURL failed with: CURLE_FAILED_INIT"; case CURLE_COULDNT_CONNECT: return "cURL failed with: CURLE_COULDNT_CONNECT"; case CURLE_OUT_OF_MEMORY: return "cURL failed with: CURLE_OUT_OF_MEMORY"; case CURLE_UNKNOWN_OPTION: return "cURL failed with CURLE_UNKNOWN_OPTION"; default: return nullptr; } return nullptr; } /* * Unimplemented, as we don't allocate many objects to clean up * in tests. */ void curl_global_cleanup(void) { } /* * Add a string to an slist. If list already includes curl_slist * objects, we must traverse the linked list to append the new * object at the end. Otherwise, create a new curl_slist linked * list. */ struct curl_slist *curl_slist_append(struct curl_slist * list, const char * string ) { curl_slist* new_slist_obj = new curl_slist(); new_slist_obj->data = new char[strlen(string) + 1]; new_slist_obj->data = strcpy(new_slist_obj->data, string); if (list) { curl_slist* ptr = list; while(ptr->next) { ptr = ptr->next; } ptr->next = new_slist_obj; return list; } else { return new_slist_obj; } } /* * Unimplemented, as we allocate very little memory for curl_slist * objects in tests. This may have to change if we decide to run * memory checks on unit tests. 
*/ void curl_slist_free_all(struct curl_slist * list) { } leatherman-1.4.2+dfsg/curl/tests/mock_curl.hpp000064400000000000000000000044761332360634000213640ustar00rootroot00000000000000#pragma once #include #include #include #ifdef _WIN32 #include "export.h" #else #define MOCK_CURL_EXPORT #endif struct curl_impl { enum struct http_method { get, put, post }; enum struct error_mode { success, easy_perform_error, easy_perform_write_error, http_post_error, http_put_error, set_url_error, set_header_error, set_cookie_error, header_function_error, header_context_error, write_body_function_error, write_body_context_error, read_body_function_error, read_body_context_error, connect_timeout_error, request_timeout_error, ca_bundle_error, ssl_cert_error, ssl_key_error, protocol_error, proxy_error }; error_mode test_failure_mode = error_mode::success; // Pointer for client::write_header as a callback function in curl_easy_setopt std::function write_header; void* header_context; // Where to write the header part of the received data to // Pointer for client::write_body as a callback function in curl_easy_setopt std::function write_body; void* body_context; // Where to write the body part of the received data to // Pointer for client::read_body as a callback function in curl_easy_setopt std::function read_function; void* read_data; // Where to read the request body from std::string request_url, cookie, cacert, client_cert, client_key, proxy; long protocols; long connect_timeout; http_method method = http_method::get; curl_slist* header; // List of custom request headers to be passed to the server std::string read_buffer; // Buffer to test reading the request body std::string resp_body; // Response body which should be written to a context using the write_body function callback char* errbuf = 0; // Pointer to trigger failure callbacks std::function trigger_external_failure; }; enum error_mode { success, easy_init_error, global_init_error }; struct MOCK_CURL_EXPORT curl_fail_init { curl_fail_init(error_mode mode); ~curl_fail_init(); }; leatherman-1.4.2+dfsg/curl/tests/request_test.cc000064400000000000000000000057651332360634000217350ustar00rootroot00000000000000#include #include "mock_curl.hpp" #include #include using namespace std; using namespace leatherman::curl; struct mock_client : client { curl_handle const& get_handle() { return client::get_handle(); } }; TEST_CASE("curl::request") { request test_request {"http://valid.com"}; SECTION("Headers should be addable and retrievable from the request") { test_request.add_header("header_name", "header_value"); auto header = test_request.header("header_name"); REQUIRE(header); REQUIRE(*(header) == "header_value"); } SECTION("Headers should be removable from the request") { test_request.add_header("header_name", "header_value"); test_request.remove_header("header_name"); auto header = test_request.header("header_name"); REQUIRE(header == nullptr); } SECTION("Headers should be enumerable") { int i = 0; string expected_name, expected_value; test_request.add_header("header_0", "header_value_0"); test_request.add_header("header_1", "header_value_1"); test_request.add_header("header_2", "header_value_2"); test_request.each_header([&](string const& name, string const& value) { expected_name = "header_" + to_string(i); expected_value = "header_value_" + to_string(i); REQUIRE(name == expected_name); REQUIRE(value == expected_value); ++i; return true; }); } SECTION("A cookie should be retrievable by name") { test_request.add_cookie("cookie_0", "cookie_val_0"); 
test_request.add_cookie("cookie_1", "cookie_val_1"); REQUIRE(*(test_request.cookie("cookie_0")) == "cookie_val_0"); } SECTION("Cookies should be enumerable") { int i = 0; string expected_name, expected_value; test_request.add_cookie("cookie_0", "cookie_value_0"); test_request.add_cookie("cookie_1", "cookie_value_1"); test_request.add_cookie("cookie_2", "cookie_value_2"); test_request.each_cookie([&](string const& name, string const& value) { expected_name = "cookie_" + to_string(i); expected_value = "cookie_value_" + to_string(i); REQUIRE(name == expected_name); REQUIRE(value == expected_value); ++i; return true; }); } SECTION("Request body should be addable and retrievable") { test_request.body("Hello, I am a request body!", "message"); auto body = test_request.body(); REQUIRE(body == "Hello, I am a request body!"); } SECTION("Overall request timeout should be configurable and retrievable") { test_request.timeout(100); REQUIRE(test_request.timeout() == 100); } SECTION("Connection timeout should be configurable and retrievable") { test_request.connection_timeout(100); REQUIRE(test_request.connection_timeout() == 100); } } leatherman-1.4.2+dfsg/curl/tests/response_test.cc000064400000000000000000000036701332360634000220740ustar00rootroot00000000000000#include #include "mock_curl.hpp" #include #include using namespace std; using namespace leatherman::curl; struct mock_client : client { curl_handle const& get_handle() { return client::get_handle(); } }; TEST_CASE("curl::response") { response test_response; SECTION("Headers should be addable and retrievable from the response") { test_response.add_header("header_name", "header_value"); auto header = test_response.header("header_name"); REQUIRE(header); REQUIRE(*(header) == "header_value"); } SECTION("Headers should be removable from the response") { test_response.add_header("header_name", "header_value"); test_response.remove_header("header_name"); auto header = test_response.header("header_name"); REQUIRE(header == nullptr); } SECTION("Headers should be enumerable") { int i = 0; string expected_name, expected_value; test_response.add_header("header_0", "header_value_0"); test_response.add_header("header_1", "header_value_1"); test_response.add_header("header_2", "header_value_2"); test_response.each_header([&](string const& name, string const& value) { expected_name = "header_" + to_string(i); expected_value = "header_value_" + to_string(i); REQUIRE(name == expected_name); REQUIRE(value == expected_value); ++i; return true; }); } SECTION("Response body should be addable and retrievable") { test_response.body("Hello, I am a response body!"); auto body = test_response.body(); REQUIRE(body == "Hello, I am a response body!"); } SECTION("Status code should be addable and retrievable") { test_response.status_code(200); auto code = test_response.status_code(); REQUIRE(code == 200); } } leatherman-1.4.2+dfsg/dynamic_library/000075500000000000000000000000001332360634000177235ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/CMakeLists.txt000064400000000000000000000024361332360634000224700ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS regex) add_leatherman_deps("${Boost_LIBRARIES}") add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(locale) leatherman_dependency(logging) leatherman_dependency(util) if(WIN32) leatherman_dependency(windows) else() if(NOT CMAKE_SYSTEM_NAME MATCHES "FreeBSD|OpenBSD") add_leatherman_deps(dl) endif() endif() if (BUILDING_LEATHERMAN) 
leatherman_logging_namespace("leatherman.dynamic_library") leatherman_logging_line_numbers() endif() add_leatherman_headers(inc/leatherman) if(WIN32) add_leatherman_library(src/windows/dynamic_library.cc src/dynamic_library.cc) else() add_leatherman_library(src/posix/dynamic_library.cc src/dynamic_library.cc) endif() add_leatherman_test(tests/dynamic_library_tests.cc) if (BUILDING_LEATHERMAN) #Build dummy dynamic libraries for use in testing add_library(libtest SHARED tests/test-lib/hello.cc) set_target_properties(libtest PROPERTIES PREFIX "" SUFFIX ".so") add_library(libtest1 SHARED tests/test-lib/hello.cc tests/test-lib/goodbye.cc) set_target_properties(libtest1 PROPERTIES PREFIX "" SUFFIX ".so") configure_file ( "${CMAKE_CURRENT_LIST_DIR}/tests/fixtures.hpp.in" "${CMAKE_CURRENT_LIST_DIR}/tests/fixtures.hpp" ) endif() leatherman-1.4.2+dfsg/dynamic_library/inc/000075500000000000000000000000001332360634000204745ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/inc/leatherman/000075500000000000000000000000001332360634000226145ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/inc/leatherman/dynamic_library/000075500000000000000000000000001332360634000257645ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/inc/leatherman/dynamic_library/dynamic_library.hpp000064400000000000000000000100171332360634000316440ustar00rootroot00000000000000/** * @file * Declares the dynamic library type. */ #pragma once #include #include #include namespace leatherman { namespace dynamic_library { /** * Exception thrown for missing imported symbols. */ struct missing_import_exception : std::runtime_error { /** * Constructs a missing_import_exception. * @param message The exception message. */ explicit missing_import_exception(std::string const& message); }; /** * Represents a dynamic library. */ struct dynamic_library { /** * Constructs a dynamic_library. */ dynamic_library(); /** * Destructs a dynamic_library. */ ~dynamic_library(); /** * Prevents the dynamic_library from being copied. */ dynamic_library(dynamic_library const&) = delete; /** * Prevents the dynamic_library from being copied. * @returns Returns this dynamic_library. */ dynamic_library& operator=(dynamic_library const&) = delete; /** * Moves the given dynamic_library into this dynamic_library. * @param other The dynamic_library to move into this dynamic_library. */ dynamic_library(dynamic_library&& other); /** * Moves the given dynamic_library into this dynamic_library. * @param other The dynamic_library to move into this dynamic_library. * @return Returns this dynamic_library. */ dynamic_library& operator=(dynamic_library&& other); /** * Finds an already loaded library by file name regex pattern. * @param pattern The regex pattern of the library to find. * @return Returns the already loaded library if found or an unloaded library if not found. */ static dynamic_library find_by_pattern(std::string const& pattern); /** * Finds an already loaded library by symbol. * @param symbol The symbol to find. * @return Returns the already loaded library if found or an unloaded library if not found. */ static dynamic_library find_by_symbol(std::string const& symbol); /** * Loads the given dynamic library. * The current library will be closed before the given library is loaded. * If you rely on the value of first_load(), you should try to call find_by_symbol * before calling this function. * @param name The name of the library to load. * @return Returns true if the library loaded or false if it did not. 
*/ bool load(std::string const& name, bool global = false); /** * Determines if the library is loaded. * @return Returns true if the library is loaded or false if it is not. */ bool loaded() const; /** * Determines if the library's load was the first. * @return Returns true if the library was loaded for the first time or false if it was previously loaded. */ bool first_load() const; /** * Gets the name of the library. * @return Returns the name of the library. */ std::string const& name() const; /** * Closes the library. */ void close(); /** * Finds a symbol in the library by name. * @param name The name of the symbol to find. * @param throw_if_missing if true, throws an exception if the symbol is missing. If false, returns nullptr if the symbol is missing. * @param alias The alias of the symbol to load if the given symbol isn't found. * @return Returns the symbol's address or nullptr if not found. */ void* find_symbol(std::string const& name, bool throw_if_missing = false, std::string const& alias = {}) const; private: void* _handle; std::string _name; bool _first_load; }; }} // namespace leatherman::load_library leatherman-1.4.2+dfsg/dynamic_library/src/000075500000000000000000000000001332360634000205125ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/src/dynamic_library.cc000064400000000000000000000023071332360634000241730ustar00rootroot00000000000000#include using namespace std; namespace leatherman { namespace dynamic_library { missing_import_exception::missing_import_exception(string const& message) : runtime_error(message) { } dynamic_library::dynamic_library() : _handle(nullptr), _first_load(false) { } dynamic_library::~dynamic_library() { close(); } dynamic_library::dynamic_library(dynamic_library && other) : _handle(nullptr), _first_load(false) { *this = move(other); } dynamic_library &dynamic_library::operator=(dynamic_library && other) { close(); _handle = other._handle; _name = other._name; _first_load = other._first_load; other._handle = nullptr; other._name.clear(); other._first_load = false; return *this; } bool dynamic_library::loaded() const { return _handle != nullptr; } bool dynamic_library::first_load() const { return _first_load; } string const& dynamic_library::name() const { return _name; } }} // namespace leatherman::dynamic_library leatherman-1.4.2+dfsg/dynamic_library/src/posix/000075500000000000000000000000001332360634000216545ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/src/posix/dynamic_library.cc000064400000000000000000000055511332360634000253410ustar00rootroot00000000000000#include #include #include using namespace std; namespace lth_locale = leatherman::locale; namespace leatherman { namespace dynamic_library { dynamic_library dynamic_library::find_by_pattern(std::string const& pattern) { // POSIX doesn't have this capability return dynamic_library(); } dynamic_library dynamic_library::find_by_symbol(std::string const& symbol) { dynamic_library library; // Load the "null" library; this will cause dlsym to search for the symbol void* handle = dlopen(nullptr, RTLD_GLOBAL | RTLD_LAZY); if (!handle) { return library; } // Check to see if a search for the symbol succeeds if (!dlsym(handle, symbol.c_str())) { dlclose(handle); return library; } // At least one loaded module will resolve the given symbol // Return this handle to allow the caller to search for other symbols library._handle = handle; library._first_load = false; return library; } bool dynamic_library::load(string const& name, bool global) { close(); auto 
load_mode = (global ? RTLD_GLOBAL : RTLD_LOCAL) | RTLD_LAZY; _handle = dlopen(name.c_str(), load_mode); if (!_handle) { LOG_DEBUG("library {1} not found {2} ({3}).", name.c_str(), strerror(errno), errno); return false; } _first_load = true; _name = name; return true; } void dynamic_library::close() { if (_handle) { dlclose(_handle); _handle = nullptr; } _name.clear(); _first_load = false; } void* dynamic_library::find_symbol(string const& name, bool throw_if_missing, string const& alias) const { if (!_handle) { if (throw_if_missing) { throw missing_import_exception("library is not loaded."); } else { LOG_DEBUG("library {1} is not loaded when attempting to load symbol {2}.", _name.c_str(), name.c_str()); } return nullptr; } void* symbol = dlsym(_handle, name.c_str()); if (!symbol && !alias.empty()) { LOG_DEBUG("symbol {1} not found in library {2}, trying alias {3}.", name.c_str(), _name.c_str(), alias.c_str()); symbol = dlsym(_handle, alias.c_str()); } if (!symbol) { if (throw_if_missing) { throw missing_import_exception(lth_locale::format("symbol {1} was not found in {2}.", name, _name)); } else { LOG_DEBUG("symbol {1} not found in library {2}.", name.c_str(), _name.c_str()); } } return symbol; } }} // namespace leatherman::dynamic_library leatherman-1.4.2+dfsg/dynamic_library/src/windows/000075500000000000000000000000001332360634000222045ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/src/windows/dynamic_library.cc000064400000000000000000000117611332360634000256710ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; using namespace leatherman::util; using namespace leatherman::windows; namespace lth_locale = leatherman::locale; namespace leatherman { namespace dynamic_library { dynamic_library dynamic_library::find_by_pattern(std::string const& pattern) { dynamic_library library; // Check to see if the library is loaded. Walk the list of loaded modules and match against pattern. // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686849(v=vs.85).aspx for details on // the Tool Help library. HANDLE hModuleSnap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, GetCurrentProcessId()); if (hModuleSnap == INVALID_HANDLE_VALUE) { LOG_DEBUG("library matching pattern {1} not found, CreateToolhelp32Snapshot failed: {2}.", pattern.c_str(), windows::system_error()); return library; } scoped_resource hModSnap(hModuleSnap, CloseHandle); MODULEENTRY32 me32 = {}; me32.dwSize = sizeof(MODULEENTRY32); if (!Module32First(hModSnap, &me32)) { LOG_DEBUG("library matching pattern {1} not found, Module32First failed: {2}.", pattern.c_str(), windows::system_error()); return library; } boost::regex rx(pattern); do { auto libname = boost::nowide::narrow(me32.szModule); if (re_search(libname, rx)) { // Use GetModuleHandleEx to ensure the reference count is incremented. If the module has been // unloaded since the snapshot was made, this may fail and we should return an empty library. 
HMODULE hMod; if (GetModuleHandleEx(0, me32.szModule, &hMod)) { library._handle = hMod; library._first_load = false; LOG_DEBUG("library {1} found from pattern {2}", libname, pattern); } else { LOG_DEBUG("library {1} found from pattern {2}, but unloaded before handle was acquired", libname, pattern); } return library; } else { LOG_TRACE("library %1% didn't match pattern %2%", libname, pattern); } } while (Module32Next(hModSnap, &me32)); LOG_DEBUG("no loaded libraries found matching pattern {1}", pattern); return library; } dynamic_library dynamic_library::find_by_symbol(std::string const& symbol) { // Windows doesn't have this capability. return dynamic_library(); } bool dynamic_library::load(string const& name, bool global) { close(); // Check if the module has already been loaded (and increment the ref count). HMODULE hMod; auto wname = boost::nowide::widen(name); if (!GetModuleHandleExW(0, wname.c_str(), &hMod)) { // Load now hMod = LoadLibraryW(wname.c_str()); if (!hMod) { LOG_DEBUG("library {1} not found {2}.", name.c_str(), windows::system_error()); return false; } _first_load = true; } _handle = hMod; _name = name; return true; } void dynamic_library::close() { if (_handle) { FreeLibrary(static_cast(_handle)); _handle = nullptr; } _name.clear(); _first_load = false; } void* dynamic_library::find_symbol(string const& name, bool throw_if_missing, string const& alias) const { if (!_handle) { if (throw_if_missing) { throw missing_import_exception(_("library is not loaded")); } else { LOG_DEBUG("library {1} is not loaded when attempting to load symbol {2}.", _name.c_str(), name.c_str()); } return nullptr; } auto symbol = GetProcAddress(static_cast(_handle), name.c_str()); if (!symbol && !alias.empty()) { LOG_DEBUG("symbol {1} not found in library {2}, trying alias {3}.", name.c_str(), _name.c_str(), alias.c_str()); symbol = GetProcAddress(static_cast(_handle), alias.c_str()); } if (!symbol) { if (throw_if_missing) { throw missing_import_exception(_("symbol {1} was not found in {2}.", name, _name)); } else { LOG_DEBUG("symbol {1} not found in library {2}.", name.c_str(), _name.c_str()); } } return reinterpret_cast(symbol); } }} // namespace leatherman::dynamic_library leatherman-1.4.2+dfsg/dynamic_library/tests/000075500000000000000000000000001332360634000210655ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/tests/dynamic_library_tests.cc000064400000000000000000000057531332360634000260000ustar00rootroot00000000000000#include #include #include "fixtures.hpp" using namespace leatherman::dynamic_library; std::string const lib_path = TEST_LIB_DIRECTORY + std::string("/libtest.so"); std::string const lib_path2 = TEST_LIB_DIRECTORY + std::string("/libtest1.so"); TEST_CASE("dynamic_library::load and dynamic_library::close", "[dyn-lib]") { SECTION("should not be loaded by default") { dynamic_library lib; REQUIRE_FALSE(lib.loaded()); } SECTION("should load library from path, then close it"){ dynamic_library lib; REQUIRE(lib.load(lib_path)); REQUIRE(lib.loaded()); REQUIRE(lib.first_load()); lib.close(); REQUIRE_FALSE(lib.loaded()); } SECTION("should fail to load a nonexistent library") { dynamic_library lib; REQUIRE_FALSE(lib.load("no_such_library")); } } TEST_CASE("dynamic_library::find_symbol", "[dyn-lib]"){ dynamic_library lib; REQUIRE(lib.load(lib_path)); SECTION("should fail to find nonexistent symbol") { REQUIRE_FALSE(lib.find_symbol("not_here")); REQUIRE_THROWS(lib.find_symbol("not_here", true)); } SECTION("should find library function") { REQUIRE(lib.find_symbol("hello")); 
} SECTION("should find aliased symbol"){ dynamic_library lib2; REQUIRE(lib2.load(lib_path2)); REQUIRE(lib2.find_symbol("not_here", false, "goodbye")); } } TEST_CASE("dynamic_library::dyanmic_library(dynamic_library && other)", "[dyn-lib]") { SECTION("should move library to new variable") { dynamic_library lib; REQUIRE(lib.load(lib_path)); REQUIRE(lib.loaded()); dynamic_library lib2 = std::move(lib); REQUIRE(lib2.loaded()); REQUIRE(lib2.name() == lib_path); REQUIRE_FALSE(lib.loaded()); } } #ifdef _WIN32 TEST_CASE("dynamic_library::find_by_pattern", "[dyn-lib]"){ SECTION("should fail to find a missing library"){ REQUIRE_FALSE(dynamic_library::find_by_pattern("libtest1").loaded()); } SECTION("should find a library matching a pattern"){ dynamic_library lib; lib.load(lib_path); REQUIRE_FALSE(dynamic_library::find_by_pattern("libtest1").loaded()); dynamic_library lib2; lib2.load(lib_path2); REQUIRE(dynamic_library::find_by_pattern("libtest1").loaded()); } } #else TEST_CASE("dynamic_library::find_by_symbol", "[dyn-lib]"){ SECTION("should fail to find a missing symbol"){ REQUIRE_FALSE(dynamic_library::find_by_symbol("no_such_symbol").loaded()); } SECTION("should find a library with the given symbol"){ dynamic_library lib; REQUIRE(lib.load(lib_path)); REQUIRE_FALSE(dynamic_library::find_by_symbol("goodbye").loaded()); dynamic_library lib2; REQUIRE(lib2.load(lib_path2, true)); REQUIRE(lib2.find_symbol("goodbye")); REQUIRE(dynamic_library::find_by_symbol("goodbye").loaded()); } } #endif leatherman-1.4.2+dfsg/dynamic_library/tests/fixtures.hpp.in000064400000000000000000000001141332360634000240500ustar00rootroot00000000000000#pragma once #define TEST_LIB_DIRECTORY "@CMAKE_LIBRARY_OUTPUT_DIRECTORY@" leatherman-1.4.2+dfsg/dynamic_library/tests/test-lib/000075500000000000000000000000001332360634000226105ustar00rootroot00000000000000leatherman-1.4.2+dfsg/dynamic_library/tests/test-lib/goodbye.cc000064400000000000000000000001171332360634000245460ustar00rootroot00000000000000#include extern "C" void goodbye(){ std::cout << "Goodbye!"; } leatherman-1.4.2+dfsg/dynamic_library/tests/test-lib/hello.cc000064400000000000000000000001421332360634000242170ustar00rootroot00000000000000#include void goodbye(); extern "C" void hello(){ std::cout << "Hello world!"; } leatherman-1.4.2+dfsg/execution/000075500000000000000000000000001332360634000165565ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/CMakeLists.txt000064400000000000000000000050561332360634000213240ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS regex filesystem system) add_leatherman_deps("${Boost_LIBRARIES}") if ("${CMAKE_SYSTEM_NAME}" MATCHES "SunOS") # We use functions provided by this library in the implementation # of the create_detached_process execution option on Solaris to # execute the child processes in their own contracts add_leatherman_deps(contract) endif() add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(util) leatherman_dependency(nowide) leatherman_dependency(locale) leatherman_dependency(logging) leatherman_dependency(file_util) if (BUILDING_LEATHERMAN) leatherman_logging_namespace("leatherman.execution") leatherman_logging_line_numbers() endif() if(WIN32) leatherman_dependency(windows) endif() add_leatherman_headers(inc/leatherman) if(WIN32) add_leatherman_library(src/execution.cc src/windows/execution.cc) else() if("${CMAKE_SYSTEM_NAME}" MATCHES "SunOS") # LFS flags are needed to compile the posix/solaris/platform.cc such that it links correctly # against the libcontract library. 
They're not applied universally as they impact the ability # to use other OS functions. This usage is safe as long as global variables based on these flags # are avoided, according to http://docs.oracle.com/cd/E19455-01/806-0634/6j9vo5alu/index.html EXECUTE_PROCESS( COMMAND getconf LFS_CFLAGS OUTPUT_VARIABLE LFS_CFLAGS OUTPUT_STRIP_TRAILING_WHITESPACE ) set(LEATHERMAN_CXX_FLAGS "${LEATHERMAN_CXX_FLAGS} ${LFS_CFLAGS}") add_leatherman_library(src/execution.cc src/posix/execution.cc src/posix/solaris/platform.cc) else() add_leatherman_library(src/execution.cc src/posix/execution.cc src/posix/generic/platform.cc) endif() endif() if(WIN32) set(PLATFORM_TESTS tests/windows/execution.cc) else() set(PLATFORM_TESTS tests/posix/execution.cc) if("${CMAKE_SYSTEM_NAME}" MATCHES "SunOS") list(APPEND PLATFORM_TESTS tests/posix/solaris/execution.cc) endif() endif() add_leatherman_test(tests/log_capture.cc ${PLATFORM_TESTS}) if (BUILDING_LEATHERMAN) # Dumb implementation of cat.exe for testing stdin/stdout/stderr handling. include_directories(BEFORE ${LEATHERMAN_NOWIDE_INCLUDE}) add_executable(lth_cat tests/lth_cat.cc) target_link_libraries(lth_cat ${LEATHERMAN_NOWIDE_LIBS}) set_target_properties(lth_cat PROPERTIES COMPILE_FLAGS "${LEATHERMAN_CXX_FLAGS}") configure_file ( "${CMAKE_CURRENT_LIST_DIR}/tests/fixtures.hpp.in" "${CMAKE_CURRENT_LIST_DIR}/tests/fixtures.hpp" ) endif() leatherman-1.4.2+dfsg/execution/inc/000075500000000000000000000000001332360634000173275ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/inc/leatherman/000075500000000000000000000000001332360634000214475ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/inc/leatherman/execution/000075500000000000000000000000001332360634000234525ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/inc/leatherman/execution/execution.hpp000064400000000000000000000536211332360634000261750ustar00rootroot00000000000000/** * @file * Declares functions used for executing commands. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include namespace lth_util = leatherman::util; namespace leatherman { namespace execution { /** * The supported execution options. */ enum class execution_options { /** * No options. */ none = 0, /** * Redirect stderr to stdout. This will override redirect_stderr_to_null if both are set. */ redirect_stderr_to_stdout = (1 << 1), /** * Throw an exception if the child process exits with a nonzero status. */ throw_on_nonzero_exit = (1 << 2), /** * Throw an exception if the child process is terminated due to a signal. */ throw_on_signal = (1 << 3), /** * Automatically trim output leading and trailing whitespace. */ trim_output = (1 << 4), /** * Merge specified environment with the current process environment. */ merge_environment = (1 << 5), /** * Redirect stderr to "null". */ redirect_stderr_to_null = (1 << 6), /** * Preserve (do not quote) arguments. */ preserve_arguments = (1 << 7), /** * Create a new process such that it can outlive its parent. This involves running it * in a separate process group on Windows, and in a separate contract on Solaris. */ create_detached_process = (1 << 8), /** * Inherit locale environment variables from the current process. Limited to LC_ALL and * LOCALE, which are specifically overridden to "C" with merge_environment. * Will not override those variables if explicitly passed in an environment map. 
*/ inherit_locale = (1 << 9), /** * On windows, converts \r\n newlines to standard \n */ convert_newlines = (1 << 10), /** * On POSIX systems, use `fork()` instead of `vfork()` when creating the new process. * Ignored on windows. * The `fork()` is typically slower than `vfork()` because it creates a copy * of the parent's address space for the child process (`vfork()` lets the * child process re-use the parent's address space) but safer from deadlocks * if called from multi threaded processes. */ thread_safe = (1 << 11), /** * Allow standard input to be unread, rather than failing if it is ignored. */ allow_stdin_unread = (1 << 12), /** * A combination of all throw options. */ throw_on_failure = throw_on_nonzero_exit | throw_on_signal, }; /** * System command shell available for executing shell scripts. * Uses 'cmd' on Windows and 'sh' on *nix systems. */ extern const char *const command_shell; /** * System command shell arguments to accept a script as an argument. * Uses '/c' on Windows and '-c' on *nix systems. */ extern const char *const command_args; /** * Base class for execution exceptions. */ struct execution_exception : std::runtime_error { /** * Constructs a execution_exception. * @param message The exception message. */ explicit execution_exception(std::string const& message); }; /** * Base class for execution failures. */ struct execution_failure_exception : execution_exception { /** * Constructs a execution_failure_exception. * @param message The exception message. * @param output The child process stdout output. * @param error The child process stderr output. */ execution_failure_exception(std::string const& message, std::string output, std::string error); /** * Gets the child process stdout output. * @return Returns the child process stdout output. */ std::string const& output() const; /** * Gets the child process stderr output. * @return Returns the child process stderr output. */ std::string const& error() const; private: std::string _output; std::string _error; }; /** * Exception that is thrown when a child exits with a non-zero status code. */ struct child_exit_exception : execution_failure_exception { /** * Constructs a child_exit_exception. * @param message The exception message. * @param status_code The exit status code of the child process. * @param output The child process stdout output. * @param error The child process stderr output. */ child_exit_exception(std::string const& message, int status_code, std::string output, std::string error); /** * Gets the child process exit status code. * @return Returns the child process exit status code. */ int status_code() const; private: int _status_code; }; /** * Exception that is thrown when a child exists due to a signal. */ struct child_signal_exception : execution_failure_exception { /** * Constructs a child_signal_exception. * @param message The exception message. * @param signal The signal code that terminated the child process. * @param output The child process stdout output. * @param error The child process stderr output. */ child_signal_exception(std::string const& message, int signal, std::string output, std::string error); /** * Gets the signal that terminated the child process. * @return Returns the signal that terminated the child process. */ int signal() const; private: int _signal; }; /** * Exception that is thrown when a command times out. */ struct timeout_exception : execution_exception { /** * Constructs a timeout_exception. * @param message The exception message. 
* @param pid The process id of the process that timed out and was killed. */ timeout_exception(std::string const& message, size_t pid); /** * Gets the process id of the process that timed out and was killed. * @return Returns the process id of the process that timed out and was killed. */ size_t pid() const; private: size_t _pid; }; /** * Encapsulates return value from executing a process. */ struct result { /** * Constructor. */ result(bool s, std::string o, std::string e, int ec, size_t p) : success(s), output(move(o)), error(move(e)), exit_code(ec), pid(p) {} /** * Whether or not the command succeeded, defaults to true. */ bool success = true; /** * Output from stdout. */ std::string output; /** * Output from stderr (if not redirected). */ std::string error; /** * The process exit code, defaults to 0. */ int exit_code = 0; /** * The process ID */ size_t pid = 0; }; /** * Searches the given paths for the given executable file. * @param file The file to search for. * @param directories The directories to search. * @return Returns the full path or empty if the file could not be found. */ std::string which(std::string const& file, std::vector const& directories = lth_util::environment::search_paths()); /** * Expands the executable in the command to the full path. * @param command The command to expand. * @param directories The directories to search. * @return Returns the expanded command if the executable was found or empty if it was not found.. */ std::string expand_command(std::string const& command, std::vector const& directories = lth_util::environment::search_paths()); /** * Executes the given program. * @param file The name or path of the program to execute. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output, merging the environment, and redirecting stderr to null. * @return Returns a result struct. */ result execute( std::string const& file, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); /** * Executes the given program. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output, merging the environment, and redirecting stderr to null. * @return Returns a result struct. */ result execute( std::string const& file, std::vector const& arguments, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); /** * Executes the given program. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param environment The environment variables to pass to the child process. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output, merging the environment, and redirecting stderr to null. * @return Returns a result struct. 
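     *
     * A usage sketch (the command and arguments are placeholders; the signature defaults
     * apply when timeout and options are omitted):
     * @code
     * auto res = execute("ls", { "-l", "/tmp" });
     * if (res.success) {
     *     // res.output holds the trimmed stdout text and res.exit_code is 0 here
     * }
     * @endcode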
*/ result execute( std::string const& file, std::vector const& arguments, std::map const& environment, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); /** * Executes the given program. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param input A string to place on stdin for the child process before reading output. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output, merging the environment, and redirecting stderr to null. * @return Returns a result struct. */ result execute( std::string const& file, std::vector const& arguments, std::string const& input, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); /** * Executes the given program. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param input A string to place on stdin for the child process before reading output. * @param environment The environment variables to pass to the child process. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output, merging the environment, and redirecting stderr to null. * @return Returns a result struct. */ result execute( std::string const& file, std::vector const& arguments, std::string const& input, std::map const& environment, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); /** * Executes the given program by calling a specified callback that receives the pid of the program's process. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param input A string to place on stdin for the child process before reading output. * @param environment The environment variables to pass to the child process. * @param pid_callback The callback that is called with the pid of the child process. Defaults to no callback, in which case the pid won't be processed. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output, merging the environment, and redirecting stderr to null. * @return Returns a result struct. */ result execute( std::string const& file, std::vector const& arguments, std::string const& input, std::map const& environment, std::function pid_callback = nullptr, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); /** * Executes the given program by writing the output of stdout and stderr to specified files. The output * is processed line-by-line, so binary data isn't supported. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. 
* @param input A string to place on stdin for the child process before reading output. * @param out_file The file where the output on stdout will be written. * @param err_file The file where the output on stderr will be written. Defaults to no file, in which case the output on stderr will be buffered and returned in the result struct. * @param environment The environment variables to pass to the child process. * @param pid_callback The callback that is called with the pid of the child process. Defaults to no callback, in which case the pid will not be processed. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output and merging the environment. * @return Returns a result struct that will not contain the output of the streams for which a file was specified. * * Throws an execution_exception error in case it fails to open a file. */ result execute( std::string const& file, std::vector const& arguments, std::string const& input, std::string const& out_file, std::string const& err_file = "", std::map const& environment = std::map(), std::function pid_callback = nullptr, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment }); /** * Executes the given program by writing the output of stdout and stderr to specified files. The output * is processed line-by-line, so binary data isn't supported. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param input A string to place on stdin for the child process before reading output. * @param out_file The file where the output on stdout will be written. * @param err_file The file where the output on stderr will be written. * @param environment The environment variables to pass to the child process. * @param pid_callback The callback that is called with the pid of the child process. * @param timeout The timeout, in seconds. * @param perms The file permissions to apply when creating the out_file and err_file. * On Windows this only toggles read-only. * @param options The execution options. Defaults to trimming output and merging the environment. * @return Returns a result struct that will not contain the output of the streams for which a file was specified. * * Throws an execution_exception error in case it fails to open a file. */ result execute( std::string const& file, std::vector const& arguments, std::string const& input, std::string const& out_file, std::string const& err_file, std::map const& environment, std::function pid_callback, uint32_t timeout, boost::optional perms, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment }); /** * Executes the given program and returns each line of output. * @param file The name or path of the program to execute. * @param stdout_callback The callback that is called with each line of output on stdout. * @param stderr_callback The callback that is called with each line of output on stderr. If nullptr, implies redirect_stderr_to_null unless redirect_stderr_to_stdout is set in options. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output and merging the environment. * @return Returns true if the execution succeeded or false if it did not. 
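     *
     * A usage sketch (the command is a placeholder, and the callback signature shown is an
     * assumption to the extent the exact std::function type is not spelled out here):
     * @code
     * each_line("df", [](std::string& line) {
     *     // inspect each line of stdout; return false to stop consuming further output
     *     return true;
     * });
     * @endcode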
*/ bool each_line( std::string const& file, std::function stdout_callback, std::function stderr_callback = nullptr, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment }); /** * Executes the given program and returns each line of output. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param stdout_callback The callback that is called with each line of output on stdout. * @param stderr_callback The callback that is called with each line of output on stderr. If nullptr, implies redirect_stderr_to_null unless redirect_stderr_to_stdout is set in options. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output and merging the environment. * @return Returns true if the execution succeeded or false if it did not. */ bool each_line( std::string const& file, std::vector const& arguments, std::function stdout_callback, std::function stderr_callback = nullptr, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment }); /** * Executes the given program and returns each line of output. * @param file The name or path of the program to execute. * @param arguments The arguments to pass to the program. On Windows they will be quoted as needed for spaces. * @param environment The environment variables to pass to the child process. * @param stdout_callback The callback that is called with each line of output on stdout. * @param stderr_callback The callback that is called with each line of output on stderr. If nullptr, implies redirect_stderr_to_null unless redirect_to_stdout is set in options. * @param timeout The timeout, in seconds. Defaults to no timeout. * @param options The execution options. Defaults to trimming output and merging the environment. * @return Returns true if the execution succeeded or false if it did not. */ bool each_line( std::string const& file, std::vector const& arguments, std::map const& environment, std::function stdout_callback, std::function stderr_callback = nullptr, uint32_t timeout = 0, lth_util::option_set const& options = { execution_options::trim_output, execution_options::merge_environment }); /** * Processes stdout and stderror streams of a child process. * @param trim True if output should be trimmed or false if not. * @param stdout_callback The callback to use when a line is read for stdout. * @param stderr_callback The callback to use when a line is read for stderr. * @param read_streams The callback that is called to read stdout and stderr streams. * @return Returns a tuple of stdout and stderr output. If stdout_callback or stderr_callback is given, it will return empty strings. 
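     *
     * An illustrative sketch only; the inner callback types handed to read_streams are an
     * assumption based on this declaration and the behaviour described above, not a verified
     * reference:
     * @code
     * auto streams = process_streams(true, nullptr, nullptr,
     *     [](std::function<bool(std::string const&)> on_stdout,
     *        std::function<bool(std::string const&)> on_stderr) {
     *         on_stdout("from stdout\n");
     *         on_stderr("from stderr\n");
     *     });
     * // With no per-line callbacks supplied, std::get<0>(streams) holds the stdout text and
     * // std::get<1>(streams) the stderr text, trimmed because trim == true.
     * @endcode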
*/ std::tuple process_streams( bool trim, std::function const& stdout_callback, std::function const& stderr_callback, std::function, std::function)> const& read_streams); }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/src/000075500000000000000000000000001332360634000173455ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/src/execution.cc000064400000000000000000000434201332360634000216620ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; using namespace leatherman::logging; using namespace leatherman::util; namespace fs = boost::filesystem; using namespace boost::algorithm; namespace leatherman { namespace execution { execution_exception::execution_exception(string const& message) : runtime_error(message) { } execution_failure_exception::execution_failure_exception(string const& message, string output, string error) : execution_exception(message), _output(move(output)), _error(move(error)) { } string const& execution_failure_exception::output() const { return _output; } string const& execution_failure_exception::error() const { return _error; } child_exit_exception::child_exit_exception(string const& message, int status_code, string output, string error) : execution_failure_exception(message, move(output), move(error)), _status_code(status_code) { } int child_exit_exception::status_code() const { return _status_code; } child_signal_exception::child_signal_exception(string const& message, int signal, string output, string error) : execution_failure_exception(message, move(output), move(error)), _signal(signal) { } int child_signal_exception::signal() const { return _signal; } timeout_exception::timeout_exception(string const& message, size_t pid) : execution_exception(message), _pid(pid) { } size_t timeout_exception::pid() const { return _pid; } void log_execution(string const& file, vector const* arguments) { if (!LOG_IS_DEBUG_ENABLED()) { return; } ostringstream command_line; command_line << file; if (arguments) { for (auto const& argument : *arguments) { command_line << ' ' << argument; } } LOG_DEBUG("executing command: {1}", command_line.str()); } string expand_command(string const& command, vector const& directories) { string result = command; boost::trim(result); if (result.empty()) { return {}; } bool quoted = result[0] == '"' || result[0] == '\''; string file; string remainder; if (quoted) { // Look for the ending quote for the command auto pos = result.find(result[0], 1); if (pos == string::npos) { // No closing quote file = result.substr(1); } else { file = result.substr(1, pos - 1); remainder = result.substr(pos + 1); } } else { auto pos = command.find(' '); if (pos == string::npos) { file = result; } else { file = result.substr(0, pos); remainder = result.substr(pos); } } file = which(file, directories); if (file.empty()) { return {}; } // If originally unquoted and the expanded file has a space, quote it if (!quoted && file.find(' ') != string::npos) { return "\"" + file + "\"" + remainder; } else if (quoted) { // Originally quoted, use the same quote characters return result[0] + file + result[0] + remainder; } // Not quoted return file + remainder; } result execute( string const& file, vector const* arguments, string const* input, map const* environment, function const& pid_callback, function const& stdout_callback, function const& stderr_callback, 
option_set const& options, uint32_t timeout); static void setup_execute(function& stderr_callback, option_set& options) { // If not redirecting stderr to stdout, but redirecting to null, use a do-nothing callback so that stderr is logged when the level is debug if (LOG_IS_DEBUG_ENABLED() && !options[execution_options::redirect_stderr_to_stdout] && options[execution_options::redirect_stderr_to_null]) { // Use a do-nothing callback so that stderr is logged stderr_callback = ([&](string&) { return true; }); options.clear(execution_options::redirect_stderr_to_null); } } result execute( string const& file, uint32_t timeout, option_set const& options) { auto actual_options = options; function stderr_callback; setup_execute(stderr_callback, actual_options); return execute(file, nullptr, nullptr, nullptr, nullptr, nullptr, stderr_callback, actual_options, timeout); } result execute( string const& file, vector const& arguments, uint32_t timeout, option_set const& options) { auto actual_options = options; function stderr_callback; setup_execute(stderr_callback, actual_options); return execute(file, &arguments, nullptr, nullptr, nullptr, nullptr, stderr_callback, actual_options, timeout); } result execute( string const& file, vector const& arguments, map const& environment, uint32_t timeout, option_set const& options) { auto actual_options = options; function stderr_callback; setup_execute(stderr_callback, actual_options); return execute(file, &arguments, nullptr, &environment, nullptr, nullptr, stderr_callback, actual_options, timeout); } result execute( string const& file, vector const& arguments, string const& input, uint32_t timeout, option_set const& options) { auto actual_options = options; function stderr_callback; setup_execute(stderr_callback, actual_options); return execute(file, &arguments, &input, nullptr, nullptr, nullptr, stderr_callback, actual_options, timeout); } result execute( string const& file, vector const& arguments, string const& input, map const& environment, uint32_t timeout, option_set const& options) { auto actual_options = options; function stderr_callback; setup_execute(stderr_callback, actual_options); return execute(file, &arguments, &input, &environment, nullptr, nullptr, stderr_callback, actual_options, timeout); } result execute( string const& file, vector const& arguments, string const& input, map const& environment, // cppcheck-suppress passedByValue std::function pid_callback, uint32_t timeout, option_set const& options) { auto actual_options = options; function stderr_callback; setup_execute(stderr_callback, actual_options); return execute(file, &arguments, &input, &environment, pid_callback, nullptr, stderr_callback, actual_options, timeout); } result execute( std::string const& file, std::vector const& arguments, std::string const& input, std::string const& out_file, std::string const& err_file, std::map const& environment, std::function pid_callback, uint32_t timeout, lth_util::option_set const& options) { return execute(file, arguments, input, out_file, err_file, environment, pid_callback, timeout, {}, options); } result execute( // cppcheck-suppress funcArgOrderDifferent std::string const& file, std::vector const& arguments, std::string const& input, std::string const& out_file, std::string const& err_file, std::map const& environment, // cppcheck-suppress passedByValue std::function pid_callback, uint32_t timeout, boost::optional perms, lth_util::option_set const& options) { auto actual_options = options; function stderr_callback; function stdout_callback; 
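        // Overview of what follows: open out_file (and err_file, if one was given) in
        // binary mode, optionally apply the requested permissions via
        // boost::filesystem::permissions, then install line callbacks that append each
        // line of child output to the corresponding stream before delegating to the
        // internal execute() implementation.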
boost::nowide::ofstream out_stream; boost::nowide::ofstream err_stream; out_stream.open(out_file.c_str(), std::ios::binary); if (!out_stream.is_open()) { throw execution_exception(_("failed to open output file {1}", out_file)); } boost::system::error_code ec; if (perms) { fs::permissions(out_file, *perms, ec); if (ec) { throw execution_exception(_("failed to modify permissions on output file {1} to {2,num,oct}: {3}", out_file, *perms, ec.message())); } } if (err_file.empty()) { setup_execute(stderr_callback, actual_options); } else { err_stream.open(err_file.c_str(), std::ios::binary); if (!err_stream.is_open()) { throw execution_exception(_("failed to open error file {1}", err_file)); } if (perms) { fs::permissions(err_file, *perms, ec); if (ec) { throw execution_exception(_("failed to modify permissions on error file {1} to {2,num,oct}: {3}", err_file, *perms, ec.message())); } } stderr_callback = ([&](string& line) { err_stream << line << "\n"; return true; }); } stdout_callback = ([&](string& line) { out_stream << line << "\n"; return true; }); auto env = environment.empty() ? nullptr : &environment; return execute(file, &arguments, &input, env, pid_callback, stdout_callback, stderr_callback, actual_options, timeout); } static void setup_each_line(function& stdout_callback, function& stderr_callback, option_set& options) { // If not given a stdout callback, use a no-op one to prevent execute from buffering stdout (also logs for debug level) if (!stdout_callback) { stdout_callback = ([&](string&) { return true; }); } // If given no stderr callback and not redirecting to stdout, redirect stderr to null when not debug log level if (!stderr_callback && !options[execution_options::redirect_stderr_to_stdout]) { if (LOG_IS_DEBUG_ENABLED()) { // Use a do-nothing callback so that stderr is logged stderr_callback = ([&](string&) { return true; }); options.clear(execution_options::redirect_stderr_to_null); } else { // Not debug level, redirect to null options.set(execution_options::redirect_stderr_to_null); } } } bool each_line( string const& file, function stdout_callback, function stderr_callback, uint32_t timeout, option_set const& options) { auto actual_options = options; setup_each_line(stdout_callback, stderr_callback, actual_options); return execute(file, nullptr, nullptr, nullptr, nullptr, stdout_callback, stderr_callback, actual_options, timeout).success; } bool each_line( string const& file, vector const& arguments, function stdout_callback, function stderr_callback, uint32_t timeout, option_set const& options) { auto actual_options = options; setup_each_line(stdout_callback, stderr_callback, actual_options); return execute(file, &arguments, nullptr, nullptr, nullptr, stdout_callback, stderr_callback, actual_options, timeout).success; } bool each_line( string const& file, vector const& arguments, map const& environment, function stdout_callback, function stderr_callback, uint32_t timeout, option_set const& options) { auto actual_options = options; setup_each_line(stdout_callback, stderr_callback, actual_options); return execute(file, &arguments, nullptr, &environment, nullptr, stdout_callback, stderr_callback, actual_options, timeout).success; } static bool process_data(bool trim, string const& data, string& buffer, string const& logger, function const& callback) { // Do nothing if nothing was read if (data.empty()) { return true; } // If given no callback, buffer the entire output if (!callback) { buffer.append(data); return true; } // Find the last newline, because anything after may 
not be a complete line. auto lastNL = data.find_last_of("\n"); if (lastNL == string::npos) { // No newline found, so keep appending and continue. buffer.append(data); return true; } // Make a range for iterating through lines. auto str_range = make_pair(data.begin(), data.begin()+lastNL); auto line_iterator = boost::make_iterator_range( make_split_iterator(str_range, token_finder(is_any_of("\n"))), split_iterator()); for (auto &line : line_iterator) { // The previous trailing data is picked up by default. buffer.append(line.begin(), line.end()); if (trim) { boost::trim(buffer); // Skip empty lines only if trimming output. // Otherwise we want to include empty lines to remain honest to the original output. if (buffer.empty()) { continue; } } #ifdef _WIN32 // Remove leading or trailing carriage returns. We don't want them during callbacks. boost::trim_if(buffer, is_any_of("\r")); #endif // Log the line to the output logger if (LOG_IS_DEBUG_ENABLED()) { log(logger, log_level::debug, 0, buffer); } // Pass the line to the callback bool finished = !callback(buffer); // Clear the line for the next iteration buffer.clear(); // Break out if finished processing if (finished) { return false; } } // Save the new trailing data; step past the newline buffer.assign(data.begin()+lastNL+1, data.end()); return true; } tuple process_streams(bool trim, function const& stdout_callback, function const& stderr_callback, function, function)> const& read_streams) { // Get a special logger used specifically for child process output static const string stdout_logger = "|"; static const string stderr_logger = "!!!"; // Buffers for all of the output or partial line output string stdout_buffer; string stderr_buffer; // Read the streams read_streams( [&](string const& data) { if (!process_data(trim, data, stdout_buffer, stdout_logger, stdout_callback)) { LOG_DEBUG("completed processing output: closing child pipes."); return false; } return true; }, [&](string const& data) { if (!process_data(trim, data, stderr_buffer, stderr_logger, stderr_callback)) { LOG_DEBUG("completed processing output: closing child pipes."); return false; } return true; }); // Log the result and do a final callback if needed. 
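        // Anything still sitting in the buffers here is either a final partial line that
        // was never terminated by '\n', or the whole output when no callback was supplied;
        // trim it if requested and hand it to the logger/callbacks below so it is not
        // silently dropped.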
if (trim) { boost::trim(stdout_buffer); boost::trim(stderr_buffer); } // Log the last line of output for stdout if (!stdout_buffer.empty()) { if (LOG_IS_DEBUG_ENABLED()) { log(stdout_logger, log_level::debug, 0, stdout_buffer); } if (stdout_callback) { stdout_callback(stdout_buffer); stdout_buffer.clear(); } } // Log the last line of output for stderr if (!stderr_buffer.empty()) { if (LOG_IS_DEBUG_ENABLED()) { log(stderr_logger, log_level::debug, 0, stderr_buffer); } if (stderr_callback) { stderr_callback(stderr_buffer); stderr_buffer.clear(); } } return make_tuple(move(stdout_buffer), move(stderr_buffer)); } }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/src/posix/000075500000000000000000000000001332360634000205075ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/src/posix/execution.cc000064400000000000000000000526051332360634000230310ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "platform.hpp" // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; using namespace leatherman::util; using namespace leatherman::util::posix; using namespace leatherman::execution; using namespace leatherman::logging; using namespace leatherman::file_util; using namespace boost::filesystem; // Declare environ for OSX extern char** environ; namespace leatherman { namespace execution { void log_execution(string const& file, vector const* arguments); const char *const command_shell = "sh"; const char *const command_args = "-c"; static uint64_t get_max_descriptor_limit() { // WARNING: this function is potentially called under vfork // See comment below in exec_child in case you're not afraid #ifdef _SC_OPEN_MAX { auto open_max = sysconf(_SC_OPEN_MAX); if (open_max > 0) { return open_max; } } #endif // _SC_OPEN_MAX #ifdef RLIMIT_NOFILE { rlimit lim; if (getrlimit(RLIMIT_NOFILE, &lim) == 0) { return lim.rlim_cur; } } #endif // RLIMIT_NOFILE #ifdef OPEN_MAX return OPEN_MAX; #else return 256; #endif // OPEN_MAX } static volatile bool command_timedout = false; static void timer_handler(int signal) { command_timedout = true; } string format_error(string const& message, int error) { if (message.empty()) { return _("{1} ({2})", strerror(error), error); } return _("{1}: {2} ({3}).", message, strerror(error), error); } static vector get_groups() { // Query for the number of groups auto num = getgroups(0, nullptr); if (num < 1) { return {}; } // Allocate a buffer for the groups vector groups(static_cast(num)); num = getgroups(groups.size(), groups.data()); if (static_cast(num) != groups.size()) { return {}; } return groups; } static bool is_group_member(gid_t gid) { // Check for primary group if (getgid() == gid || getegid() == gid) { return true; } // Get the groups and search for the given gid static auto groups = get_groups(); return find(groups.begin(), groups.end(), gid) != groups.end(); } static bool is_executable(char const* path) { struct stat fs; if (stat(path, &fs) != 0) { return false; } auto euid = geteuid(); // If effectively running as root, any exec bit will do if (euid == 0) { return fs.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH); } // If the file is effectively owned, check for user exec bit if (fs.st_uid == euid) { return fs.st_mode & S_IXUSR; } // If the file is owned by a group we're a member of, check for the group exec bit if (is_group_member(fs.st_gid)) { return fs.st_mode & S_IXGRP; 
} // Lastly check for "others" exec bit return fs.st_mode & S_IXOTH; } string which(string const& file, vector const& directories) { // If the file is already absolute, return it if it's executable path p = file; boost::system::error_code ec; if (p.is_absolute()) { return is_regular_file(p, ec) && is_executable(p.c_str()) ? p.string() : string(); } // Otherwise, check for an executable file under the given search paths for (auto const& dir : directories) { path p = path(dir) / file; if (is_regular_file(p, ec) && is_executable(p.c_str())) { return p.string(); } } return {}; } // Represents information about a pipe struct pipe { // cppcheck-suppress passedByValue pipe(string pipe_name, scoped_descriptor desc, function cb) : name(move(pipe_name)), descriptor(move(desc)), callback(move(cb)), read(true) { } // cppcheck-suppress passedByValue pipe(string pipe_name, scoped_descriptor desc, string buf) : name(move(pipe_name)), descriptor(move(desc)), buffer(move(buf)), read(false) { } const string name; scoped_descriptor descriptor; string buffer; function callback; bool read; }; static void rw_from_child(pid_t child, array& pipes, uint32_t timeout, bool allow_stdin_unread) { // Each pipe is a tuple of descriptor, buffer to use to read data, and a callback to call when data is read // The input pair is a descriptor and text to write to it fd_set read_set, write_set; while (!command_timedout) { FD_ZERO(&read_set); FD_ZERO(&write_set); // Set up the descriptors and buffers to select upon int max = -1; for (auto& pipe : pipes) { if (pipe.descriptor == -1) { continue; } FD_SET(pipe.descriptor, pipe.read ? &read_set : &write_set); if (pipe.descriptor > max) { max = pipe.descriptor; } if (pipe.read) { pipe.buffer.resize(4096); } } if (max == -1) { // All pipes closed; we're done return; } // If using a timeout, timeout after 500ms to check whether or not the command itself timed out timeval read_timeout = {}; read_timeout.tv_usec = 500000; int result = select(max + 1, &read_set, &write_set, nullptr, timeout ? &read_timeout : nullptr); if (result == -1) { if (errno != EINTR) { throw execution_exception(format_error(_("select call failed waiting for child i/o"))); } // Interrupted by signal LOG_DEBUG("select call was interrupted and will be retried."); continue; } else if (result == 0) { // Read timeout, try again continue; } for (auto& pipe : pipes) { if (pipe.descriptor == -1 || !FD_ISSET(pipe.descriptor, pipe.read ? &read_set : &write_set)) { continue; } // There is data to read/write auto count = pipe.read ? read(pipe.descriptor, &pipe.buffer[0], pipe.buffer.size()) : write(pipe.descriptor, pipe.buffer.c_str(), pipe.buffer.size()); if (count < 0) { if (allow_stdin_unread && !pipe.read && errno == EPIPE) { // Input pipe was closed prematurely due to process exit, log and let it go. 
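                    // This is the behaviour behind the allow_stdin_unread option: a child
                    // that exits (or closes its stdin) without consuming all of the input
                    // is tolerated rather than treated as an execution failure.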
LOG_DEBUG("{1} pipe i/o was closed early, process may have ignored input.", pipe.name); pipe.descriptor = {}; continue; } else if (errno == EINTR) { // Interrupted by signal LOG_DEBUG("{1} pipe i/o was interrupted and will be retried.", pipe.name); continue; } throw execution_exception(_("{1} pipe i/o failed: {2}", pipe.name, format_error())); } else if (count == 0) { // Pipe has closed pipe.descriptor = {}; continue; } if (pipe.read) { // Call the callback pipe.buffer.resize(count); if (!pipe.callback(pipe.buffer)) { // Callback signaled that we're done return; } } else { // Register written data pipe.buffer.erase(0, count); } } } // Should only reach here if the command timed out // cppcheck-suppress zerodivcond - http://trac.cppcheck.net/ticket/5402 throw timeout_exception(_("command timed out after {1} seconds.", timeout), static_cast(child)); } static void do_exec_child(int in_fd, int out_fd, int err_fd, uint64_t max_fd, char const* program, char const** argv, char const** envp) { // WARNING: this function is potentially called from a vfork'd child // Do not modify program state from this function; only call setpgid, dup2, close, execve, and _exit // Do not allocate heap memory or throw exceptions // The child is sharing the address space of the parent process, so carelessly modifying this // function may lead to parent state corruption, memory leaks, and/or total protonic reversal // As such, strings are explicitly not localized in this function. // // This is especially important due to a deadlock in vfork/exec on Solaris, identified in // http://www.oracle.com/technetwork/server-storage/solaris10/subprocess-136439.html. The solution // they use in posix_spawn is to avoid calling functions exported as from libc as global symbols // after the fork. `write`, `strlen`, and `close` are still suspect below. // Set the process group; this will be used by the parent if we need to kill the process and its children if (setpgid(0, 0) == -1) { return; } // Redirect stdin if (dup2(in_fd, STDIN_FILENO) == -1) { return; } // Redirect stdout if (dup2(out_fd, STDOUT_FILENO) == -1) { return; } // Redirect stderr if (dup2(err_fd, STDERR_FILENO) == -1) { return; } // Close all open file descriptors above stderr for (uint64_t i = (STDERR_FILENO + 1); i < max_fd; ++i) { close(i); } // Execute the given program; this should not return if successful execve(program, const_cast(argv), const_cast(envp)); } void exec_child(int in_fd, int out_fd, int err_fd, uint64_t max_fd, char const* program, char const** argv, char const** envp) { // WARNING: this function is potentially called from a vfork'd child // Do not modify program state from this function; only call setpgid, dup2, close, execve, and _exit // Do not allocate heap memory or throw exceptions // The child is sharing the address space of the parent process, so carelessly modifying this // function may lead to parent state corruption, memory leaks, and/or total protonic reversal do_exec_child(in_fd, out_fd, err_fd, max_fd, program, argv, envp); // If we've reached here, we've failed, so exit the child _exit(errno == 0 ? EXIT_FAILURE : errno); } // Helper function that turns a vector of strings into a vector of const cstr pointers // This is used to pass arguments and environment to execve static vector to_exec_arg(vector const* argument, string const* first = nullptr) { vector result; result.reserve((argument ? argument->size() : 0) + (first ? 
1 : 0) + 1 /* terminating null */); if (first) { result.push_back(first->c_str()); } if (argument) { transform(argument->begin(), argument->end(), back_inserter(result), [](string const& s) { return s.c_str(); }); } // Null terminate the list result.push_back(nullptr); return result; } // Helper function that creates a vector of environment variables in the format of key=value // Also handles merging of environment and defaulting LC_ALL and LANG to C static vector create_environment(map const* environment, bool merge, bool inherit) { vector result; // Merge in our current environment, if requested if (merge && environ) { for (auto var = environ; *var; ++var) { // Don't respect LC_ALL or LANG from the parent process, unless inherit_locale specified if (!inherit && (boost::starts_with(*var, "LC_ALL=") || boost::starts_with(*var, "LANG="))) { continue; } result.emplace_back(*var); } } // Add the given environment if (environment) { for (auto const& kvp : *environment) { result.emplace_back(_("{1}={2}", kvp.first, kvp.second)); } } // Set the locale to C unless specified in the given environment string locale_env; if (!environment || environment->count("LC_ALL") == 0) { if (inherit && environment::get("LC_ALL", locale_env)) { result.emplace_back("LC_ALL=" + locale_env); } else if (!inherit) { result.emplace_back("LC_ALL=C"); } } if (!environment || environment->count("LANG") == 0) { if (inherit && environment::get("LANG", locale_env)) { result.emplace_back("LANG=" + locale_env); } else if (!inherit) { result.emplace_back("LANG=C"); } } return result; } result execute( string const& file, vector const* arguments, string const* input, map const* environment, function const& pid_callback, function const& stdout_callback, function const& stderr_callback, option_set const& options, uint32_t timeout) { // Search for the executable string executable = which(file); log_execution(executable.empty() ? 
file : executable, arguments); if (executable.empty()) { LOG_DEBUG("{1} was not found on the PATH.", file); if (options[execution_options::throw_on_nonzero_exit]) { throw child_exit_exception(_("child process returned non-zero exit status."), 127, {}, {}); } return {false, "", "", 127, 0}; } // Create the pipes for stdin/stdout redirection int pipes[2]; if (::pipe(pipes) < 0) { throw execution_exception(format_error(_("failed to allocate pipe for stdin redirection"))); } scoped_descriptor stdin_read(pipes[0]); scoped_descriptor stdin_write(pipes[1]); if (::pipe(pipes) < 0) { throw execution_exception(format_error(_("failed to allocate pipe for stdout redirection"))); } scoped_descriptor stdout_read(pipes[0]); scoped_descriptor stdout_write(pipes[1]); // Redirect stderr to stdout, null, or to the pipe to read scoped_descriptor stderr_read(-1); scoped_descriptor stderr_write(-1); scoped_descriptor dev_null(-1); int child_stderr = -1; if (options[execution_options::redirect_stderr_to_stdout]) { child_stderr = stdout_write; } else if (options[execution_options::redirect_stderr_to_null]) { dev_null = scoped_descriptor(open("/dev/null", O_RDWR)); child_stderr = dev_null; } else { if (::pipe(pipes) < 0) { throw execution_exception(format_error(_("failed to allocate pipe for stderr redirection"))); } stderr_read = scoped_descriptor(pipes[0]); stderr_write = scoped_descriptor(pipes[1]); child_stderr = stderr_write; } // Allocate the child process arguments and envp *before* creating the child auto args = to_exec_arg(arguments, &file); auto variables = create_environment(environment, options[execution_options::merge_environment], options[execution_options::inherit_locale]); auto envp = to_exec_arg(&variables); // Create the child pid_t child = create_child(options, stdin_read, stdout_write, child_stderr, get_max_descriptor_limit(), executable.c_str(), args.data(), envp.data()); // Close the unused descriptors if (!input) { stdin_write.release(); } stdin_read.release(); stdout_write.release(); stderr_write.release(); // Define a reaper that is invoked when we exit this scope // This ensures that the child won't become a zombie if an exception is thrown bool kill_child = true; bool success = false; bool signaled = false; int status = 0; scope_exit reaper([&]() { if (kill_child) { kill(-child, SIGKILL); } // Wait for the child to exit if (waitpid(child, &status, 0) == -1) { LOG_DEBUG(format_error(_("waitpid failed"))); return; } if (WIFEXITED(status)) { status = static_cast(WEXITSTATUS(status)); success = status == 0; return; } if (WIFSIGNALED(status)) { signaled = true; status = static_cast(WTERMSIG(status)); return; } }); // Set up an interval timer for timeouts // Note: OSX doesn't implement POSIX per-process timers, so we're stuck with the obsolete POSIX timers API scope_exit timer_reset; if (timeout) { struct sigaction sa = {}; sa.sa_handler = timer_handler; if (sigaction(SIGALRM, &sa, nullptr) == -1) { throw execution_exception(format_error(_("sigaction failed while setting up timeout"))); } itimerval timer = {}; timer.it_value.tv_sec = static_cast(timeout); if (setitimer(ITIMER_REAL, &timer, nullptr) == -1) { throw execution_exception(format_error(_("setitimer failed while setting up timeout"))); } // Set the resource to disable the timer timer_reset = scope_exit([&]() { itimerval timer = {}; setitimer(ITIMER_REAL, &timer, nullptr); command_timedout = false; }); } // Execute the PID callback if (pid_callback) { pid_callback(child); } // This somewhat complicated construct performs the following: 
// Calls a platform-agnostic implementation of processing stdout/stderr data // The platform agnostic code calls back into the given lambda to do the actual reading // It provides two callbacks of its own to call when there's data available on stdout/stderr // We return from the lambda when all data has been read string output, error; tie(output, error) = process_streams(options[execution_options::trim_output], stdout_callback, stderr_callback, [&](function const& process_stdout, function const& process_stderr) { array pipes = { { pipe("stdout", move(stdout_read), process_stdout), pipe("stderr", move(stderr_read), process_stderr), input ? pipe("stdin", move(stdin_write), *input) : pipe("", {}, "") }}; rw_from_child(child, pipes, timeout, options[execution_options::allow_stdin_unread]); }); // Close the read pipes // If the child hasn't sent all the data yet, this may signal SIGPIPE on next write stdout_read.release(); stderr_read.release(); // Wait for the child to exit kill_child = false; reaper.invoke(); if (signaled) { LOG_DEBUG("process was signaled with signal {1}.", status); } else { LOG_DEBUG("process exited with status code {1}.", status); } // Throw exception if needed if (!success) { if (!signaled && status != 0 && options[execution_options::throw_on_nonzero_exit]) { throw child_exit_exception(_("child process returned non-zero exit status ({1}).", status), status, move(output), move(error)); } if (signaled && options[execution_options::throw_on_signal]) { throw child_signal_exception(_("child process was terminated by signal ({1}).", status), status, move(output), move(error)); } } return {success, move(output), move(error), status, static_cast(child)}; } }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/src/posix/generic/000075500000000000000000000000001332360634000221235ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/src/posix/generic/platform.cc000064400000000000000000000022411332360634000242550ustar00rootroot00000000000000#include #include #include "../platform.hpp" // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; namespace leatherman { namespace execution { pid_t create_child(leatherman::util::option_set const& options, int in_fd, int out_fd, int err_fd, uint64_t max_fd, char const* program, char const** argv, char const** envp) { // Fork the child process // Note: this uses vfork (unless the thread_safe execution option is specified), which is inherently unsafe // (the parent's address space is shared with the child) pid_t pid = options[execution_options::thread_safe] ? fork() : vfork(); if (pid < 0) { throw execution_exception(format_error(_("failed to fork child process"))); } if (pid == 0) { // Is this the child process? 
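        // In the vfork case the parent stays suspended and shares this address space
        // until the child reaches execve or _exit inside exec_child, which is why only
        // async-signal-safe work happens between here and the exec.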
// Exec the child; this never returns exec_child(in_fd, out_fd, err_fd, max_fd, program, argv, envp); } return pid; } }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/src/posix/platform.hpp000064400000000000000000000011521332360634000230430ustar00rootroot00000000000000#include #include namespace leatherman { namespace execution { std::string format_error(std::string const& message = std::string(), int error = errno); pid_t create_child(leatherman::util::option_set const& options, int in_fd, int out_fd, int err_fd, uint64_t max_fd, char const* program, char const** argv, char const** envp); void exec_child(int in_fd, int out_fd, int err_fd, uint64_t max_fd, char const* program, char const** argv, char const** envp); }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/src/posix/solaris/000075500000000000000000000000001332360634000221635ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/src/posix/solaris/platform.cc000064400000000000000000000143361332360634000243250ustar00rootroot00000000000000/* * Portions of this file were copied from OpenSSH's * openbsd-compat/port-solaris.c file, that file * contained the following copyright notice: * * Copyright (c) 2006 Chad Mynhier. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include "../platform.hpp" #define CT_TEMPLATE CTFS_ROOT "/process/template" #define CT_LATEST CTFS_ROOT "/process/latest" // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; namespace leatherman { namespace execution { static int activate_new_contract_template(void) { int tmpl_fd; int err; // Open a template if ((tmpl_fd = open(CT_TEMPLATE, O_RDWR)) == -1) { err = errno; goto fail; } // Set the template parameters and event sets if ((err = ct_pr_tmpl_set_param(tmpl_fd, CT_PR_PGRPONLY)) != 0) { goto close_fail; } if ((err = ct_pr_tmpl_set_fatal(tmpl_fd, CT_PR_EV_HWERR)) != 0) { goto close_fail; } if ((err = ct_tmpl_set_critical(tmpl_fd, 0)) != 0) { goto close_fail; } if ((err = ct_tmpl_set_informative(tmpl_fd, CT_PR_EV_HWERR)) != 0) { goto close_fail; } // Now make this the active template for this process if ((err = ct_tmpl_activate(tmpl_fd)) != 0) { goto close_fail; } return tmpl_fd; close_fail: close(tmpl_fd); fail: throw execution_exception(format_error(_("failed to create process contract template"), err)); } static int deactivate_contract_template(int tmpl_fd) { // WARNING: this function is potentially called from a vfork'd child // Do not modify program state from this function; only call setpgid, dup2, close, execve, and _exit // Do not allocate heap memory or throw exceptions // The child is sharing the address space of the parent process, so carelessly modifying this // function may lead to parent state corruption, memory leaks, and/or total protonic reversal if (tmpl_fd < 0) { return 0; } // Deactivate the template int err = ct_tmpl_clear(tmpl_fd); close(tmpl_fd); return err; } // Lookup the latest child process contract ID static ctid_t get_latest_child_contract_id(void) { int stat_fd; ct_stathdl_t stathdl; ctid_t ctid; int err; if ((stat_fd = open(CT_LATEST, O_RDONLY)) < 0) { err = errno; goto fail; } if ((err = ct_status_read(stat_fd, CTD_COMMON, &stathdl)) != 0) { close(stat_fd); goto fail; } ctid = ct_status_get_id(stathdl); err = errno; ct_status_free(stathdl); close(stat_fd); if (ctid < 0) { goto fail; } return ctid; fail: throw execution_exception(format_error(_("failed to lookup the latest child process contract"), err)); } static void abandon_latest_child_contract() { ctid_t ctid = get_latest_child_contract_id(); int ctl_fd; int err; if ((ctl_fd = open((boost::format { "%s/process/%d/ctl" } % CTFS_ROOT % ctid).str().c_str(), O_WRONLY)) < 0) { err = errno; goto fail; } // Abandon the contract created for the child process err = ct_ctl_abandon(ctl_fd); close(ctl_fd); if (err == 0) { return; } fail: throw execution_exception(format_error(_("failed to abandon contract created for a child process"), err)); } pid_t create_child(leatherman::util::option_set const& options, int in_fd, int out_fd, int err_fd, uint64_t max_fd, char const* program, char const** argv, char const** envp) { bool detach = options[execution_options::create_detached_process]; // Create a new process contract template & activate it int tmpl_fd = detach ? activate_new_contract_template() : -1; int err; // Fork the child process // Note: this uses vfork (unless the thread_safe execution option is specified), which is inherently unsafe // (the parent's address space is shared with the child) pid_t pid = options[execution_options::thread_safe] ? 
fork() : vfork(); if (pid < 0) { err = errno; deactivate_contract_template(tmpl_fd); throw execution_exception(format_error(_("failed to fork child process"), err)); } if (pid == 0) { // Is this the child process? if ((err = deactivate_contract_template(tmpl_fd)) != 0) { _exit(err); } // Exec the child; this never returns exec_child(in_fd, out_fd, err_fd, max_fd, program, argv, envp); } // This is the parent process if ((err = deactivate_contract_template(tmpl_fd)) != 0) { throw execution_exception(format_error(_("failed to deactivate contract template created for a child process"), err)); } if (detach) { // Abandon the contract created for the child process abandon_latest_child_contract(); } return pid; } }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/src/windows/000075500000000000000000000000001332360634000210375ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/src/windows/execution.cc000064400000000000000000000732771332360634000233710ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; using namespace leatherman::windows; using namespace leatherman::logging; using namespace leatherman::util; using namespace leatherman::util::windows; using namespace boost::filesystem; using namespace boost::algorithm; namespace leatherman { namespace execution { void log_execution(string const& file, vector const* arguments); const char *const command_shell = "cmd.exe"; const char *const command_args = "/c"; struct extpath_helper { vector const& ext_paths() const { return _extpaths; } bool contains(const string & ext) const { return binary_search(_extpaths.begin(), _extpaths.end(), to_lower_copy(ext)); } private: // Use sorted, lower-case operations to ignore case and use binary search. vector _extpaths = {".bat", ".cmd", ".com", ".exe"};; }; static bool is_executable(path const& p, extpath_helper const* helper = nullptr) { // If there's an error accessing file status, we assume is_executable // is false and return. The reason for failure doesn't matter to us. boost::system::error_code ec; bool isfile = is_regular_file(p, ec); if (ec) { LOG_TRACE("error reading status of path {1}: {2} ({3})", p, ec.message(), ec.value()); } if (helper) { // Checking extensions aren't needed if we explicitly specified it. // If helper was passed, then we haven't and should check the ext. isfile &= helper->contains(p.extension().string()); } return isfile; } string which(string const& file, vector const& directories) { // On Windows, everything has execute permission; Ruby determined // executability based on extension {com, exe, bat, cmd}. We'll do the // same check here using extpath_helper. static extpath_helper helper; // If the file is already absolute, return it if it's executable. path p = file; if (p.is_absolute()) { return is_executable(p, &helper) ? 
p.string() : string(); } // On Windows, treat 'echo' as a command that can be found if (file == "echo") { return "echo"; } // Otherwise, check for an executable file under the given search paths for (auto const& dir : directories) { path p = path(dir) / file; if (!p.has_extension()) { path pext = p; for (auto const&ext : helper.ext_paths()) { pext.replace_extension(ext); if (is_executable(pext)) { return pext.string(); } } } if (is_executable(p, &helper)) { return p.string(); } } return {}; } // Create a pipe, throwing if there's an error. Returns {read, write} handles. // Always creates overlapped pipes. static tuple CreatePipeThrow() { static LONG counter = 0; static boost::uuids::random_generator rand_uuid; SECURITY_ATTRIBUTES attributes = {}; attributes.nLength = sizeof(SECURITY_ATTRIBUTES); attributes.bInheritHandle = TRUE; attributes.lpSecurityDescriptor = NULL; // Format a name for the pipe. Use the thread id to ensure no two threads try to use the same // pipe, and a counter to generate multiple pipes for the same process invocation. // A scenario exists using timeouts where we could release the invoking end of a named pipe // but the other end doesn't release. Then the invoking thread shuts down and another with // the same thread id is started and reconnects to the existing named pipe. Use the process // id and a random UUID to make that highly unlikely. wstring name = boost::nowide::widen(_("\\\\.\\Pipe\\leatherman.{1}.{2}.{3}.{4}", GetCurrentProcessId(), GetCurrentThreadId(), InterlockedIncrement(&counter), to_string(rand_uuid()))); // Create the read pipe scoped_handle read_handle(CreateNamedPipeW( name.c_str(), PIPE_ACCESS_INBOUND | FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_WAIT, 1, 4096, 4096, 0, &attributes)); if (read_handle == INVALID_HANDLE_VALUE) { LOG_ERROR("failed to create read pipe: {1}.", windows::system_error()); throw execution_exception(_("failed to create read pipe.")); } // Open the write pipe scoped_handle write_handle(CreateFileW( name.c_str(), GENERIC_WRITE, 0, &attributes, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr)); if (write_handle == INVALID_HANDLE_VALUE) { LOG_ERROR("failed to create write pipe: {1}.", windows::system_error()); throw execution_exception(_("failed to create write pipe.")); } return make_tuple(move(read_handle), move(write_handle)); } // Source: http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx static string ArgvToCommandLine(vector const& arguments, bool preserve = false) { // Unless we're told otherwise, don't quote unless we actually need to do so - hopefully avoid problems if // programs won't parse quotes properly. string commandline; for (auto const& arg : arguments) { if (arg.empty()) { continue; } else if (preserve || arg.find_first_of(" \t\n\v\"") == arg.npos) { commandline += arg; } else { commandline += '"'; for (auto it = arg.begin(); ; ++it) { unsigned num_back_slashes = 0; while (it != arg.end() && *it == '\\') { ++it; ++num_back_slashes; } if (it == arg.end()) { // Escape all backslashes, but let the terminating double quotation mark we add below be // interpreted as a metacharacter. commandline.append(num_back_slashes * 2, '\\'); break; } else if (*it == '"') { // Escape all backslashes and the following double quotation mark. commandline.append(num_back_slashes * 2 + 1, '\\'); commandline.push_back(*it); } else { // Backslashes aren't special here. 
commandline.append(num_back_slashes, '\\'); commandline.push_back(*it); } } commandline += '"'; } commandline += ' '; } // Strip the trailing space. boost::trim_right(commandline); return commandline; } // Represents information about a pipe struct pipe { // cppcheck-suppress passedByValue pipe(string pipe_name, scoped_handle pipe_handle, function cb) : name(move(pipe_name)), handle(move(pipe_handle)), overlapped{}, pending(false), read(true), callback(move(cb)) { init(); } // cppcheck-suppress passedByValue pipe(string pipe_name, scoped_handle pipe_handle, string buf) : name(move(pipe_name)), handle(move(pipe_handle)), overlapped{}, pending(false), read(false), buffer(move(buf)) { init(); } const string name; scoped_handle handle; OVERLAPPED overlapped; scoped_handle event; bool pending; bool read; string buffer; function callback; private: void init() { if (handle != INVALID_HANDLE_VALUE) { event = scoped_handle(CreateEvent(nullptr, TRUE, FALSE, nullptr)); if (!event) { LOG_ERROR("failed to create {1} read event: {2}.", name, windows::system_error()); throw execution_exception(_("failed to create read event.")); } overlapped.hEvent = event; } } }; static void rw_from_child(DWORD child, array& pipes, uint32_t timeout, HANDLE timer, bool convert_newlines) { vector wait_handles; while (true) { // Process all pipes for (auto& pipe : pipes) { // If the handle is closed or is pending, skip if (pipe.handle == INVALID_HANDLE_VALUE || pipe.pending) { continue; } // Process the pipe until pending while (true) { // Before doing anything, check to see if there's been a timeout // This is done pre-emptively in case ReadFile never returns ERROR_IO_PENDING if (timeout && WaitForSingleObject(timer, 0) == WAIT_OBJECT_0) { throw timeout_exception(_("command timed out after {1} seconds.", timeout), static_cast(child)); } if (pipe.read) { // Read the data pipe.buffer.resize(4096); } DWORD count = 0; auto success = pipe.read ? 
ReadFile(pipe.handle, &pipe.buffer[0], pipe.buffer.size(), &count, &pipe.overlapped) : WriteFile(pipe.handle, pipe.buffer.c_str(), pipe.buffer.size(), &count, &pipe.overlapped); if (!success) { // Treat broken pipes as closed pipes if (GetLastError() == ERROR_BROKEN_PIPE) { pipe.handle = {}; break; } // Check to see if it's a pending operation if (GetLastError() == ERROR_IO_PENDING) { pipe.pending = true; break; } LOG_ERROR("{1} pipe i/o failed: {2}.", pipe.name, windows::system_error()); throw execution_exception(_("child i/o failed.")); } // Check for closed pipe if (count == 0) { pipe.handle = {}; break; } if (pipe.read) { // Read completed immediately, process the data pipe.buffer.resize(count); if (convert_newlines) { pipe.buffer.erase( std::remove(pipe.buffer.begin(), pipe.buffer.end(), '\r'), pipe.buffer.end()); } if (!pipe.callback(pipe.buffer)) { // Callback signaled that we're done return; } } else { // Register written data pipe.buffer.erase(0, count); } } } // All pipes should be pending now wait_handles.clear(); for (auto const& pipe : pipes) { if (pipe.handle == INVALID_HANDLE_VALUE || !pipe.pending) { continue; } wait_handles.push_back(pipe.event); } // If no wait handles, then we're done processing if (wait_handles.empty()) { return; } if (timeout) { wait_handles.push_back(timer); } // Wait for data (and, optionally, timeout) auto result = WaitForMultipleObjects(wait_handles.size(), wait_handles.data(), FALSE, INFINITE); if (result >= (WAIT_OBJECT_0 + wait_handles.size())) { LOG_ERROR("failed to wait for child process i/o: {1}.", windows::system_error()); throw execution_exception(_("failed to wait for child process i/o.")); } // Check for timeout DWORD index = result - WAIT_OBJECT_0; if (timeout && wait_handles[index] == timer) { throw timeout_exception(_("command timed out after {1} seconds.", timeout), static_cast(child)); } // Find the pipe for the event that was signalled for (auto& pipe : pipes) { if (pipe.handle == INVALID_HANDLE_VALUE || !pipe.pending || pipe.event != wait_handles[index]) { continue; } // Pipe is no longer pending pipe.pending = false; // Get the overlapped result and process it DWORD count = 0; if (!GetOverlappedResult(pipe.handle, &pipe.overlapped, &count, FALSE)) { if (GetLastError() != ERROR_BROKEN_PIPE) { LOG_ERROR("asynchronous i/o on {1} failed: {2}.", pipe.name, windows::system_error()); throw execution_exception(_("asynchronous i/o failed.")); } // Treat a broken pipe as nothing left to read count = 0; } // Check for closed pipe if (count == 0) { pipe.handle = {}; break; } if (pipe.read) { // Read completed, process the data pipe.buffer.resize(count); if (convert_newlines) { pipe.buffer.erase( std::remove(pipe.buffer.begin(), pipe.buffer.end(), '\r'), pipe.buffer.end()); } if (!pipe.callback(pipe.buffer)) { // Callback signaled that we're done return; } } else { // Register written data pipe.buffer.erase(0, count); } break; } } } result execute( string const& file, vector const* arguments, string const* input, map const* environment, function const& pid_callback, function const& stdout_callback, function const& stderr_callback, option_set const& options, uint32_t timeout) { // Since we use a job object in the windows world, we want to // be sure we're not in a job object, or at least able to // break our processes out if we are in one. 
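    // The checks below ask Windows whether this process already runs inside a job object
    // and, if so, whether that job permits CREATE_BREAKAWAY_FROM_JOB. When breakaway is
    // not permitted, the child is started without a job object of its own, so its
    // descendants cannot be forcibly terminated on timeout (a warning is logged instead).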
BOOL in_job; bool use_job_object = true; if (!IsProcessInJob(GetCurrentProcess(), nullptr, &in_job)) { throw execution_exception(_("could not determine if the parent process is running in a job object")); } if (in_job) { JOBOBJECT_BASIC_LIMIT_INFORMATION limits; if (!QueryInformationJobObject(nullptr, JobObjectBasicLimitInformation, &limits, sizeof(limits), nullptr) || !(limits.LimitFlags & JOB_OBJECT_LIMIT_BREAKAWAY_OK)) { // short-circuits if QueryInformationJobObject fails use_job_object = false; } } // Search for the executable string executable = which(file); log_execution(executable.empty() ? file : executable, arguments); if (executable.empty()) { LOG_DEBUG("{1} was not found on the PATH.", file); if (options[execution_options::throw_on_nonzero_exit]) { throw child_exit_exception(_("child process returned non-zero exit status."), 127, {}, {}); } return {false, "", "", 127, 0}; } // Setup the execution environment vector modified_environ; vector scoped_environ; if (options[execution_options::merge_environment]) { // Modify the existing environment, then restore it after. There's no way to modify environment variables // after the child has started. An alternative would be to use GetEnvironmentStrings and add/modify the block, // but searching for and modifying existing environment strings to update would be cumbersome in that form. // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms682009(v=vs.85).aspx if (!options[execution_options::inherit_locale]) { // Unless inherit_locale is specified, override with a C locale to ensure consistent behavior from // command-line tools. if (!environment || environment->count("LC_ALL") == 0) { scoped_environ.emplace_back("LC_ALL", "C"); } if (!environment || environment->count("LANG") == 0) { scoped_environ.emplace_back("LANG", "C"); } } if (environment) { for (auto const& kv : *environment) { // Use scoped_env to save the old state and restore it on return. LOG_DEBUG("child environment {1}={2}", kv.first, kv.second); scoped_environ.emplace_back(kv.first, kv.second); } } } else { // We aren't inheriting the environment, so create an environment block instead of changing existing env. // Environment variables must be sorted alphabetically and case-insensitive, // so copy them all into the same map with case-insensitive key compare: // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682009(v=vs.85).aspx map sortedEnvironment( [](string const& a, string const& b) { return ilexicographical_compare(a, b); }); if (environment) { sortedEnvironment.insert(environment->begin(), environment->end()); } // Insert LANG and LC_ALL if they aren't already present. Emplace ensures this behavior. string locale_env; if (options[execution_options::inherit_locale] && environment::get("LC_ALL", locale_env)) { sortedEnvironment.emplace("LC_ALL", locale_env); } else if (!options[execution_options::inherit_locale]) { sortedEnvironment.emplace("LC_ALL", "C"); } if (options[execution_options::inherit_locale] && environment::get("LANG", locale_env)) { sortedEnvironment.emplace("LANG", locale_env); } else if (!options[execution_options::inherit_locale]) { sortedEnvironment.emplace("LANG", "C"); } // An environment block is a NULL-terminated list of NULL-terminated strings. 
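            // For example, the variables FOO=1 and LC_ALL=C are laid out (as UTF-16) as
            //     L"FOO=1\0LC_ALL=C\0\0"
            // i.e. each entry gets its own terminating L'\0' and the whole block ends
            // with an extra one, which the code below appends explicitly.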
for (auto const& variable : sortedEnvironment) { LOG_DEBUG("child environment {1}={2}", variable.first, variable.second); string var = variable.first + "=" + variable.second; for (auto c : boost::nowide::widen(var)) { modified_environ.push_back(c); } modified_environ.push_back(L'\0'); } modified_environ.push_back(L'\0'); if (sortedEnvironment.empty()) { // The environment block is terminated by two nulls, so if the environment is // empty add a second one. modified_environ.push_back(L'\0'); } } // Execute the command, reading the results into a buffer until there's no more to read. // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms682499(v=vs.85).aspx // for details on redirecting input/output. scoped_handle stdInRd, stdInWr; tie(stdInRd, stdInWr) = CreatePipeThrow(); if (!SetHandleInformation(stdInWr, HANDLE_FLAG_INHERIT, 0)) { throw execution_exception(_("pipe could not be modified")); } scoped_handle stdOutRd, stdOutWr; tie(stdOutRd, stdOutWr) = CreatePipeThrow(); if (!SetHandleInformation(stdOutRd, HANDLE_FLAG_INHERIT, 0)) { throw execution_exception(_("pipe could not be modified")); } scoped_handle stdErrRd, stdErrWr; if (!options[execution_options::redirect_stderr_to_stdout]) { // If redirecting to null, open the "NUL" device and inherit the handle if (options[execution_options::redirect_stderr_to_null]) { SECURITY_ATTRIBUTES attributes = {}; attributes.nLength = sizeof(SECURITY_ATTRIBUTES); attributes.bInheritHandle = TRUE; stdErrWr = scoped_handle(CreateFileW(L"nul", GENERIC_WRITE, FILE_SHARE_WRITE, &attributes, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr)); if (stdErrWr == INVALID_HANDLE_VALUE) { throw execution_exception(_("cannot open NUL device for redirecting stderr.")); } } else { // Otherwise, we're reading from stderr, so create a pipe tie(stdErrRd, stdErrWr) = CreatePipeThrow(); if (!SetHandleInformation(stdErrRd, HANDLE_FLAG_INHERIT, 0)) { throw execution_exception(_("pipe could not be modified")); } } } // Execute the command with arguments. Prefix arguments with the executable, or quoted arguments won't work. auto commandLine = arguments ? boost::nowide::widen(ArgvToCommandLine({ executable }) + " " + ArgvToCommandLine(*arguments, options[execution_options::preserve_arguments])) : L""; STARTUPINFO startupInfo = {}; startupInfo.cb = sizeof(startupInfo); startupInfo.dwFlags |= STARTF_USESTDHANDLES; startupInfo.hStdInput = stdInRd; startupInfo.hStdOutput = stdOutWr; // Set up stderr redirection to out or the pipe (which may be INVALID_HANDLE_VALUE, i.e. "null") if (options[execution_options::redirect_stderr_to_stdout]) { startupInfo.hStdError = stdOutWr; } else { startupInfo.hStdError = stdErrWr; } PROCESS_INFORMATION procInfo = {}; // Set up flags for CreateProcess based on whether the create_detached_process // option was set and the parent process is running in a Job object. auto creation_flags = CREATE_NO_WINDOW | CREATE_UNICODE_ENVIRONMENT; if (use_job_object) { creation_flags |= CREATE_BREAKAWAY_FROM_JOB; } if (options[execution_options::create_detached_process]) { creation_flags |= CREATE_NEW_PROCESS_GROUP; } if (!CreateProcessW( boost::nowide::widen(executable).c_str(), &commandLine[0], /* Pass a modifiable string buffer; the contents may be modified */ NULL, /* Don't allow child process to inherit process handle */ NULL, /* Don't allow child process to inherit thread handle */ TRUE, /* Inherit handles from the calling process for communication */ creation_flags, options[execution_options::merge_environment] ? 
NULL : modified_environ.data(), NULL, /* Use existing current directory */ &startupInfo, /* STARTUPINFO for child process */ &procInfo)) { /* PROCESS_INFORMATION pointer for output */ LOG_ERROR("failed to create process: {1}.", windows::system_error()); throw execution_exception(_("failed to create child process.")); } // Release unused pipes, to avoid any races in process completion. if (!input) { stdInWr.release(); } stdInRd.release(); stdOutWr.release(); stdErrWr.release(); scoped_handle hProcess(procInfo.hProcess); scoped_handle hThread(procInfo.hThread); // Use a Job Object to group any child processes spawned by the CreateProcess invocation, so we can // easily stop them in case of a timeout. bool create_job_object = use_job_object && !options[execution_options::create_detached_process]; scoped_handle hJob; if (create_job_object) { hJob = scoped_handle(CreateJobObjectW(nullptr, nullptr)); if (hJob == NULL) { LOG_ERROR("failed to create job object: {1}.", windows::system_error()); throw execution_exception(_("failed to create job object.")); } else if (!AssignProcessToJobObject(hJob, hProcess)) { LOG_ERROR("failed to associate process with job object: {1}.", windows::system_error()); throw execution_exception(_("failed to associate process with job object.")); } } bool terminate = true; scope_exit reaper([&]() { if (terminate) { // Terminate the process on an exception if (create_job_object) { if (!TerminateJobObject(hJob, -1)) { LOG_ERROR("failed to terminate process: {1}.", windows::system_error()); } } else { LOG_WARNING("could not terminate process {1} because a job object could not be used.", procInfo.dwProcessId); } } }); // Create a waitable timer if given a timeout scoped_handle timer; if (timeout) { timer = scoped_handle(CreateWaitableTimer(nullptr, TRUE, nullptr)); if (!timer) { LOG_ERROR("failed to create waitable timer: {1}.", windows::system_error()); throw execution_exception(_("failed to create waitable timer.")); } // "timeout" in X intervals in the future (1 interval = 100 ns) // The negative value indicates relative to the current time LARGE_INTEGER future; future.QuadPart = timeout * -10000000ll; if (!SetWaitableTimer(timer, &future, 0, nullptr, nullptr, FALSE)) { LOG_ERROR("failed to set waitable timer: {1}.", windows::system_error()); throw execution_exception(_("failed to set waitable timer.")); } } // Execute the PID callback if (pid_callback) { pid_callback(procInfo.dwProcessId); } string output, error; tie(output, error) = process_streams(options[execution_options::trim_output], stdout_callback, stderr_callback, [&](function const& process_stdout, function const& process_stderr) { // Read the child output array pipes = { { input ? pipe("stdin", move(stdInWr), *input) : pipe("stdin", {}, ""), pipe("stdout", move(stdOutRd), process_stdout), pipe("stderr", move(stdErrRd), process_stderr) } }; rw_from_child(procInfo.dwProcessId, pipes, timeout, timer, options[execution_options::convert_newlines]); }); HANDLE handles[2] = { hProcess, timer }; auto wait_result = WaitForMultipleObjects(timeout ? 
2 : 1, handles, FALSE, INFINITE); if (wait_result == WAIT_OBJECT_0) { // Process has terminated terminate = false; } else if (wait_result == WAIT_OBJECT_0 + 1) { // Timeout while waiting on the process to complete throw timeout_exception(_("command timed out after {1} seconds.", timeout), static_cast(procInfo.dwProcessId)); } else { LOG_ERROR("failed to wait for child process to terminate: {1}.", windows::system_error()); throw execution_exception(_("failed to wait for child process to terminate.")); } // Now check the process return status. DWORD exit_code; if (!GetExitCodeProcess(hProcess, &exit_code)) { throw execution_exception(_("error retrieving exit code of completed process")); } LOG_DEBUG("process exited with exit code {1}.", exit_code); if (exit_code != 0 && options[execution_options::throw_on_nonzero_exit]) { throw child_exit_exception(_("child process returned non-zero exit status."), exit_code, output, error); } return {exit_code == 0, move(output), move(error), static_cast(exit_code), static_cast(procInfo.dwProcessId)}; } }} // namespace leatherman::execution leatherman-1.4.2+dfsg/execution/tests/000075500000000000000000000000001332360634000177205ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/fixtures.hpp.in000064400000000000000000000001741332360634000227110ustar00rootroot00000000000000#define EXEC_TESTS_DIRECTORY "@CMAKE_CURRENT_LIST_DIR@/tests" #define CMAKE_BIN_DIRECTORY "@CMAKE_RUNTIME_OUTPUT_DIRECTORY@"leatherman-1.4.2+dfsg/execution/tests/fixtures/000075500000000000000000000000001332360634000215715ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/fixtures/echo_pid000075500000000000000000000000331332360634000232650ustar00rootroot00000000000000#! /usr/bin/env sh echo $$ leatherman-1.4.2+dfsg/execution/tests/fixtures/error_message000075500000000000000000000001221332360634000243470ustar00rootroot00000000000000#! /usr/bin/env sh echo error message! >&2 echo foo=bar echo echo some more stuff leatherman-1.4.2+dfsg/execution/tests/fixtures/execution/000075500000000000000000000000001332360634000235745ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/fixtures/execution/selfkill.sh000064400000000000000000000000361332360634000257340ustar00rootroot00000000000000#!/usr/bin/env sh kill -9 $$ leatherman-1.4.2+dfsg/execution/tests/fixtures/execution/sleep.sh000064400000000000000000000000341332360634000252350ustar00rootroot00000000000000#!/usr/bin/env sh sleep 60 leatherman-1.4.2+dfsg/execution/tests/fixtures/facts000075500000000000000000000001451332360634000226170ustar00rootroot00000000000000#! /usr/bin/env sh echo 'exe_fact1=value1' echo 'exe_fact2=' echo 'exe_fact3' echo 'EXE_fact4=value2'leatherman-1.4.2+dfsg/execution/tests/fixtures/failed000075500000000000000000000000621332360634000227410ustar00rootroot00000000000000#! 
/usr/bin/env sh echo 'this script fails' exit 1leatherman-1.4.2+dfsg/execution/tests/fixtures/ls/000075500000000000000000000000001332360634000222075ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/fixtures/ls/crlf.txt000064400000000000000000000000251332360634000236730ustar00rootroot00000000000000line1 line2 line3 leatherman-1.4.2+dfsg/execution/tests/fixtures/ls/file1.txt000064400000000000000000000000401332360634000237420ustar00rootroot00000000000000 this is a test of trimming leatherman-1.4.2+dfsg/execution/tests/fixtures/ls/file2.txt000064400000000000000000000000051332360634000237440ustar00rootroot00000000000000file2leatherman-1.4.2+dfsg/execution/tests/fixtures/ls/file3.txt000064400000000000000000000000051332360634000237450ustar00rootroot00000000000000file3leatherman-1.4.2+dfsg/execution/tests/fixtures/ls/file4.txt000064400000000000000000000000471332360634000237540ustar00rootroot00000000000000line1 line2 line3 line4 leatherman-1.4.2+dfsg/execution/tests/fixtures/not_executable000064400000000000000000000000341332360634000245120ustar00rootroot00000000000000This file is not executable.leatherman-1.4.2+dfsg/execution/tests/fixtures/windows/000075500000000000000000000000001332360634000232635ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/fixtures/windows/error_message.bat000075500000000000000000000001231332360634000266070ustar00rootroot00000000000000@echo off echo error message!>&2 echo foo=bar echo. echo some more stuff exit /b 0 leatherman-1.4.2+dfsg/execution/tests/fixtures/windows/facts.bat000075500000000000000000000001161332360634000250540ustar00rootroot00000000000000@echo exe_fact1=value1 @echo exe_fact2= @echo exe_fact3 @echo EXE_fact4=value2leatherman-1.4.2+dfsg/execution/tests/fixtures/windows/failed.cmd000075500000000000000000000000361332360634000251760ustar00rootroot00000000000000@echo this script fails exit 1leatherman-1.4.2+dfsg/execution/tests/fixtures/windows/not_executable000064400000000000000000000000341332360634000262040ustar00rootroot00000000000000This file is not executable.leatherman-1.4.2+dfsg/execution/tests/fixtures/windows/ruby_script.rb000064400000000000000000000000341332360634000261520ustar00rootroot00000000000000puts "rbver=#{RUBY_VERSION}"leatherman-1.4.2+dfsg/execution/tests/log_capture.cc000064400000000000000000000012201332360634000225260ustar00rootroot00000000000000#include "log_capture.hpp" #include using namespace std; using namespace leatherman::logging; namespace leatherman { namespace execution { namespace testing { log_capture::log_capture(log_level level) { // Setup logging for capturing setup_logging(_stream); set_level(level); } log_capture::~log_capture() { // Cleanup setup_logging(boost::nowide::cout); set_level(log_level::none); clear_error_logged_flag(); } string log_capture::result() const { return _stream.str(); } }}} // namespace leatherman::execution::testing leatherman-1.4.2+dfsg/execution/tests/log_capture.hpp000064400000000000000000000015151332360634000227370ustar00rootroot00000000000000#pragma once #include #include #include namespace leatherman { namespace execution { namespace testing { /** * Utility class for capturing facter log output. */ struct log_capture { /** * Constructs the log capture and starts capturing log output. * @param level The log level to capture. */ explicit log_capture(logging::log_level level); /** * Destructs the log capture and stops capturing log output. */ ~log_capture(); /** * Gets the captured log. * @return Returns the captured log as a single string. 
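         * Illustrative usage (mirroring how the execution tests in this directory drive the class;
         * construct a capture for the desired level, run the code under test, then inspect the text):
         *   log_capture capture(logging::log_level::debug);
         *   // ... exercise code that emits log messages ...
         *   auto captured = capture.result();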
*/ std::string result() const; private: std::ostringstream _stream; }; }}} // namespace leatherman::execution::testing leatherman-1.4.2+dfsg/execution/tests/lth_cat.cc000064400000000000000000000015001332360634000216410ustar00rootroot00000000000000#include "lth_cat.hpp" #include #include namespace nw = boost::nowide; using namespace std; static string prompt(set const& codes) { if (codes.count("overwhelm")) { return lth_cat::overwhelm; } return {}; } int main(int argc, char** argv) { // Enable special testing modes set codes(argv+1, argv+argc); if (codes.count("prefix")) { nw::cout << lth_cat::prefix << flush; } string buf; nw::cout << prompt(codes) << flush; while (getline(nw::cin, buf)) { nw::cout << buf << endl; if (codes.count("stderr")) { nw::cerr << buf << endl; } nw::cout << prompt(codes) << flush; } if (codes.count("suffix")) { nw::cout << lth_cat::suffix << flush; } return 0; } leatherman-1.4.2+dfsg/execution/tests/lth_cat.hpp000064400000000000000000000004561332360634000220540ustar00rootroot00000000000000#pragma once #include namespace lth_cat { static const std::string prefix = "Welcome to the Leatherman cat, meow\n"; static const std::string suffix = "Goodbye\n"; static const std::string overwhelm = "Overwhelm the read buffer" + std::string(5000, '-'); } // namespace lth_cat leatherman-1.4.2+dfsg/execution/tests/posix/000075500000000000000000000000001332360634000210625ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/posix/execution.cc000064400000000000000000001006751332360634000234050ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include "../fixtures.hpp" #include "../log_capture.hpp" #include "../lth_cat.hpp" #include #include #include using namespace std; using namespace leatherman::logging; using namespace leatherman::execution::testing; using namespace leatherman::util; using namespace leatherman::execution; namespace fs = boost::filesystem; SCENARIO("searching for programs with execution::which") { GIVEN("an absolute path to an executable file") { THEN("the same path should be returned") { REQUIRE( which(EXEC_TESTS_DIRECTORY "/fixtures/facts") == EXEC_TESTS_DIRECTORY "/fixtures/facts" ); } } GIVEN("an absolute path to a non-executable file") { THEN("an empty string should be returned") { REQUIRE( which(EXEC_TESTS_DIRECTORY "/fixtures/not_executable") == "" ); } } GIVEN("an absolute path to a non-existant file") { THEN("an empty string should be returned") { REQUIRE( which(EXEC_TESTS_DIRECTORY "/fixtures/does_not_exist") == "" ); } } GIVEN("an executable file on the PATH") { THEN("the full path should be returned") { REQUIRE( which("facts", { EXEC_TESTS_DIRECTORY "/fixtures/" }) == EXEC_TESTS_DIRECTORY "/fixtures/facts" ); } } GIVEN("a path relative to a directory on PATH") { THEN("the full path should be returned") { REQUIRE( which("facts", { EXEC_TESTS_DIRECTORY "/fixtures" }) == EXEC_TESTS_DIRECTORY "/fixtures/facts" ); } } GIVEN("a file that does not exist on PATH") { THEN("an empty string should be returned") { REQUIRE( which("not_on_the_path") == "" ); } } GIVEN("a file that is not executable") { THEN("an empty string should be returned") { REQUIRE( which("not_executable", { EXEC_TESTS_DIRECTORY "/fixtures" }) == "" ); } } } SCENARIO("expanding command paths with execution::expand_command") { GIVEN("an executable on the PATH") { THEN("the executable is expanded to an absolute path") { REQUIRE( expand_command("facts 1 2 3", { EXEC_TESTS_DIRECTORY "/fixtures" }) == EXEC_TESTS_DIRECTORY 
"/fixtures/facts 1 2 3" ); } } GIVEN("a single-quoted command") { THEN("the expanded path should be single-quoted") { REQUIRE( expand_command("'facts' 1 2 3", { EXEC_TESTS_DIRECTORY "/fixtures" }) == "'" EXEC_TESTS_DIRECTORY "/fixtures/facts' 1 2 3" ); } } GIVEN("a double-quoted command") { THEN("the expanded path should be double-quoted") { REQUIRE( expand_command("\"facts\" 1 2 3", { EXEC_TESTS_DIRECTORY "/fixtures" }) == "\"" EXEC_TESTS_DIRECTORY "/fixtures/facts\" 1 2 3" ); } } GIVEN("a command not on PATH") { THEN("the returned command is empty") { REQUIRE(expand_command("not_on_the_path") == ""); } } GIVEN("a non-executable command on PATH") { THEN("the returned command is empty") { REQUIRE(expand_command("not_executable", { EXEC_TESTS_DIRECTORY "/fixtures" }) == ""); } } } SCENARIO("executing commands with execution::execute") { auto get_variables = [](string const& input) { map variables; leatherman::util::each_line(input, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }); return variables; }; std::string spool_dir { EXEC_TESTS_DIRECTORY "/spool" }; auto get_file_content = [spool_dir](string const& filename) { string filepath((fs::path(spool_dir) / filename).string()); boost::nowide::ifstream strm(filepath.c_str()); if (!strm) FAIL("failed to open file: " + filename); string content((istreambuf_iterator(strm)), (istreambuf_iterator())); strm.close(); return content; }; if (!fs::exists(spool_dir) && !fs::create_directories(spool_dir)) { FAIL("failed to create spool directory"); } scope_exit spool_cleaner([spool_dir]() { fs::remove_all(spool_dir); }); GIVEN("a command that succeeds") { THEN("the output should be returned") { auto exec = execute("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file3.txt" }); REQUIRE(exec.success); REQUIRE(exec.output == "file3"); REQUIRE(exec.exit_code == 0); } WHEN("requested to merge the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); auto exec = execute("env", {}, { {"TEST_VARIABLE1", "TEST_VALUE1" }, {"TEST_VARIABLE2", "TEST_VALUE2" } }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto variables = get_variables(exec.output); THEN("the child environment should contain the given variables") { REQUIRE(variables.size() > 4); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); auto exec = execute("env", {}, { {"TEST_VARIABLE1", "TEST_VALUE1" }, {"TEST_VARIABLE2", "TEST_VALUE2" }}, 0, { execution_options::trim_output }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto variables = get_variables(exec.output); THEN("the child environment should only contain the given variables") { REQUIRE(variables.size() == 4u); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the 
child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override LC_ALL or LANG") { auto exec = execute("env", {}, { {"LANG", "FOO" }, { "LC_ALL", "BAR" } }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto variables = get_variables(exec.output); THEN("the values should be passed to the child process") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "BAR"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "FOO"); } } WHEN("requested to inherit locale") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); scoped_env lc_all("LC_ALL", "en_US.UTF-8"); scoped_env lang("LANG", "en_US.UTF-8"); auto exec = execute("env", 0, { execution_options::inherit_locale, execution_options::trim_output }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto variables = get_variables(exec.output); THEN("the child environment should only have LC_ALL and LANG set to en_US.UTF-8") { REQUIRE(variables.size() == 2u); REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "en_US.UTF-8"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "en_US.UTF-8"); } } WHEN("requested to inherit locale with no locale set") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); scoped_env lc_all("LC_ALL"); scoped_env lang("LANG"); auto exec = execute("env", 0, { execution_options::inherit_locale, execution_options::trim_output }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto variables = get_variables(exec.output); CAPTURE(exec.output); THEN("the child environment should not have LC_ALL and LANG set") { REQUIRE(variables.empty()); } } WHEN("requested to inherit locale with parent environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); scoped_env lc_all("LC_ALL", "en_US.UTF-8"); scoped_env lang("LANG", "en_US.UTF-8"); auto exec = execute("env", 0, { execution_options::inherit_locale, execution_options::trim_output, execution_options::merge_environment }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto variables = get_variables(exec.output); THEN("the child environment should contain the merged variables") { REQUIRE(variables.size() > 3u); REQUIRE(variables.count("TEST_INHERITED_VARIABLE") == 1); REQUIRE(variables["TEST_INHERITED_VARIABLE"] == "TEST_INHERITED_VALUE"); } THEN("the child environment should have LC_ALL and LANG set to en_US.UTF-8") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "en_US.UTF-8"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "en_US.UTF-8"); } } WHEN("expecting input") { auto exec = execute("cat", {}, "hello"); REQUIRE(exec.success); REQUIRE(exec.output == "hello"); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code == 0); } WHEN("expecting input with lots of output") { auto exec = execute(CMAKE_BIN_DIRECTORY "/lth_cat", { "prefix", "suffix", "overwhelm", "stderr" }, "hello\ngoodbye", 0, { execution_options::merge_environment }); REQUIRE(exec.success); REQUIRE(exec.output == lth_cat::prefix+lth_cat::overwhelm+"hello\n"+lth_cat::overwhelm+"goodbye\n"+lth_cat::overwhelm+lth_cat::suffix); REQUIRE(exec.error == "hello\ngoodbye\n"); REQUIRE(exec.exit_code == 0); } WHEN("requested to write stdout to file") { string out_file(spool_dir + "/stdout_test.out"); auto exec = execute(EXEC_TESTS_DIRECTORY 
"/fixtures/error_message", {}, "", out_file); REQUIRE(fs::exists(out_file)); THEN("stdout is correctly redirected to file") { auto output = get_file_content("stdout_test.out"); REQUIRE(output == "foo=bar\nsome more stuff\n"); } THEN("the returned results are correct and stdout was not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error == "error message!"); } } WHEN("requested to write stdout and stderr to the same file with trim") { string out_file(spool_dir + "/stdout_stderr_test.out"); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/error_message", {}, "", out_file, "", map(), nullptr, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_stdout }); REQUIRE(fs::exists(out_file)); THEN("stdout and stderr are correctly redirected to file") { auto output = get_file_content("stdout_stderr_test.out"); REQUIRE(output == "error message!\nfoo=bar\nsome more stuff\n"); } THEN("the returned results are correct and out/err streams were not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error.empty()); } } WHEN("requested to write stdout to a file in an unknown directory") { bool success = false; try { execute("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file1.txt" }, "", EXEC_TESTS_DIRECTORY "/spam/eggs/stdout.out"); success = true; } catch (...) { // pass } THEN("it fails") { REQUIRE_FALSE(success); } } WHEN("requested to write both stdout and stderr to file with permissions") { string out_file(spool_dir + "/stdout_test_b.out"); string err_file(spool_dir + "/stderr_test_b.err"); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/error_message", {}, "", out_file, err_file, {}, nullptr, 0, fs::owner_read | fs::owner_write | fs::group_read, lth_util::option_set{}); REQUIRE(fs::exists(out_file)); REQUIRE(fs::exists(err_file)); THEN("stdout and stderr are correctly redirected to different files") { auto output = get_file_content("stdout_test_b.out"); auto error = get_file_content("stderr_test_b.err"); REQUIRE(output == "foo=bar\n\nsome more stuff\n"); REQUIRE(error == "error message!\n"); } THEN("the files have restricted permissions") { auto out_perms = fs::status(out_file).permissions(); auto err_perms = fs::status(err_file).permissions(); REQUIRE(out_perms == (fs::owner_read | fs::owner_write | fs::group_read)); REQUIRE(err_perms == (fs::owner_read | fs::owner_write | fs::group_read)); } THEN("the returned results are correct and out/err streams were not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error.empty()); } } WHEN("requested to write both stdout and stderr to file without trim") { string out_file(spool_dir + "/stdout_test_b.out"); string err_file(spool_dir + "/stderr_test_b.err"); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/error_message", {}, "", out_file, err_file, {}, nullptr, 0, lth_util::option_set{}); REQUIRE(fs::exists(out_file)); REQUIRE(fs::exists(err_file)); THEN("stdout and stderr are correctly redirected to different files") { auto output = get_file_content("stdout_test_b.out"); auto error = get_file_content("stderr_test_b.err"); REQUIRE(output == "foo=bar\n\nsome more stuff\n"); REQUIRE(error == "error message!\n"); } THEN("the returned results are correct and out/err streams were not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error.empty()); } } WHEN("requested to write stderr to a file in a directory that does not exist") { string out_file(spool_dir + "/good.out"); string 
err_file(spool_dir + "/spam/eggs/bad.err"); bool success = false; try { execute("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file1.txt" }, "", out_file, err_file); success = true; } catch (...) { // pass } THEN("it fails") { REQUIRE_FALSE(success); } } WHEN("requested to execute a PID callback") { int pid_from_callback; auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/echo_pid", {}, "", map(), [&pid_from_callback](size_t pid) { pid_from_callback = pid; }); THEN("the returned results are correct") { REQUIRE(exec.success); REQUIRE_FALSE(exec.output.empty()); REQUIRE(exec.error.empty()); } THEN("the callback is successfully executed") { REQUIRE(to_string(pid_from_callback) == exec.output); } } } GIVEN("a command that fails") { WHEN("default options are used") { auto exec = execute("ls", { "does_not_exist" }); THEN("no output is returned") { REQUIRE_FALSE(exec.success); REQUIRE(exec.output == ""); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code > 0); } } WHEN("the redirect stderr option is used") { auto exec = execute("ls", { "does_not_exist" }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_stdout }); THEN("error output is returned") { REQUIRE_FALSE(exec.success); REQUIRE(boost::ends_with(exec.output, "No such file or directory")); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code > 0); } } WHEN("not redirecting stderr to null") { auto exec = execute("ls", { "does_not_exist" }, 0, { execution_options::trim_output, execution_options::merge_environment }); THEN("error output is returned") { REQUIRE_FALSE(exec.success); REQUIRE(exec.output == ""); REQUIRE(boost::ends_with(exec.error, "No such file or directory")); REQUIRE(exec.exit_code > 0); } } WHEN("the 'throw on non-zero exit' option is used") { THEN("a child exit exception is thrown") { REQUIRE_THROWS_AS(execute("ls", {"does_not_exist"}, 0, {execution_options::trim_output, execution_options::merge_environment, execution_options::throw_on_nonzero_exit}), child_exit_exception); } } WHEN("the 'throw on signal' option is used") { THEN("a child signal exception is thrown") { REQUIRE_THROWS_AS(execute("sh", { EXEC_TESTS_DIRECTORY "/fixtures/execution/selfkill.sh" }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::throw_on_signal }), child_signal_exception); } } } GIVEN("a command that outputs leading/trailing whitespace") { THEN("whitespace should be trimmed by default") { auto exec = execute("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file1.txt" }); REQUIRE(exec.success); REQUIRE(exec.output == "this is a test of trimming"); } WHEN("the 'trim whitespace' option is not used") { auto exec = execute("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file1.txt" }, 0, { execution_options::merge_environment }); THEN("whitespace should not be trimmed") { REQUIRE(exec.output == " this is a test of trimming "); } } } GIVEN("a long-running command") { WHEN("given a timeout") { THEN("a timeout exception should be thrown") { try { execute("sh", { EXEC_TESTS_DIRECTORY "/fixtures/execution/sleep.sh" }, 1); FAIL("did not throw timeout exception"); } catch (timeout_exception const& ex) { // Verify the process group was killed by waiting for it int status = 0; REQUIRE(waitpid(-ex.pid(), &status, WNOHANG) == -1); REQUIRE(errno == ECHILD); } catch (exception const&) { FAIL("unexpected exception thrown"); } } } } GIVEN("stderr is redirected to null") { WHEN("using a debug log level") { log_capture capture(log_level::debug); auto exec = 
execute(EXEC_TESTS_DIRECTORY "/fixtures/error_message"); REQUIRE(exec.success); REQUIRE(exec.output == "foo=bar\n\nsome more stuff"); REQUIRE(exec.error.empty()); THEN("stderr is logged") { auto output = capture.result(); CAPTURE(output); REQUIRE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } WHEN("not using a debug log level") { log_capture capture(log_level::warning); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/error_message"); REQUIRE(exec.success); REQUIRE(exec.output == "foo=bar\n\nsome more stuff"); REQUIRE(exec.error.empty()); THEN("stderr is not logged") { auto output = capture.result(); CAPTURE(output); REQUIRE_FALSE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } } } SCENARIO("executing commands with execution::each_line") { GIVEN("a command that succeeds") { THEN("each line of output should be returned") { vector lines; bool success = each_line("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file4.txt" }, [&](string& line) { lines.push_back(line); return true; }); REQUIRE(success); REQUIRE(lines.size() == 4u); REQUIRE(lines[0] == "line1"); REQUIRE(lines[1] == "line2"); REQUIRE(lines[2] == "line3"); REQUIRE(lines[3] == "line4"); } WHEN("output stops when false is returned from callback") { vector lines; bool success = each_line("cat", { EXEC_TESTS_DIRECTORY "/fixtures/ls/file4.txt" }, [&](string& line) { lines.push_back(line); return false; }); REQUIRE(success); REQUIRE(lines.size() == 1u); REQUIRE(lines[0] == "line1"); } WHEN("requested to merge the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); map variables; bool success = each_line("env", {}, { {"TEST_VARIABLE1", "TEST_VALUE1" }, {"TEST_VARIABLE2", "TEST_VALUE2" } }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }); REQUIRE(success); THEN("the child environment should contain the given variables") { REQUIRE(variables.size() > 4); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); map variables; bool success = each_line( "env", {}, { {"TEST_VARIABLE1", "TEST_VALUE1" }, {"TEST_VARIABLE2", "TEST_VALUE2" } }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }, nullptr, 0, { execution_options::trim_output }); REQUIRE(success); THEN("the child environment should only contain the given variables") { REQUIRE(variables.size() == 4u); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "C"); 
REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override LC_ALL or LANG") { map variables; bool success = each_line("env", {}, { {"LANG", "FOO" }, { "LC_ALL", "BAR" } }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }); REQUIRE(success); THEN("the values should be passed to the child process") { REQUIRE(variables.count("LC_ALL") == 1); REQUIRE(variables["LC_ALL"] == "BAR"); REQUIRE(variables.count("LANG") == 1); REQUIRE(variables["LANG"] == "FOO"); } } } GIVEN("a command that fails") { WHEN("default options are used") { THEN("no output is returned") { auto success = each_line("ls", { "does_not_exist" }, [](string& line) { FAIL("should not be called"); return true; }); REQUIRE_FALSE(success); } } WHEN("the redirect stderr option is used") { string output; auto result = each_line( "ls", { "does_not_exist" }, [&](string& line) { if (!output.empty()) { output += "\n"; } output += line; return true; }, nullptr, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_stdout }); THEN("error output is returned") { REQUIRE_FALSE(result); REQUIRE(boost::ends_with(output, "No such file or directory")); } } WHEN("not redirecting stderr to null") { string output; string error; auto result = each_line( "ls", { "does_not_exist" }, [&](string& line) { if (!output.empty()) { output += "\n"; } output += line; return true; }, [&](string& line) { if (!error.empty()) { error += "\n"; } error += line; return true; }); THEN("error output is returned") { REQUIRE_FALSE(result); REQUIRE(output == ""); REQUIRE(boost::ends_with(error, "No such file or directory")); } } WHEN("redirecting stderr to null") { string error; auto result = each_line( "ls", { "does_not_exist" }, nullptr, [&](string& line) { if (!error.empty()) { error += "\n"; } error += line; return true; }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); THEN("no error output is returned") { REQUIRE_FALSE(result); REQUIRE(error == ""); } } WHEN("the 'throw on non-zero exit' option is used") { THEN("a child exit exception is thrown") { REQUIRE_THROWS_AS(each_line("ls", {"does_not_exist"}, nullptr, nullptr, 0, {execution_options::trim_output, execution_options::merge_environment, execution_options::throw_on_nonzero_exit}), child_exit_exception); } } WHEN("the 'throw on signal' option is used") { THEN("a child signal exception is thrown") { REQUIRE_THROWS_AS(each_line("sh", { EXEC_TESTS_DIRECTORY "/fixtures/execution/selfkill.sh" }, nullptr, nullptr, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::throw_on_signal }), child_signal_exception); } } } GIVEN("a long-running command") { WHEN("given a timeout") { THEN("a timeout exception should be thrown") { try { each_line("sh", { EXEC_TESTS_DIRECTORY "/fixtures/execution/sleep.sh" }, nullptr, nullptr, 1); FAIL("did not throw timeout exception"); } catch (timeout_exception const& ex) { // Verify the process group was killed by waiting for it int status = 0; REQUIRE(waitpid(-ex.pid(), &status, WNOHANG) == -1); REQUIRE(errno == ECHILD); } catch (exception const&) { FAIL("unexpected exception thrown"); } } } } GIVEN("stderr is redirected to null") { WHEN("using a debug log level") { log_capture 
capture(log_level::debug); REQUIRE(leatherman::execution::each_line(EXEC_TESTS_DIRECTORY "/fixtures/error_message", nullptr)); THEN("stderr is logged") { auto output = capture.result(); CAPTURE(output); REQUIRE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } WHEN("not using a debug log level") { log_capture capture(log_level::warning); REQUIRE(leatherman::execution::each_line(EXEC_TESTS_DIRECTORY "/fixtures/error_message", nullptr)); THEN("stderr is not logged") { auto output = capture.result(); CAPTURE(output); REQUIRE_FALSE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } } } leatherman-1.4.2+dfsg/execution/tests/posix/solaris/000075500000000000000000000000001332360634000225365ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/posix/solaris/execution.cc000064400000000000000000000046141332360634000250550ustar00rootroot00000000000000#include #include #include #include #include using namespace std; using namespace leatherman::execution; SCENARIO("executing detached commands with execution::execute") { auto get_ctids = [](string const& input) { vector ctids; leatherman::util::each_line(input, [&ctids](string& line) { try { boost::algorithm::trim(line); ctids.push_back(boost::lexical_cast(line)); } catch(boost::bad_lexical_cast const&) { ctids.clear(); return false; } return true; }); return ctids; }; GIVEN("the detached process creation is requested") { THEN("the command is executed in a different process contract than its parent") { auto exec = execute("/bin/sh", { "-c", "ps -o ctid= -p $EXECUTOR_PID,$$" }, { { "EXECUTOR_PID", to_string(getpid()) } }, 0, { execution_options::create_detached_process, execution_options::merge_environment, execution_options::redirect_stderr_to_null }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto ctids = get_ctids(exec.output); REQUIRE(ctids.size() == 2); // the ps command returned two CTIDs REQUIRE(ctids[0] != ctids[1]); // the contract IDs are different } } GIVEN("the detached process creation is NOT requested") { THEN("the command is executed in the same process contract as its parent") { auto exec = execute("/bin/sh", { "-c", "ps -o ctid= -p $EXECUTOR_PID,$$" }, { { "EXECUTOR_PID", to_string(getpid()) } }, 0, { execution_options::merge_environment, execution_options::redirect_stderr_to_null }); REQUIRE(exec.success); REQUIRE(exec.exit_code == 0); auto ctids = get_ctids(exec.output); REQUIRE(ctids.size() == 2); // the ps command returned two CTIDs REQUIRE(ctids[0] == ctids[1]); // the contract IDs are the same } } } leatherman-1.4.2+dfsg/execution/tests/windows/000075500000000000000000000000001332360634000214125ustar00rootroot00000000000000leatherman-1.4.2+dfsg/execution/tests/windows/execution.cc000064400000000000000000001065311332360634000237320ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include "../fixtures.hpp" #include "../log_capture.hpp" #include "../lth_cat.hpp" #include using namespace std; using namespace leatherman::util; using namespace leatherman::execution; using namespace leatherman::logging; using namespace leatherman::execution::testing; using leatherman::util::scoped_env; using namespace boost::filesystem; // Ruby doesn't appear to normalize commands passed to cmd.exe, so neither do we. A utility is provided // here for familiarity in writing unit tests. 
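// For example, normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file3.txt") rewrites the forward
// slashes to backslashes on Windows: boost::filesystem::path::make_preferred() converts
// separators to the platform-preferred form (effectively a no-op on POSIX). cmd.exe built-ins
// such as `type` do not reliably accept forward-slash paths, so the tests below normalize
// fixture paths before handing them to cmd.exe.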
static string normalize(const char *filepath) { return path(filepath).make_preferred().string(); } SCENARIO("searching for programs with execution::which") { GIVEN("an absolute path") { THEN("the same path should be returned") { REQUIRE(which(EXEC_TESTS_DIRECTORY "/fixtures/windows/facts.bat") == EXEC_TESTS_DIRECTORY "/fixtures/windows/facts.bat"); } } GIVEN("a relative path") { THEN("it should find a file with the same relative offset from a directory on PATH") { REQUIRE(which("windows/facts", { EXEC_TESTS_DIRECTORY "/fixtures" }) == EXEC_TESTS_DIRECTORY "/fixtures\\windows/facts.bat"); } } GIVEN("a file without an extension") { THEN("it should find a batch file with the same base name") { REQUIRE(which("facts", { EXEC_TESTS_DIRECTORY "/fixtures/windows" }) == EXEC_TESTS_DIRECTORY "/fixtures/windows\\facts.bat"); } } GIVEN("a file that does not exist") { THEN("an empty string should be returned") { REQUIRE(which("not_on_the_path") == ""); } } GIVEN("a file that exists but is not an executable") { THEN("an empty string should be returned") { REQUIRE(which("not_executable", { EXEC_TESTS_DIRECTORY "/fixtures/windows" }) == ""); } } } SCENARIO("expanding command paths with execution::expand_command") { GIVEN("an executable on the PATH") { THEN("the executable is expanded to an absolute path") { REQUIRE(expand_command("facts 1 2 3", { EXEC_TESTS_DIRECTORY "/fixtures/windows" }) == EXEC_TESTS_DIRECTORY "/fixtures/windows\\facts.bat 1 2 3"); } } GIVEN("a single-quoted command") { THEN("the expanded path should be single-quoted") { REQUIRE(expand_command("'facts' 1 2 3", { EXEC_TESTS_DIRECTORY "/fixtures/windows" }) == "'" EXEC_TESTS_DIRECTORY "/fixtures/windows\\facts.bat' 1 2 3"); } } GIVEN("a double-quoted command") { THEN("the expanded path should be double-quoted") { REQUIRE(expand_command("\"facts\" 1 2 3", { EXEC_TESTS_DIRECTORY "/fixtures/windows" }) == "\"" EXEC_TESTS_DIRECTORY "/fixtures/windows\\facts.bat\" 1 2 3"); } } GIVEN("a command not on PATH") { THEN("the returned command is empty") { REQUIRE(expand_command("not_on_the_path") == ""); } } GIVEN("a non-executable command on PATH") { THEN("the returned command is empty") { REQUIRE(expand_command("not_executable", { EXEC_TESTS_DIRECTORY "/fixtures/windows" }) == ""); } } } SCENARIO("executing commands with execution::execute") { auto get_variables = [](string const& input) { map variables; leatherman::util::each_line(input, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }); return variables; }; std::string spool_dir { EXEC_TESTS_DIRECTORY "/spool" }; auto get_file_content = [spool_dir](string const& filename) { string filepath((path(spool_dir) / filename).string()); boost::nowide::ifstream strm(filepath.c_str()); if (!strm) FAIL("failed to open file: " + filename); string content((istreambuf_iterator(strm)), (istreambuf_iterator())); strm.close(); return content; }; if (!exists(spool_dir) && !create_directories(spool_dir)) { FAIL("failed to create spool directory"); } scope_exit spool_cleaner([spool_dir]() { remove_all(spool_dir); }); GIVEN("a command that succeeds") { THEN("the output should be returned") { auto exec = execute("cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file3.txt") }); REQUIRE(exec.success); REQUIRE(exec.output == "file3"); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code == 0); } WHEN("the create new process 
group option is used") { auto exec = execute("cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file3.txt") }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null, execution_options::create_detached_process }); REQUIRE(exec.success); REQUIRE(exec.output == "file3"); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code == 0); } WHEN("expecting input") { auto exec = execute("cmd.exe", { "/c", CMAKE_BIN_DIRECTORY "/lth_cat.exe" }, "hello"); REQUIRE(exec.success); REQUIRE(exec.output == "hello"); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code == 0); } WHEN("expecting input with lots of output") { auto exec = execute("cmd.exe", { "/c", CMAKE_BIN_DIRECTORY "/lth_cat.exe", "prefix", "suffix", "overwhelm", "stderr" }, "hello\ngoodbye", 0, { execution_options::merge_environment }); REQUIRE(exec.success); auto expected = lth_cat::prefix+lth_cat::overwhelm+"hello\n"+lth_cat::overwhelm+"goodbye\n"+lth_cat::overwhelm+lth_cat::suffix; boost::replace_all(expected, "\n", "\r\n"); REQUIRE(exec.output == expected); REQUIRE(exec.error == "hello\r\ngoodbye\r\n"); REQUIRE(exec.exit_code == 0); } WHEN("requested to write stdout to file") { string out_file(spool_dir + "/stdout_test.out"); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat", {}, "", out_file); REQUIRE(exists(out_file)); THEN("stdout is correctly redirected to file") { auto output = get_file_content("stdout_test.out"); REQUIRE(output == "foo=bar\nsome more stuff\n"); } THEN("the returned results are correct and stdout was not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error == "error message!"); } } WHEN("requested to write stdout and stderr to the same file with trim") { string out_file(spool_dir + "/stdout_stderr_test.out"); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat", {}, "", out_file, "", {}, nullptr, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_stdout }); REQUIRE(boost::filesystem::exists(out_file)); THEN("stdout and stderr are correctly redirected to file") { auto output = get_file_content("stdout_stderr_test.out"); REQUIRE(output == "error message!\nfoo=bar\nsome more stuff\n"); } THEN("the returned results are correct and out/err streams were not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error.empty()); } } WHEN("requested to write stdout to a file in an unknown directory") { bool success = false; try { execute("cmd.exe", { "/c", CMAKE_BIN_DIRECTORY "/lth_cat.exe" }, "spam", EXEC_TESTS_DIRECTORY "/spam/eggs/stdout.out"); success = true; } catch (...) 
{ // pass } THEN("it fails") { REQUIRE_FALSE(success); } } WHEN("requested to write both stdout and stderr to file without trim") { string out_file(spool_dir + "/stdout_test_b.out"); string err_file(spool_dir + "/stderr_test_b.err"); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat", {}, "", out_file, err_file, {}, nullptr, 0, lth_util::option_set{}); REQUIRE(boost::filesystem::exists(out_file)); REQUIRE(boost::filesystem::exists(err_file)); THEN("stdout and stderr are correctly redirected to different files") { auto output = get_file_content("stdout_test_b.out"); auto error = get_file_content("stderr_test_b.err"); REQUIRE(output == "foo=bar\n\nsome more stuff\n"); REQUIRE(error == "error message!\n"); } THEN("the returned results are correct and out/err streams were not buffered") { REQUIRE(exec.success); REQUIRE(exec.output.empty()); REQUIRE(exec.error.empty()); } } WHEN("requested to execute a PID callback") { int pid_from_callback = 0; auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat", {}, "", {}, [&pid_from_callback](size_t pid) { pid_from_callback = pid; }); THEN("the returned results are correct") { REQUIRE(exec.success); REQUIRE(exec.output == "foo=bar\r\n\r\nsome more stuff"); REQUIRE(exec.error.empty()); // stderr is redirected to null } THEN("the callback is successfully executed") { REQUIRE(pid_from_callback > 0); } } } GIVEN("a command that fails") { WHEN("default options are used") { auto exec = execute("cmd.exe", { "/c", "dir", "/B", "does_not_exist" }); THEN("no output is returned") { REQUIRE_FALSE(exec.success); REQUIRE(exec.output == ""); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code > 0); } } WHEN("the create new process group option is used") { auto exec = execute("cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null, execution_options::create_detached_process }); THEN("no output is returned") { REQUIRE_FALSE(exec.success); REQUIRE(exec.output == ""); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code > 0); } } WHEN("the redirect stderr option is used") { auto exec = execute("cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_stdout }); THEN("error output is returned on stdout") { REQUIRE_FALSE(exec.success); REQUIRE(exec.output == "File Not Found"); REQUIRE(exec.error == ""); REQUIRE(exec.exit_code > 0); } } WHEN("not redirecting stderr to null") { auto exec = execute("cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, 0, { execution_options::trim_output, execution_options::merge_environment }); THEN("error output is returned") { REQUIRE_FALSE(exec.success); REQUIRE(exec.output == ""); REQUIRE(exec.error == "File Not Found"); REQUIRE(exec.exit_code > 0); } } WHEN("the 'throw on non-zero exit' option is used") { THEN("a child exit exception is thrown") { REQUIRE_THROWS_AS(execute("cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::throw_on_nonzero_exit }), child_exit_exception); } } WHEN("requested to merge the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); auto exec = execute("cmd.exe", { "/c", "set" }, { { "TEST_VARIABLE1", "TEST_VALUE1" }, { "TEST_VARIABLE2", "TEST_VALUE2" } }); REQUIRE(exec.success); REQUIRE(exec.error == ""); auto variables = 
get_variables(exec.output); THEN("the child environment should contain the given variables") { REQUIRE(variables.size() > 4u); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); auto exec = execute("cmd.exe", { "/c", "set" }, { { "TEST_VARIABLE1", "TEST_VALUE1" }, { "TEST_VARIABLE2", "TEST_VALUE2" } }, 0, { execution_options::trim_output }); REQUIRE(exec.success); REQUIRE(exec.error == ""); auto variables = get_variables(exec.output); THEN("the child environment should only contain the given variables") { REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override LC_ALL or LANG") { auto exec = execute("cmd.exe", { "/c", "set" }, { { "LANG", "FOO" }, { "LC_ALL", "BAR" } }); REQUIRE(exec.success); REQUIRE(exec.error == ""); auto variables = get_variables(exec.output); THEN("the values should be passed to the child process") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "BAR"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "FOO"); } } } GIVEN("a command that outputs leading/trailing whitespace") { THEN("whitespace should be trimmed by default") { auto exec = execute("cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file1.txt") }); REQUIRE(exec.success); REQUIRE(exec.output == "this is a test of trimming"); REQUIRE(exec.error == ""); } WHEN("the 'trim whitespace' option is not used") { auto exec = execute("cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file1.txt") }, 0, { execution_options::merge_environment }); REQUIRE(exec.success); THEN("whitespace should not be trimmed") { REQUIRE(exec.output == " this is a test of trimming "); REQUIRE(exec.error == ""); } } } GIVEN("a long-running command") { WHEN("given a timeout") { THEN("a timeout exception should be thrown") { string ruby = which("ruby.exe"); if (ruby.empty()) { WARN("skipping command timeout test because no ruby was found on the PATH."); return; } try { execute("cmd.exe", { "/c", "ruby.exe", "-e", "sleep 60" }, 1); FAIL("did not throw timeout exception"); } catch (timeout_exception const& ex) { // Verify the process was killed REQUIRE(OpenProcess(0, FALSE, ex.pid()) == nullptr); } catch (exception const&) { FAIL("unexpected exception thrown"); } } } } GIVEN("stderr is redirected to null") { WHEN("using a debug log level") { log_capture capture(log_level::debug); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat"); REQUIRE(exec.success); REQUIRE(exec.output == "foo=bar\r\n\r\nsome more stuff"); REQUIRE(exec.error.empty()); THEN("stderr is logged") { auto output = capture.result(); CAPTURE(output); 
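                // With stderr redirected to null (part of the default option set), the child's
                // stderr lines are forwarded to the leatherman log at debug level, so the captured
                // log should contain the "error message!" line emitted by error_message.bat.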
REQUIRE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } WHEN("not using a debug log level") { log_capture capture(log_level::warning); auto exec = execute(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat"); REQUIRE(exec.success); REQUIRE(exec.output == "foo=bar\r\n\r\nsome more stuff"); REQUIRE(exec.error.empty()); THEN("stderr is not logged") { auto output = capture.result(); CAPTURE(output); REQUIRE_FALSE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } } GIVEN("a command that outputs windows-style newlines") { // These are the default options so that I don't override them. lth_util::option_set options = { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_null }; WHEN("newlines are not normalized") { auto exec = execute("cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/crlf.txt") }, 0, options); REQUIRE(exec.success); REQUIRE(exec.error == ""); REQUIRE(exec.output.find('\r') != std::string::npos); } WHEN("requested to normalize newlines") { auto exec = execute("cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/crlf.txt") }, 0, options | option_set{ execution_options::convert_newlines }); REQUIRE(exec.success); REQUIRE(exec.error == ""); REQUIRE(exec.output.find('\r') == std::string::npos); } } } SCENARIO("executing commands with leatherman::execution::each_line") { GIVEN("a command that succeeds") { THEN("each line of output should be returned") { vector lines; bool success = leatherman::execution::each_line( "cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file4.txt") }, [&](string& line) { lines.push_back(line); return true; }); REQUIRE(success); REQUIRE(lines.size() == 4u); REQUIRE(lines[0] == "line1"); REQUIRE(lines[1] == "line2"); REQUIRE(lines[2] == "line3"); REQUIRE(lines[3] == "line4"); } WHEN("output stops when false is returned from callback") { vector lines; bool success = leatherman::execution::each_line( "cmd.exe", { "/c", "type", normalize(EXEC_TESTS_DIRECTORY "/fixtures/ls/file4.txt") }, [&](string& line) { lines.push_back(line); return false; }); REQUIRE(success); REQUIRE(lines.size() == 1u); REQUIRE(lines[0] == "line1"); } WHEN("requested to merge the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); map variables; bool success = each_line( "cmd.exe", { "/c", "set" }, { {"TEST_VARIABLE1", "TEST_VALUE1" }, {"TEST_VARIABLE2", "TEST_VALUE2" } }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }); REQUIRE(success); THEN("the child environment should contain the given variables") { REQUIRE(variables.size() > 4u); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override the environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); map variables; bool success = each_line( "cmd.exe", { "/c", "set" }, { {"TEST_VARIABLE1", "TEST_VALUE1" }, {"TEST_VARIABLE2", 
"TEST_VALUE2" } }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }, nullptr, 0, { execution_options::trim_output }); REQUIRE(success); THEN("the child environment should only contain the given variables") { // Windows adds several extra variables, such as COMSPEC, PATHEXT, and PROMPT. // Leave some buffer room for future additions, while ensuring we don't include // everything. REQUIRE(variables.size() < 10u); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_VARIABLE1") == 1u); REQUIRE(variables["TEST_VARIABLE1"] == "TEST_VALUE1"); REQUIRE(variables.count("TEST_INHERITED_VARIABLE") == 0u); } THEN("the child environment should have LC_ALL and LANG set to C") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "C"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "C"); } } WHEN("requested to override LC_ALL or LANG") { map variables; bool success = each_line( "cmd.exe", { "/c", "set" }, { {"LANG", "FOO" }, { "LC_ALL", "BAR" } }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }); REQUIRE(success); THEN("the values should be passed to the child process") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "BAR"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "FOO"); } } WHEN("requested to inherit locale") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); scoped_env lc_all("LC_ALL", "en_US.UTF-8"); scoped_env lang("LANG", "en_US.UTF-8"); map variables; bool success = each_line( "cmd.exe", { "/c", "set" }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }, nullptr, 0, { execution_options::trim_output, execution_options::inherit_locale }); REQUIRE(success); THEN("the child environment should only have LC_ALL and LANG set to en_US.UTF-8") { // Windows adds several extra variables, such as COMSPEC, PATHEXT, and PROMPT. // Leave some buffer room for future additions, while ensuring we don't include // everything. 
REQUIRE(variables.size() < 10u); REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "en_US.UTF-8"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "en_US.UTF-8"); REQUIRE(variables.count("TEST_INHERITED_VARIABLE") == 0u); } } WHEN("requested to inherit locale with no locale set") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); scoped_env lc_all("LC_ALL"); scoped_env lang("LANG"); map variables; bool success = each_line( "cmd.exe", { "/c", "set" }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }, nullptr, 0, { execution_options::trim_output, execution_options::inherit_locale }); REQUIRE(success); THEN("the child environment should only have LC_ALL and LANG set to en_US.UTF-8") { // Windows adds several extra variables, such as COMSPEC, PATHEXT, and PROMPT. // Leave some buffer room for future additions, while ensuring we don't include // everything. REQUIRE(variables.size() < 8u); REQUIRE(variables.count("LC_ALL") == 0u); REQUIRE(variables.count("LANG") == 0u); REQUIRE(variables.count("TEST_INHERITED_VARIABLE") == 0u); } } WHEN("requested to inherit locale with parent environment") { scoped_env test_var("TEST_INHERITED_VARIABLE", "TEST_INHERITED_VALUE"); scoped_env lc_all("LC_ALL", "en_US.UTF-8"); scoped_env lang("LANG", "en_US.UTF-8"); map variables; bool success = each_line( "cmd.exe", { "/c", "set" }, [&](string& line) { vector parts; boost::split(parts, line, boost::is_any_of("="), boost::token_compress_off); if (parts.size() != 2u) { return true; } variables.emplace(make_pair(move(parts[0]), move(parts[1]))); return true; }, nullptr, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::inherit_locale }); REQUIRE(success); THEN("the child environment should contain the merged variables") { REQUIRE(variables.size() > 3u); REQUIRE(variables.count("TEST_INHERITED_VARIABLE") == 1); REQUIRE(variables["TEST_INHERITED_VARIABLE"] == "TEST_INHERITED_VALUE"); } THEN("the child environment should have LC_ALL and LANG set to en_US.UTF-8") { REQUIRE(variables.count("LC_ALL") == 1u); REQUIRE(variables["LC_ALL"] == "en_US.UTF-8"); REQUIRE(variables.count("LANG") == 1u); REQUIRE(variables["LANG"] == "en_US.UTF-8"); } } } GIVEN("a command that fails") { WHEN("default options are used") { THEN("no output is returned") { auto success = leatherman::execution::each_line( "cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, [](string& line) { FAIL("should not be called"); return true; }); REQUIRE_FALSE(success); } } WHEN("the redirect stderr option is used") { string output; auto result = leatherman::execution::each_line( "cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, [&](string& line) { if (!output.empty()) { output += "\n"; } output += line; return true; }, [&](string&) { FAIL("should not be called"); return true; }, 0, { execution_options::trim_output, execution_options::merge_environment, execution_options::redirect_stderr_to_stdout }); THEN("error output is returned on stdout") { REQUIRE_FALSE(result); REQUIRE(output == "File Not Found"); } } WHEN("not redirecting stderr to null") { string output; auto result = leatherman::execution::each_line( "cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, [&](string&) { FAIL("should not be called."); return true; }, [&](string& line) { if (!output.empty()) { output += 
"\n"; } output += line; return true; }); THEN("error output is returned") { REQUIRE_FALSE(result); REQUIRE(output == "File Not Found"); } } WHEN("the 'throw on non-zero exit' option is used") { THEN("a child exit exception is thrown") { REQUIRE_THROWS_AS(each_line("cmd.exe", { "/c", "dir", "/B", "does_not_exist" }, nullptr, nullptr, 0, {execution_options::trim_output, execution_options::merge_environment, execution_options::throw_on_nonzero_exit}), child_exit_exception); } } } GIVEN("a long-running command") { WHEN("given a timeout") { THEN("a timeout exception should be thrown") { string ruby = which("ruby.exe"); if (ruby.empty()) { WARN("skipping command timeout test because no ruby was found on the PATH."); return; } try { each_line("cmd.exe", { "/c", "ruby.exe", "-e", "sleep 60" }, nullptr, nullptr, 1); FAIL("did not throw timeout exception"); } catch (timeout_exception const& ex) { // Verify the process was killed REQUIRE(OpenProcess(0, FALSE, ex.pid()) == nullptr); } catch (exception const&) { FAIL("unexpected exception thrown"); } } } } GIVEN("stderr is redirected to null") { WHEN("using a debug log level") { log_capture capture(log_level::debug); REQUIRE(leatherman::execution::each_line(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat", nullptr)); THEN("stderr is logged") { auto output = capture.result(); CAPTURE(output); REQUIRE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } WHEN("not using a debug log level") { log_capture capture(log_level::warning); REQUIRE(leatherman::execution::each_line(EXEC_TESTS_DIRECTORY "/fixtures/windows/error_message.bat", nullptr)); THEN("stderr is not logged") { auto output = capture.result(); CAPTURE(output); REQUIRE_FALSE(re_search(output, boost::regex("DEBUG !!! - error message!"))); } } } } leatherman-1.4.2+dfsg/file_util/000075500000000000000000000000001332360634000165275ustar00rootroot00000000000000leatherman-1.4.2+dfsg/file_util/CMakeLists.txt000064400000000000000000000011251332360634000212660ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS regex filesystem system) add_leatherman_deps("${Boost_LIBRARIES}") add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(nowide) leatherman_dependency(locale) leatherman_dependency(logging) leatherman_dependency(util) if (BUILDING_LEATHERMAN) leatherman_logging_namespace("leatherman.file_util") leatherman_logging_line_numbers() endif() add_leatherman_library(src/directory.cc src/file.cc) add_leatherman_headers(inc/leatherman) add_leatherman_test(tests/file_utils_test.cc tests/directory_utils_test.cc tests/fixtures.cc) leatherman-1.4.2+dfsg/file_util/inc/000075500000000000000000000000001332360634000173005ustar00rootroot00000000000000leatherman-1.4.2+dfsg/file_util/inc/leatherman/000075500000000000000000000000001332360634000214205ustar00rootroot00000000000000leatherman-1.4.2+dfsg/file_util/inc/leatherman/file_util/000075500000000000000000000000001332360634000233745ustar00rootroot00000000000000leatherman-1.4.2+dfsg/file_util/inc/leatherman/file_util/directory.hpp000064400000000000000000000022421332360634000261110ustar00rootroot00000000000000/** * @file * Declares utility functions for enumerating directories. */ #pragma once #include #include namespace leatherman { namespace file_util { /** * Enumerates the files that match the given pattern in the given directory. * @param directory The directory to search for the files. * @param callback The callback to invoke when a matching file is found. 
* @param pattern The pattern to filter the file names by. If empty, all files are passed. */ void each_file(std::string const& directory, std::function const& callback, std::string const& pattern = {}); /** * Enumerates the subdirectories in the given directory. * @param directory The directory to search for the subdirectories. * @param callback The callback to invoke when a matching subdirectory is found. * @param pattern The pattern to filter the subdirectory names by. If empty, all subdirectories are passed. */ void each_subdirectory(std::string const& directory, std::function const& callback, std::string const& pattern = {}); }} // namespace leatherman::file_util leatherman-1.4.2+dfsg/file_util/inc/leatherman/file_util/file.hpp000064400000000000000000000065431332360634000250340ustar00rootroot00000000000000/** * @file * Declares utility functions for reading data from files. */ #pragma once #include #include #include #include #include namespace leatherman { namespace file_util { /** * Reads each line from the given file. * @param path The path to the file to read. * @param callback The callback function that is passed each line in the file. * @return Returns true if the file was opened successfully or false if it was not. */ bool each_line(std::string const& path, std::function callback); /** * Reads the entire contents of the given file into a string. * @param path The path of the file to read. * @return Returns the file contents as a string. */ std::string read(std::string const& path); /** * Reads the entire contents of the given file into a string. * @param path The path of the file to read. * @param contents The returned file contents. * @return Returns true if the contents were read or false if the file is not readable. */ bool read(std::string const& path, std::string& contents); /** *@return Returns true if the specified file exists and can * be read by the current process. */ bool file_readable(const std::string &file_path); /** * Writes content to a temporary file in the specified mode, then * renames the file to the desired path. If the file already exists, * its previous content will be deleted, so appending is not * possible. * @param text The content to be written * @param file_path The final destination and name of the file * @param mode The mode in which to write the file * * Throws an error in case it fails to open the file to write. */ void atomic_write_to_file(const std::string &text, const std::string &file_path, std::ios_base::openmode mode = std::ios::binary); /** * Writes content to a temporary file in the specified mode, then * renames the file to the desired path. If the file already exists, * its previous content will be deleted, so appending is not * possible. * @param text The content to be written * @param file_path The final destination and name of the file * @param perms The file permissions to apply to the file. * On Windows this only toggles read-only. * @param mode The mode in which to write the file * * Throws an error in case it fails to open the file to write. */ void atomic_write_to_file(const std::string &text, const std::string &file_path, boost::optional perms, std::ios_base::openmode mode); /** * Expands a leading tilde to the user's home directory * @return Returns the expanded path, or the original string * in case the expansion fails. */ std::string tilde_expand(std::string path); /** * @return Returns a shell-safe version of the path */ std::string shell_quote(std::string path); /** * @return Returns the home path for the current platform. 
*/ std::string get_home_path(); }} // namespace leatherman::file_util leatherman-1.4.2+dfsg/file_util/src/000075500000000000000000000000001332360634000173165ustar00rootroot00000000000000leatherman-1.4.2+dfsg/file_util/src/directory.cc000064400000000000000000000030761332360634000216370ustar00rootroot00000000000000#include #include #include using namespace std; using namespace boost::filesystem; using namespace leatherman::util; namespace leatherman { namespace file_util { static void each(string const& directory, file_type type, function const& callback, string const& pattern) { boost::regex regex; if (!pattern.empty()) { regex = pattern; } // Attempt to iterate the directory boost::system::error_code ec; directory_iterator it = directory_iterator(directory, ec); if (ec) { return; } // Call the callback for any matching entries directory_iterator end; for (; it != end; ++it) { ec.clear(); auto status = it->status(ec); if (ec || (status.type() != type)) { continue; } if (regex.empty() || re_search(it->path().filename().string(), regex)) { if (!callback(it->path().string())) { break; } } } } void each_file(string const& directory, function const& callback, string const& pattern) { each(directory, regular_file, callback, pattern); } void each_subdirectory(string const& directory, function const& callback, string const& pattern) { each(directory, directory_file, callback, pattern); } }} // namespace leatherman::file_util leatherman-1.4.2+dfsg/file_util/src/file.cc000064400000000000000000000077701332360634000205570ustar00rootroot00000000000000#include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; namespace leatherman { namespace file_util { namespace boost_error = boost::system::errc; namespace boost_file = boost::filesystem; bool each_line(string const& path, function callback) { boost::nowide::ifstream in(path.c_str()); if (!in) { return false; } string line; while (getline(in, line)) { if (!callback(line)) { break; } } return true; } string read(string const& path) { string contents; if (!read(path, contents)) { return {}; } return contents; } bool read(string const& path, string& contents) { boost::nowide::ifstream in(path.c_str(), ios::in | ios::binary); ostringstream buffer; if (!in) { return false; } buffer << in.rdbuf(); contents = buffer.str(); return true; } bool file_readable(const std::string &file_path) { bool exists { false }; if (file_path.empty()) { LOG_WARNING("file path is an empty string"); } else { boost::system::error_code ec; boost_file::file_status status = boost_file::status(file_path.c_str(), ec); if (boost_file::exists(status) && !boost_file::is_directory(status)) { boost::nowide::ifstream file_stream(file_path.c_str()); exists = file_stream.good(); file_stream.close(); } else { LOG_DEBUG("Error reading file: {1}", ec.message()); exists = false; } } return exists; } void atomic_write_to_file(const std::string &text, const std::string &file_path, std::ios_base::openmode mode) { atomic_write_to_file(text, file_path, {}, mode); } void atomic_write_to_file(const std::string &text, const std::string &file_path, boost::optional perms, std::ios_base::openmode mode) { boost::nowide::ofstream ofs; std::string tmp_name = file_path + "~"; ofs.open(tmp_name.c_str(), mode); if (!ofs.is_open()) { throw boost_file::filesystem_error { _("failed to open {1}", file_path), boost_error::make_error_code( boost_error::io_error) }; } if (perms) { 
boost_file::permissions(tmp_name, *perms); } ofs << text; ofs.close(); boost_file::rename(tmp_name.data(), file_path.data()); } std::string tilde_expand(std::string path) { if (path[0] == '~' && (path.size() == 1 || path[1] == '/')) { auto result = get_home_path(); result.append(path.begin() + 1, path.end()); return result; } return path; } std::string shell_quote(std::string path) { std::stringstream ss; ss << boost::io::quoted(path); return ss.str(); } std::string get_home_path() { #ifdef _WIN32 auto home_var = "USERPROFILE"; auto result = boost::nowide::getenv(home_var); #else auto home_var = "HOME"; auto result = boost::nowide::getenv(home_var); #endif if (result){ return result; } else { LOG_WARNING("{1} has not been set", home_var); return ""; } } }} // namespace leatherman::file_util leatherman-1.4.2+dfsg/file_util/tests/000075500000000000000000000000001332360634000176715ustar00rootroot00000000000000leatherman-1.4.2+dfsg/file_util/tests/directory_utils_test.cc000064400000000000000000000057441332360634000244750ustar00rootroot00000000000000#include #include #include #include "fixtures.hpp" #include using namespace leatherman::file_util; TEST_CASE("file_util::each_file", "[utils]") { temp_directory directory; atomic_write_to_file("1\n", directory.get_dir_name() + "/test1"); atomic_write_to_file("2\n", directory.get_dir_name() + "/test2"); atomic_write_to_file("3\n", directory.get_dir_name() + "/test3"); SECTION("each file should be visited") { std::set file_contents; each_file(directory.get_dir_name(), [&file_contents](std::string const &path) { file_contents.insert(read(path)); return true; }); REQUIRE(file_contents.size() == 3u); REQUIRE(file_contents.find("1\n") != file_contents.end()); REQUIRE(file_contents.find("2\n") != file_contents.end()); REQUIRE(file_contents.find("3\n") != file_contents.end()); } SECTION("can find a file to match a pattern") { std::string content = "N/A"; each_file(directory.get_dir_name(), [&content](std::string const &path) { return read(path, content); }, "[0-1]"); REQUIRE(content == "1\n"); } SECTION("only one file returned from false callback"){ int count = 0; each_file(directory.get_dir_name(), [&count](std::string const& path){ count++; return false; }); REQUIRE(count == 1); } } TEST_CASE("file_util::each_subdirectory", "[utils]") { temp_directory directory; boost::filesystem::create_directory(directory.get_dir_name() + "/test1"); atomic_write_to_file("1", directory.get_dir_name() + "/test1/t1"); boost::filesystem::create_directory(directory.get_dir_name() + "/test2"); atomic_write_to_file("2a", directory.get_dir_name() + "/test2/t2a"); atomic_write_to_file("2b", directory.get_dir_name() + "/test2/t2b"); SECTION("each subdirectory should be visited") { int counter = 0; each_subdirectory(directory.get_dir_name(), [&counter](std::string const &path) { each_file(path, [&counter](std::string const &file) { counter++; return true; }); return true; }); REQUIRE(counter == 3); } SECTION("can find directories that match a pattern") { int counter = 0; each_subdirectory(directory.get_dir_name(), [&counter](std::string const &path) { each_file(path, [&counter](std::string const &file) { counter++; return true; }); return true; }, "[2-3]"); REQUIRE(counter == 2); } SECTION("only one directory found from false callback"){ int count = 0; each_subdirectory(directory.get_dir_name(), [&count](std::string const& path){ count++; return false; }); REQUIRE(count == 1); } } 
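The file_util interfaces declared above (atomic_write_to_file, read, each_line, each_file) compose naturally, and the tests in this directory exercise each of them in isolation. The sketch below shows one way to use them together; it is only an illustration under assumed conventions: the <leatherman/file_util/...> include paths assume an installed copy of the headers, and the /tmp paths and file contents are made up for the example.
```
// Usage sketch (not part of the library). Include paths assume the headers
// above are installed under <leatherman/file_util/>; the /tmp paths below
// are illustrative only.
#include <leatherman/file_util/file.hpp>
#include <leatherman/file_util/directory.hpp>
#include <iostream>
#include <string>

namespace lth_file = leatherman::file_util;

int main() {
    // Write atomically: content goes to "<path>~" first and the temporary
    // file is then renamed over the destination, so readers never observe
    // a partially written file.
    lth_file::atomic_write_to_file("alpha\nbeta\n", "/tmp/lth_example.txt");

    // Read the whole file back, then visit it line by line.
    std::string contents = lth_file::read("/tmp/lth_example.txt");
    lth_file::each_line("/tmp/lth_example.txt", [](std::string& line) {
        std::cout << "line: " << line << "\n";
        return true;   // return false to stop after the current line
    });

    // Enumerate regular files whose names match a regex pattern.
    lth_file::each_file("/tmp", [](std::string const& path) {
        std::cout << "found: " << path << "\n";
        return true;   // return false to stop after the current file
    }, "lth_example");

    return contents == "alpha\nbeta\n" ? 0 : 1;
}
```
Returning false from either callback stops the enumeration after the current entry, which is the behaviour the tests above rely on.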
leatherman-1.4.2+dfsg/file_util/tests/file_utils_test.cc000064400000000000000000000136161332360634000234050ustar00rootroot00000000000000#include #include #include "fixtures.hpp" #include #ifdef _WIN32 #include #endif using namespace leatherman::file_util; TEST_CASE("file_util::tilde_expand", "[utils]") { #ifdef _WIN32 _putenv("USERPROFILE=/testhome"); #else setenv("HOME", "/testhome", 1); #endif SECTION("empty path should be empty") { REQUIRE(tilde_expand("") == ""); } SECTION("spaces should be preserved") { REQUIRE(tilde_expand("i like spaces") == "i like spaces"); } SECTION("should expand using environment variable") { CHECK(tilde_expand("~") == "/testhome"); CHECK(tilde_expand("~/") == "/testhome/"); CHECK(tilde_expand("~/foo") == "/testhome/foo"); } SECTION("only a ~ at the start") { REQUIRE(tilde_expand("/foo/bar~") == "/foo/bar~"); } SECTION("~baz/foo does not expand") { REQUIRE(tilde_expand("~baz/foo") == "~baz/foo"); } SECTION("it should expand the home directory path") { REQUIRE(tilde_expand("~/foo") != "~/foo"); } SECTION("it should not expand the working directory path") { REQUIRE(tilde_expand("./foo") == "./foo"); } auto home_path = get_home_path(); SECTION("it should expand ~ to the HOME env var") { REQUIRE(tilde_expand("~") == home_path); } SECTION("it should expand ~ as the base directory") { std::string expected_path{home_path + "/spam"}; std::string expanded_path{tilde_expand("~/spam")}; REQUIRE(expanded_path == expected_path); } } TEST_CASE("shell_quote", "[utils]") { SECTION("empty string") { REQUIRE(shell_quote("") == "\"\""); } SECTION("single word") { REQUIRE(shell_quote("plain") == "\"plain\""); } SECTION("words separated by space") { REQUIRE(shell_quote("a space") == "\"a space\""); } SECTION("exclamation mark") { REQUIRE(shell_quote("!csh") == "\"!csh\""); } SECTION("single quote before expression") { REQUIRE(shell_quote("'open quote") == "\"'open quote\""); } SECTION("single quote after expression") { REQUIRE(shell_quote("close quote'") == "\"close quote'\""); } SECTION("double quote before expression") { REQUIRE(shell_quote("\"open doublequote") == "\"\\\"open doublequote\""); } SECTION("double quote after expression") { REQUIRE(shell_quote("close doublequote\"") == "\"close doublequote\\\"\""); } } TEST_CASE("lth_file::file_readable", "[utils]") { SECTION("it can check that a file does not exist") { auto file_path = unique_fixture_path().string(); CAPTURE(file_path); REQUIRE_FALSE(file_readable(file_path)); } SECTION("directories are not readable") { temp_directory dir_path; REQUIRE_FALSE(file_readable(dir_path.get_dir_name())); } } TEST_CASE("lth_file::atomic_write_to_file", "[utils]") { SECTION("it can write to a regular file, ensure it exists, and delete it") { auto file_path = unique_fixture_path().string(); REQUIRE_FALSE(file_readable(file_path)); atomic_write_to_file("test\n", file_path); REQUIRE(file_readable(file_path)); boost::filesystem::remove(file_path); REQUIRE_FALSE(file_readable(file_path)); } SECTION("can write to an existing file") { temp_file file("existing file"); REQUIRE(file_readable(file.get_file_name())); atomic_write_to_file("test", file.get_file_name()); REQUIRE(file_readable(file.get_file_name())); REQUIRE(read(file.get_file_name()) == "test"); } #ifndef _WIN32 SECTION("can write a file with permissions") { auto file_path = unique_fixture_path().string(); REQUIRE_FALSE(file_readable(file_path)); auto perms = boost::filesystem::owner_read | boost::filesystem::owner_write; atomic_write_to_file("test\n", file_path, perms, 
std::ios::binary); REQUIRE(file_readable(file_path)); auto stat = boost::filesystem::status(file_path); REQUIRE(stat.permissions() == perms); boost::filesystem::remove(file_path); REQUIRE_FALSE(file_readable(file_path)); } #endif } TEST_CASE("file_util::read", "[utils]") { SECTION("trying to read a nonexistent file returns the empty string"){ std::string contents; REQUIRE(read("does_not_exist") == ""); REQUIRE_FALSE(read("does_not_exist", contents)); REQUIRE(contents.empty()); } SECTION("it can read from a file") { auto file_path = unique_fixture_path().string(); atomic_write_to_file("test\n", file_path); REQUIRE(file_readable(file_path)); std::string contents; REQUIRE(read(file_path, contents)); REQUIRE(contents == "test\n"); REQUIRE(read(file_path) == "test\n"); boost::filesystem::remove(file_path); } } TEST_CASE("file_util::each_line", "[utils]") { SECTION("trying to read a nonexistent file returns false") { REQUIRE_FALSE(each_line("does_not_exist", [](std::string &line) { FAIL("should not be called"); return true; })); } SECTION("an action is performed on each line of a file") { temp_file file("test1\ntest2\ntest3\n"); int i = 0; REQUIRE(each_line(file.get_file_name(), [&i](std::string const &line) { i++; return line == ("test" + std::to_string(i)); })); REQUIRE(i == 3); } SECTION("a callback that returns false stops at the first line"){ temp_file file("test1\ntest2\ntest3\n"); std::vector lines; REQUIRE(each_line(file.get_file_name(), [&](std::string& line) { lines.emplace_back(move(line)); return false; })); REQUIRE(lines.size() == 1u); REQUIRE(lines[0] == "test1"); } } leatherman-1.4.2+dfsg/file_util/tests/fixtures.cc000064400000000000000000000016431332360634000220550ustar00rootroot00000000000000#include "fixtures.hpp" #include #include temp_directory::temp_directory() { auto unique_path = unique_fixture_path(); dir_name = unique_path.string(); boost::filesystem::create_directory(unique_path); } temp_directory::~temp_directory() { boost::filesystem::remove_all(dir_name); } std::string const& temp_directory::get_dir_name() const { return dir_name; } temp_file::temp_file(const std::string &content) { auto unique_path = unique_fixture_path(); file_name = unique_path.string(); leatherman::file_util::atomic_write_to_file(content, file_name); } temp_file::~temp_file() { boost::filesystem::remove(file_name); } std::string const& temp_file::get_file_name() const { return file_name; } boost::filesystem::path unique_fixture_path() { return boost::filesystem::unique_path("file_util_fixture_%%%%-%%%%-%%%%-%%%%"); }leatherman-1.4.2+dfsg/file_util/tests/fixtures.hpp000064400000000000000000000013431332360634000222540ustar00rootroot00000000000000#pragma once #include #include /** * Class to create a temporary directory with a unique name * and destroy it once it is no longer needed. * */ class temp_directory { public: temp_directory(); ~temp_directory(); std::string const& get_dir_name() const; private: std::string dir_name; }; /** * Class to create a temporary file with a unique name and * destroy it once it is no longer needed. */ class temp_file { public: temp_file(std::string const& content); ~temp_file(); std::string const& get_file_name() const; private: std::string file_name; }; /** Generates a unique string for use as a file path. 
*/ boost::filesystem::path unique_fixture_path();leatherman-1.4.2+dfsg/json_container/000075500000000000000000000000001332360634000175665ustar00rootroot00000000000000leatherman-1.4.2+dfsg/json_container/CMakeLists.txt000064400000000000000000000004731332360634000223320ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS regex) add_leatherman_deps("${Boost_LIBRARIES}") add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(locale) add_leatherman_library("src/json_container.cc") add_leatherman_headers("inc/leatherman") add_leatherman_test("tests/json_container_test.cc")leatherman-1.4.2+dfsg/json_container/README.md000064400000000000000000000073101332360634000210460ustar00rootroot00000000000000## JsonContainer The JsonContainer class provides a simplified abstraction around complex JSON C++ libraries. It has the following constructors: JsonContainer() // Creates an empty container JsonContainer(std::string json_txt) // creates a JsonContainer from a JSON string Consider the following JSON string wrapped in a JsonContainer object, data. ``` { "module" : "puppet", "action" : "run", "params" : { "first" : "--module-path=/home/alice/modules" } } ``` You can construct a JsonContainer as follows: ``` JsonContainer data { jsons_string }; ``` The JsonContainer's constructor can throw the following exception: - data_parse_error - This error is thrown when invalid JSON is passed to the constructor. Note that you can instantiate JsonContainer by passing a non-empty `std::string` representing a JSON object, array, or value. In case you want to pass a JSON string, you must include escaped double quotes. For example: ``` JsonContainer { "4" }; // a number JsonContainer { "\"fast'n bulbous\"" }; // a string JsonContainer { "null" }; // the null value JsonContainer { "" }; // JsonContainer will throw a data_parse_error! ``` The following calls to the _get_ method will retrieve values from the JsonContainer. ``` data.get("module"); // == "puppet" data.get({ "params", "first" }); // == "--module-path=/home/alice/modules" ``` Note that when the _get_ method is invoked with an initialiser list it will use each argument to descend a level into the object tree. Also, when an unsigned integer is provided as index, _get_ will verify that the specified in an array and return its requested element. The _get_ method can throw the following exception: - data_key_error - Thrown when the specified entry does not exist. - data_type_error - Thrown when an index is provided but the parent element is not an array. - data_index_error - Thrown when the provided index is out of bounds. The supported scalar types are: int, double, bool, std::string, and JsonContainer. Elements of such types can be grouped in an array, represented by a std::vector instance. You can also set the value of fields and create new fields with the _set_ method. ``` data.set("foo", 42); data.set({ "params", "second" }, false); ``` This will change the internal JSON representation to ``` { "module" : "puppet", "action" : "run", "params" : { "first" : "--module-path=/home/alice/modules", "second" : false }, "foo" : 42 } ``` Note that the _set_ method uses the initialiser list in the same way as the _get_ method. Each argument to the list is one level to descend. The _set_ method can throw the following exception: - data_key_error - thrown when a nested message key is invalid (i.e. 
the associated value is not a valid JSON object, so that is not possible to iterate the remaining nested keys) or when the root element is not a valid JSON object, so that is not possible to set the specified key-value entry. You can use the _type_ method to retrieve the type of a given value. As done for _get_ and _set_, you can specify the value's key with an initialiser list, in order to navigate multiple levels within a JSON object. The _type_ method returns a value of the DataType enumeration, defined as: ``` enum DataType { Object, Array, String, Int, Bool, Double, Null }; ``` As for the _get_ method, _type_ can throw the following exceptions: - data_key_error - Thrown when the specified entry does not exist. - data_type_error - Thrown when an index is provided but the parent element is not an array. - data_index_error - Thrown when the provided index is out of bounds. leatherman-1.4.2+dfsg/json_container/inc/000075500000000000000000000000001332360634000203375ustar00rootroot00000000000000leatherman-1.4.2+dfsg/json_container/inc/leatherman/000075500000000000000000000000001332360634000224575ustar00rootroot00000000000000leatherman-1.4.2+dfsg/json_container/inc/leatherman/json_container/000075500000000000000000000000001332360634000254725ustar00rootroot00000000000000leatherman-1.4.2+dfsg/json_container/inc/leatherman/json_container/json_container.hpp000064400000000000000000000407021332360634000312210ustar00rootroot00000000000000#pragma once #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; // Forward declarations for rapidjson namespace rapidjson { class CrtAllocator; template class GenericValue; template struct UTF8; template class GenericDocument; } // namespace rapidjson namespace leatherman { namespace json_container { // Constants constexpr size_t DEFAULT_LEFT_PADDING { 4 }; // Errors /// Parent error class. class data_error : public std::runtime_error { public: explicit data_error(std::string const& msg) : std::runtime_error(msg) {} }; /// Error thrown when trying to parse an invalid JSON string. class data_parse_error : public data_error { public: explicit data_parse_error(std::string const& msg) : data_error(msg) {} }; /// Error due to an operation involving a key. class data_key_error : public data_error { public: explicit data_key_error(std::string const& msg) : data_error(msg) {} }; /// Error due to an operation involving an array index. class data_index_error : public data_error { public: explicit data_index_error(std::string const& msg) : data_error(msg) {} }; /// Error due to wrongly specified type. class data_type_error : public data_error { public: explicit data_type_error(std::string const& msg) : data_error(msg) {} }; // Types enum DataType { Object, Array, String, Int, Bool, Double, Null }; struct JsonContainerKey : public std::string { JsonContainerKey(const std::string& value) : std::string(value) {} JsonContainerKey(const char* value) : std::string(value) {} JsonContainerKey(std::initializer_list il) = delete; }; /** * Typedef for RapidJSON allocator. */ using json_allocator = rapidjson::CrtAllocator; /** * Typedef for RapidJSON value. */ using json_value = rapidjson::GenericValue, json_allocator>; /** * Typedef for RapidJSON document. 
*/ using json_document = rapidjson::GenericDocument, json_allocator, json_allocator>; // Usage: // // SUPPORTED SCALARS: // int, float, double, bool, std::string, nullptr // // To set a key to a scalar value in object x // x.set("foo", 1); // x.set(foo", "bar"); // // To set a nested key to a scalar value in object x // x.set({ "foo", "bar", "baz" }, true); // // To set a key to a vector value in object x // std::vector tmp { 1, 2, 3 }; // x.set>("foo", tmp); // // To get a scalar value from a key in object x // x.get("foo"); // x.get("bar"); // // To get a vector from a key in object x // x.get>("foo"); // // To get the int entry with index i from the array a in object x // x.get("a", 1); // // To get a result object (json object) from object x // x.get("foo"); // // To get a null value from a key in object x // x.get("foo") == ""; // x.get("foo") == 0; // x.get("foo") == false; // x.get("foo") == 0.0f; // x.get("foo") == 0.0; // // To get a json string representation of object x // x.toString(); // // To check if a key is set in object x // x.includes("foo"); // x.includes({ "foo", "bar", "baz" }); class JsonContainer { public: JsonContainer(); explicit JsonContainer(const std::string& json_txt); explicit JsonContainer(const json_value& value); JsonContainer(const JsonContainer& data); JsonContainer(const JsonContainer&& data); JsonContainer& operator=(JsonContainer other); ~JsonContainer(); const json_document& getRaw() const; std::string toString() const; /// Throw a data_key_error in case the specified key is unknown. std::string toString(const JsonContainerKey& key) const; /// Throw a data_key_error in case the specified key is unknown. std::string toString(const std::vector& keys) const; // TODO: Refactor this to use a default parameter of DEFAULT_LEFT_PADDING // for left_padding to remove the empty-argument variant. std::string toPrettyString(size_t left_padding) const; std::string toPrettyString() const; std::string toPrettyJson(size_t left_padding = DEFAULT_LEFT_PADDING) const; /// Return true if the root is an empty JSON array or an empty /// JSON object, false otherwise. bool empty() const; /// Return the number of entries of the root element in case /// is an object or array; returns 0 in case of a scalar size_t size() const; /// Return the number of entries of the specified element; /// returns 0 in case it's scalar /// Throw a data_key_error in case the specified key is unknown. size_t size(const JsonContainerKey& key) const; /// Return the number of entries of the specified element; /// return 0 in case it's scalar /// Throw a data_key_error in case of unknown keys. size_t size(const std::vector& keys) const; /// In case the root entry is an object, returns its keys, /// otherwise an empty vector. std::vector keys() const; /// Whether the specified entry exists. bool includes(const JsonContainerKey& key) const; /// Whether the specified entry exists. bool includes(const std::vector& keys) const; DataType type() const; /// Throw a data_key_error in case the specified key is unknown. DataType type(const JsonContainerKey& key) const; /// Throw a data_key_error in case of unknown keys. DataType type(const std::vector& keys) const; /// Throw a data_type_error in case the root entry is not an array. /// Throw a data_index_error in case the index is out of bounds. DataType type(const size_t idx) const; /// Throw a data_key_error in case the specified key is unknown. /// Throw a data_type_error in case the specified entry is not an array. 
/// Throw a data_index_error in case the index is out of bound. DataType type(const JsonContainerKey& key, const size_t idx) const; /// Throw a data_key_error in case of unknown keys. /// Throw a data_type_error in case the specified entry is not an array. /// Throw a data_index_error in case the index is out of bound. DataType type(const std::vector& keys, const size_t idx) const; /// Return the value of the root entry. /// Throw a data_type_error in case the type of the root entry /// does not match the specified one. template T get() const { return getValue(*getValueInJson()); } /// Return the value of the specified entry of the root object. /// Throw a data_key_error in case the entry does not exist. /// Throw a data_type_error in case the type T doesn't match /// the specified one. template T get(const JsonContainerKey& key) const { return getValue(*getValueInJson(std::vector { key })); } /// Return the value of the specified nested entry. /// Throw a data_key_error in case the entry does not exist. /// Throw a data_type_error in case the type T doesn't match /// the specified one. template T get(std::vector keys) const { return getValue(*getValueInJson(keys)); } /// Return the indexed value of root array. /// Throw a data_index_error in case the index is out of bound. /// Throw a data_type_error in case the type T doesn't match /// the one of the specified value or if the root entry is not /// an array. template T get(const size_t idx) const { return getValue(*getValueInJson(std::vector {}, true, idx)); } /// Return the indexed value of the specified array entry. /// Throw a data_key_error in case the array entry is unknown. /// Throw a data_index_error in case the index is out of bound. /// Throw a data_type_error in case the type T doesn't match /// the one of the specified entry or in case the specified /// entry is not an array. template T get(const JsonContainerKey& key, const size_t idx) const { return getValue(*getValueInJson(std::vector { key }, true, idx)); } /// Return the indexed value of the specified nested array /// entry. /// Throw a data_key_error in case the array entry is unknown. /// Throw a data_index_error in case the index is out of bound. /// Throw a data_type_error in case the type T doesn't match /// the one of the specified entry or in case the specified /// entry is not an array. template T get(std::vector keys, const size_t idx) const { return getValue(*getValueInJson(keys, true, idx)); } /// Return the value of the specified entry of the root object, /// or default_value if the entry doesn't exist. /// Throw a data_type_error in case the type T doesn't match /// the specified one or if the root entry is not an object. template T getWithDefault(const JsonContainerKey& key, const T default_value) const { auto jval = getValueInJson(); auto key_data = key.data(); if (!isObject(*jval)) { throw data_type_error { _("not an object") }; } if (!hasKey(*jval, key_data)) { return default_value; } return getValue(*getValueInJson(*jval, key_data)); } /// Return the value of the specified nested entry or /// default_value if the entry doesn't exist but its parent is /// an object. /// Throw a data_type_error in case the type T doesn't match /// the specified one or in case the parent of the specified /// entry is not an object. 
template T getWithDefault(const std::vector& keys, const T& default_value) const { auto key_data = keys.back().data(); auto jval_obj = getValueInJson(keys.cbegin(), keys.cend()-1); if (!isObject(*jval_obj)) { throw data_type_error { _("not an object") }; } if (!hasKey(*jval_obj, key_data)) { return default_value; } return getValue(*getValueInJson(*jval_obj, key_data)); } /// Throw a data_key_error in case the root is not a valid JSON /// object, so that is not possible to set the entry. template void set(const JsonContainerKey& key, T value) { auto jval = getValueInJson(); auto key_data = key.data(); if (!isObject(*jval)) { throw data_key_error { _("root is not a valid JSON object") }; } if (!hasKey(*jval, key_data)) { createKeyInJson(key_data, *jval); } setValue(*getValueInJson(*jval, key_data), value); } /// Throw a data_key_error if a known nested key is not associated /// with a valid JSON object, so that it is not possible to /// iterate the remaining keys. template void set(std::vector keys, T value) { auto jval = getValueInJson(); for (const auto& key : keys) { const char* key_data = key.data(); if (!isObject(*jval)) { throw data_key_error { _("invalid key supplied; cannot navigate the provided path") }; } if (!hasKey(*jval, key_data)) { createKeyInJson(key_data, *jval); } jval = getValueInJson(*jval, key_data); } setValue(*jval, value); } private: std::unique_ptr document_root_; size_t getSize(const json_value& jval) const; DataType getValueType(const json_value& jval) const; bool hasKey(const json_value& jval, const char* key) const; // NOTE(ale): we cant' use json_value::IsObject directly // since we have forward declarations for rapidjson; otherwise // we would have an implicit template instantiation error bool isObject(const json_value& jval) const; // Root object entry accessor // Throws a data_type_error in case the specified value is not // an object. // Throws a data_key_error or if the key is unknown. json_value* getValueInJson(const json_value& jval, const char* key) const; // Root array entry accessor // Throws a data_type_error in case the specified value is not // an array. // Throws a data_index_error in case the arraye index is out // of bounds. json_value* getValueInJson(const json_value& jval, const size_t& idx) const; // Generic entry accessor // In case any key is specified, throws a data_type_error if // the specified entry is not an object; throws a // data_key_error or if the key is unknown. // In case an array element is specified, throws a // data_index_error if the index is out of bounds. json_value* getValueInJson( std::vector::const_iterator begin, std::vector::const_iterator end, const bool is_array = false, const size_t idx = 0) const; // Generic entry accessor // In case any key is specified, throws a data_type_error if // the specified entry is not an object; throws a // data_key_error or if the key is unknown. // In case an array element is specified, throws a // data_index_error if the index is out of bounds. 
json_value* getValueInJson( const std::vector& keys = std::vector {}, const bool is_array = false, const size_t idx = 0) const { return getValueInJson(keys.cbegin(), keys.cend(), is_array, idx); } void createKeyInJson(const char* key, json_value& jval); template T getValue(const json_value& value) const; template void setValue(json_value& jval, T new_value); }; template<> void JsonContainer::setValue<>(json_value& jval, const std::string& new_value); template<> void JsonContainer::setValue<>(json_value& jval, const char* new_value); template<> void JsonContainer::setValue<>(json_value& jval, bool new_value); template<> void JsonContainer::setValue<>(json_value& jval, int new_value); template<> void JsonContainer::setValue<>(json_value& jval, double new_value); template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value); template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value); template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value); template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value); template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value); template<> void JsonContainer::setValue<>(json_value& jval, JsonContainer new_value); }} // namespace leatherman::json_container leatherman-1.4.2+dfsg/json_container/src/000075500000000000000000000000001332360634000203555ustar00rootroot00000000000000leatherman-1.4.2+dfsg/json_container/src/json_container.cc000064400000000000000000000465671332360634000237210ustar00rootroot00000000000000#include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; namespace leatherman { namespace json_container { const size_t LEFT_PADDING_INCREMENT { 2 }; // // free functions // std::string valueToString(const json_value& jval) { rapidjson::StringBuffer buffer; rapidjson::Writer writer { buffer }; jval.Accept(writer); return buffer.GetString(); } // // public interface // JsonContainer::JsonContainer() : document_root_ { new json_document() } { document_root_->SetObject(); } JsonContainer::JsonContainer(const std::string& json_text) : JsonContainer() { document_root_->Parse(json_text.data()); if (document_root_->HasParseError()) { throw data_parse_error { _("invalid json") }; } } JsonContainer::JsonContainer(const json_value& value) : JsonContainer() { // Because rapidjson disallows the use of copy constructors we pass // the json by const reference and recreate it by explicitly copying document_root_->CopyFrom(value, document_root_->GetAllocator()); } JsonContainer::JsonContainer(const JsonContainer& data) : JsonContainer(){ document_root_->CopyFrom(*data.document_root_, document_root_->GetAllocator()); } JsonContainer::JsonContainer(const JsonContainer&& data) : JsonContainer() { document_root_->CopyFrom(*data.document_root_, document_root_->GetAllocator()); } JsonContainer& JsonContainer::operator=(JsonContainer other) { std::swap(document_root_, other.document_root_); return *this; } // unique_ptr requires a complete type at time of destruction. this forces us to // either have an empty destructor or use a shared_ptr instead. 
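Before moving on to the implementation, a compact usage sketch of the interface declared above may help; it is illustrative only, and the JSON text, key names, and default values in it are invented for the example.
```
// Usage sketch (not part of the library). The JSON text, key names and
// default values below are invented for the example.
#include <leatherman/json_container/json_container.hpp>
#include <iostream>
#include <string>

using leatherman::json_container::JsonContainer;
using leatherman::json_container::DataType;

int main() {
    JsonContainer data { "{ \"module\" : \"puppet\","
                         "  \"params\" : { \"first\" : \"--verbose\" } }" };

    // Scalar and nested reads; the template argument states the expected
    // type, and a mismatch raises data_type_error.
    auto module  = data.get<std::string>("module");
    auto first   = data.get<std::string>({ "params", "first" });

    // getWithDefault avoids data_key_error when an optional entry is absent.
    auto timeout = data.getWithDefault<int>("timeout", 30);

    // Create or update entries, including nested ones.
    data.set<int>("foo", 42);
    data.set<bool>({ "params", "second" }, false);

    // Inspect the structure before reading, then pretty-print the document.
    if (data.includes("params") && data.type("params") == DataType::Object) {
        std::cout << data.toPrettyJson() << std::endl;
    }

    return (module == "puppet" && !first.empty() && timeout == 30) ? 0 : 1;
}
```
Note that getWithDefault only guards against a missing key: an entry that is present but of the wrong type still raises data_type_error, as documented above.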
JsonContainer::~JsonContainer() {} // representation const json_document& JsonContainer::getRaw() const { return *document_root_; } std::string JsonContainer::toString() const { return valueToString(*document_root_); } std::string JsonContainer::toString(const JsonContainerKey& key) const { auto jval = getValueInJson({ key }); return valueToString(*jval); } std::string JsonContainer::toString(const std::vector& keys) const { auto jval = getValueInJson(keys); return valueToString(*jval); } std::string JsonContainer::toPrettyString(size_t left_padding) const { if (empty()) { switch (type()) { case DataType::Object: return "{}"; case DataType::Array: return "[]"; default: return "\"\""; } } std::string formatted_data {}; if (type() == DataType::Object) { for (const auto& key : keys()) { formatted_data += std::string(left_padding, ' '); formatted_data += key + " : "; switch (type(key)) { case DataType::Object: // Inner object: add new line, increment padding formatted_data += "\n"; formatted_data += get(key).toPrettyString( left_padding + LEFT_PADDING_INCREMENT); break; case DataType::Array: // Array: add raw string, regardless of its items formatted_data += toString(key); break; case DataType::String: formatted_data += get(key); break; case DataType::Int: formatted_data += std::to_string(get(key)); break; case DataType::Bool: if (get(key)) { formatted_data += "true"; } else { formatted_data += "false"; } break; case DataType::Double: formatted_data += std::to_string(get(key)); break; default: formatted_data += "NULL"; } formatted_data += "\n"; } } else { formatted_data += toString(); } return formatted_data; } std::string JsonContainer::toPrettyString() const { return toPrettyString(DEFAULT_LEFT_PADDING); } std::string JsonContainer::toPrettyJson(size_t left_padding) const { rapidjson::StringBuffer buffer; rapidjson::PrettyWriter writer { buffer }; writer.SetIndent(' ', left_padding); auto& jval = *getValueInJson(); jval.Accept(writer); return buffer.GetString(); } // capacity bool JsonContainer::empty() const { auto jval = getValueInJson(); auto data_type = getValueType(*jval); if (data_type == DataType::Object) { return jval->ObjectEmpty(); } else if (data_type == DataType::Array) { return jval->Empty(); } else { return false; } } size_t JsonContainer::size() const { auto jval = getValueInJson(); return getSize(*jval); } size_t JsonContainer::size(const JsonContainerKey& key) const { auto jval = getValueInJson({ key }); return getSize(*jval); } size_t JsonContainer::size(const std::vector& keys) const { auto jval = getValueInJson(keys); return getSize(*jval); } // keys std::vector JsonContainer::keys() const { std::vector k; auto jval = getValueInJson(); if (jval->IsObject()) { for (json_value::ConstMemberIterator itr = jval->MemberBegin(); itr != jval->MemberEnd(); ++itr) { k.emplace_back(itr->name.GetString(), itr->name.GetStringLength()); } } // Return an empty vector if the document type isn't an object return k; } // includes bool JsonContainer::includes(const JsonContainerKey& key) const { auto jval = getValueInJson(); if (hasKey(*jval, key.data())) { return true; } else { return false; } } bool JsonContainer::includes(const std::vector& keys) const { auto jval = getValueInJson(); for (const auto& key : keys) { if (!hasKey(*jval, key.data())) { return false; } jval = getValueInJson(*jval, key.data()); } return true; } // type DataType JsonContainer::type() const { auto jval = getValueInJson(); return getValueType(*jval); } DataType JsonContainer::type(const JsonContainerKey& key) 
const { auto jval = getValueInJson({ key }); return getValueType(*jval); } DataType JsonContainer::type(const std::vector& keys) const { auto jval = getValueInJson(keys); return getValueType(*jval); } DataType JsonContainer::type(const size_t idx) const { auto jval = getValueInJson(std::vector {}, true, idx); return getValueType(*jval); } DataType JsonContainer::type(const JsonContainerKey& key, const size_t idx) const { auto jval = getValueInJson({ key }, true, idx); return getValueType(*jval); } DataType JsonContainer::type(const std::vector& keys, const size_t idx) const { auto jval = getValueInJson(keys, true, idx); return getValueType(*jval); } // // Private functions // size_t JsonContainer::getSize(const json_value& jval) const { switch (getValueType(jval)) { case DataType::Array: return jval.Size(); case DataType::Object: return jval.MemberCount(); default: return 0; } } DataType JsonContainer::getValueType(const json_value& jval) const { switch (jval.GetType()) { case rapidjson::Type::kNullType: return DataType::Null; case rapidjson::Type::kFalseType: return DataType::Bool; case rapidjson::Type::kTrueType: return DataType::Bool; case rapidjson::Type::kObjectType: return DataType::Object; case rapidjson::Type::kArrayType: return DataType::Array; case rapidjson::Type::kStringType: return DataType::String; case rapidjson::Type::kNumberType: if (jval.IsDouble()) { return DataType::Double; } else { return DataType::Int; } default: // This is unexpected as for rapidjson docs return DataType::Null; } } // Internal key / index manipulation methods bool JsonContainer::hasKey(const json_value& jval, const char* key) const { return (jval.IsObject() && jval.HasMember(key)); } bool JsonContainer::isObject(const json_value& jval) const { return jval.IsObject(); } json_value* JsonContainer::getValueInJson(const json_value& jval, const char* key) const { if (!jval.IsObject()) { throw data_type_error { _("not an object") }; } if (!jval.HasMember(key)) { throw data_key_error { _("unknown object entry with key: {1}", key) }; } return const_cast(&jval[key]); } json_value* JsonContainer::getValueInJson(const json_value& jval, const size_t& idx) const { if (getValueType(jval) != DataType::Array) { throw data_type_error { _("not an array") }; } if (idx >= jval.Size()) { throw data_index_error { _("array index out of bounds") }; } return const_cast(&jval[idx]); } json_value* JsonContainer::getValueInJson(std::vector::const_iterator begin, std::vector::const_iterator end, const bool is_array, const size_t idx) const { auto jval = dynamic_cast(document_root_.get()); for (auto it = begin; it != end; ++it) { jval = getValueInJson(*jval, it->data()); } if (is_array) { jval = getValueInJson(*jval, idx); } return jval; } void JsonContainer::createKeyInJson(const char* key, json_value& jval) { jval.AddMember(json_value(key, document_root_->GetAllocator()).Move(), json_value(rapidjson::kObjectType).Move(), document_root_->GetAllocator()); } // getValue specialisations template<> int JsonContainer::getValue<>(const json_value& value) const { if (value.IsNull()) { return 0; } if (!value.IsInt()) { throw data_type_error { _("not an integer") }; } return value.GetInt(); } template<> bool JsonContainer::getValue<>(const json_value& value) const { if (value.IsNull()) { return false; } if (!value.IsBool()) { throw data_type_error { _("not a boolean") }; } return value.GetBool(); } template<> std::string JsonContainer::getValue<>(const json_value& value) const { if (value.IsNull()) { return ""; } if (!value.IsString()) { 
throw data_type_error { _("not a string") }; } return std::string(value.GetString(), value.GetStringLength()); } template<> double JsonContainer::getValue<>(const json_value& value) const { if (value.IsNull()) { return 0.0; } if (!value.IsDouble()) { throw data_type_error { _("not a double") }; } return value.GetDouble(); } template<> JsonContainer JsonContainer::getValue<>(const json_value& value) const { if (value.IsNull()) { JsonContainer container {}; return container; } // HERE(ale): we don't do any type check // rvalue return (implicitly) JsonContainer container { value }; return container; } template<> json_value JsonContainer::getValue<>(const json_value& value) const { JsonContainer* tmp_this = const_cast(this); json_value v { value, tmp_this->document_root_->GetAllocator() }; return v; } template<> std::vector JsonContainer::getValue<>(const json_value& value) const { std::vector tmp {}; if (value.IsNull()) { return tmp; } if (!value.IsArray()) { throw data_type_error { _("not an array") }; } for (json_value::ConstValueIterator itr = value.Begin(); itr != value.End(); itr++) { if (!itr->IsString()) { throw data_type_error { _("not a string") }; } tmp.emplace_back(itr->GetString(), itr->GetStringLength()); } return tmp; } template<> std::vector JsonContainer::getValue<>(const json_value& value) const { std::vector tmp {}; if (value.IsNull()) { return tmp; } if (!value.IsArray()) { throw data_type_error { _("not an array") }; } for (json_value::ConstValueIterator itr = value.Begin(); itr != value.End(); itr++) { if (!itr->IsBool()) { throw data_type_error { _("not a boolean") }; } tmp.push_back(itr->GetBool()); } return tmp; } template<> std::vector JsonContainer::getValue<>(const json_value& value) const { std::vector tmp {}; if (value.IsNull()) { return tmp; } if (!value.IsArray()) { throw data_type_error { _("not an array") }; } for (json_value::ConstValueIterator itr = value.Begin(); itr != value.End(); itr++) { if (!itr->IsInt()) { throw data_type_error { _("not an integer") }; } tmp.push_back(itr->GetInt()); } return tmp; } template<> std::vector JsonContainer::getValue<>(const json_value& value) const { std::vector tmp {}; if (value.IsNull()) { return tmp; } if (!value.IsArray()) { throw data_type_error { _("not an array") }; } for (json_value::ConstValueIterator itr = value.Begin(); itr != value.End(); itr++) { if (!itr->IsDouble()) { throw data_type_error { _("not a double") }; } tmp.push_back(itr->GetDouble()); } return tmp; } template<> std::vector JsonContainer::getValue<>(const json_value& value) const { std::vector tmp {}; if (value.IsNull()) { return tmp; } if (!value.IsArray()) { throw data_type_error { _("not an array") }; } for (json_value::ConstValueIterator itr = value.Begin(); itr != value.End(); itr++) { if (!itr->IsObject()) { throw data_type_error { _("not an object") }; } JsonContainer* tmp_this = const_cast(this); const json_value tmpvalue(*itr, tmp_this->document_root_->GetAllocator()); JsonContainer tmp_data { tmpvalue }; tmp.push_back(tmp_data); } return tmp; } // setValue specialisations template<> void JsonContainer::setValue<>(json_value& jval, bool new_value) { jval.SetBool(new_value); } template<> void JsonContainer::setValue<>(json_value& jval, int new_value) { jval.SetInt(new_value); } template<> void JsonContainer::setValue<>(json_value& jval, const std::string new_value) { jval.SetString(new_value.data(), new_value.size(), document_root_->GetAllocator()); } template<> void JsonContainer::setValue<>(json_value& jval, const char * new_value) { 
jval.SetString(new_value, std::string(new_value).size(), document_root_->GetAllocator()); } template<> void JsonContainer::setValue<>(json_value& jval, double new_value) { jval.SetDouble(new_value); } template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value ) { jval.SetArray(); for (const auto& value : new_value) { // rapidjson doesn't like std::string... json_value s; s.SetString(value.data(), value.size(), document_root_->GetAllocator()); jval.PushBack(s, document_root_->GetAllocator()); } } template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value ) { jval.SetArray(); for (const auto& value : new_value) { json_value tmp_val; tmp_val.SetBool(value); jval.PushBack(tmp_val, document_root_->GetAllocator()); } } template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value ) { jval.SetArray(); for (const auto& value : new_value) { json_value tmp_val; tmp_val.SetInt(value); jval.PushBack(tmp_val, document_root_->GetAllocator()); } } template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value ) { jval.SetArray(); for (const auto& value : new_value) { json_value tmp_val; tmp_val.SetDouble(value); jval.PushBack(tmp_val, document_root_->GetAllocator()); } } template<> void JsonContainer::setValue<>(json_value& jval, std::vector new_value ) { jval.SetArray(); for (auto value : new_value) { json_document tmp_value; tmp_value.CopyFrom(*value.document_root_, document_root_->GetAllocator()); jval.PushBack(tmp_value, document_root_->GetAllocator()); } } template<> void JsonContainer::setValue<>(json_value& jval, JsonContainer new_value ) { jval.CopyFrom(new_value.getRaw(), document_root_->GetAllocator()); } }} // namespace leatherman::json_container leatherman-1.4.2+dfsg/json_container/tests/000075500000000000000000000000001332360634000207305ustar00rootroot00000000000000leatherman-1.4.2+dfsg/json_container/tests/json_container_test.cc000064400000000000000000001105451332360634000253170ustar00rootroot00000000000000#include #include #include static const std::string JSON = "{\"foo\" : {\"bar\" : 2}," " \"goo\" : 1," " \"bool\" : true," " \"string\" : \"a string\"," " \"string_with_null\" : \"a string\\u0000with\\u0000null\"," " \"null\" : null," " \"real\" : 3.1415," " \"vec\" : [1, 2], " " \"string_vec\" : [\"one\", \"two\\u0000null\"], " " \"nested\" : {" " \"foo\" : \"bar\"" " }" "}"; using namespace leatherman::json_container; TEST_CASE("JsonContainer::JsonContainer - passing JSON string", "[data]") { std::string json_value {}; SECTION("it should instantiate by passing any JSON value") { SECTION("object") { json_value = JSON; } SECTION("array") { SECTION("of numbers") { json_value = "[1, 2, 3]"; } SECTION("of booleans") { json_value = "[true, true]"; } SECTION("of strings") { json_value = "[\"spam\", \"eggs\", \"foo\"]"; } SECTION("of objects") { json_value = "[" + JSON + ",\n" + JSON + "]"; } SECTION("of arrays") { json_value = "[[1, 2, 3], [\"spam\", \"eggs\", \"foo\"]]"; } SECTION("of values of different types") { json_value = "[1, \"spam\",\n" + JSON + "]"; } } SECTION("std::string instance containing an empty JSON string") { json_value = "\"\""; } SECTION("string") { json_value = "\"foo\""; } SECTION("number - int") { json_value = "42"; } SECTION("number - float") { json_value = "3.14159"; } SECTION("boolean - true") { json_value = "true"; } SECTION("boolean - false") { json_value = "false"; } SECTION("null") { json_value = "null"; } REQUIRE_NOTHROW(JsonContainer { json_value }); } SECTION("it should throw 
a data_parse_error in case of empty string") { json_value = ""; REQUIRE_THROWS_AS(JsonContainer { json_value }, data_parse_error); } SECTION("it should throw a data_parse_error in case of invalid JSON") { SECTION("bad object") { json_value = "{\"foo\" : \"bar\", 42}"; } SECTION("bad key") { json_value = "{42 : \"bar\"}"; } SECTION("bad array") { json_value = "1, 2, 3"; } REQUIRE_THROWS_AS(JsonContainer { json_value }, data_parse_error); } } TEST_CASE("JsonContainer::get for object entries", "[data]") { JsonContainer data { JSON }; SECTION("it can get a root value") { REQUIRE(data.get("goo") == 1); } SECTION("it can get a nested value") { REQUIRE(data.get({"foo", "bar"}) == 2); } SECTION("it can get a bool value") { REQUIRE(data.get("bool") == true); } SECTION("it can get a string value") { REQUIRE(data.get("string") == "a string"); } SECTION("it can get a string value containing null character(s)") { REQUIRE(data.get("string_with_null") == std::string("a string\0with\0null", 18)); } SECTION("it can get a double value") { REQUIRE(data.get("real") == 3.1415); } SECTION("it can get a vector") { std::vector tmp { 1, 2 }; std::vector result { data.get>("vec") }; REQUIRE(tmp.size() == result.size()); REQUIRE(tmp[0] == result[0]); REQUIRE(tmp[1] == result[1]); } SECTION("it can get a string vector") { std::vector tmp { "one", { "two\0null", 8 } }; std::vector result { data.get>("string_vec") }; REQUIRE(tmp.size() == result.size()); REQUIRE(tmp[0] == result[0]); REQUIRE(tmp[1] == result[1]); } SECTION("it can get the root object") { REQUIRE(data.get().get("goo") == 1); } SECTION("it should behave correctly given a null value") { REQUIRE(data.get("null") == ""); REQUIRE(data.get("null") == 0); REQUIRE(data.get("null") == false); } SECTION("it can get the root entry") { SECTION("array of numbers") { JsonContainer data_array { "[1, 2, 3]" }; auto array = data_array.get>(); std::vector expected_array { 1, 2, 3 }; REQUIRE(array == expected_array); } SECTION("object") { auto object = data.get(); REQUIRE(object.get("goo") == 1); } SECTION("number") { JsonContainer data_number { "42" }; auto number = data_number.get(); REQUIRE(number == 42); } } SECTION("it throws a data_key_error in case of unknown object entry") { SECTION("unknown root object entry") { REQUIRE_THROWS_AS(data.get("unknown"), data_key_error); } SECTION("unknown nested object entry") { REQUIRE_THROWS_AS(data.get({ "nested", "unknown" }), data_key_error); } } SECTION("it throws a data_type_error in case of mismatch") { SECTION("root entry") { SECTION("not a boolean") { REQUIRE_THROWS_AS(data.get("string"), data_type_error); } SECTION("not an integer") { REQUIRE_THROWS_AS(data.get("real"), data_type_error); } SECTION("not a double") { REQUIRE_THROWS_AS(data.get("goo"), data_type_error); } SECTION("not a string") { REQUIRE_THROWS_AS(data.get("real"), data_type_error); } SECTION("array mismatches") { SECTION("not an array") { REQUIRE_THROWS_AS(data.get>("goo"), data_type_error); } SECTION("mismatch type on array entry") { REQUIRE_THROWS_AS(data.get("goo"), data_type_error); } } } SECTION("nested entry") { data.set({ "foo", "spam" }, JsonContainer { JSON }); SECTION("not a boolean") { REQUIRE_THROWS_AS(data.get({ "foo", "spam", "string" }), data_type_error); } SECTION("not an integer") { REQUIRE_THROWS_AS(data.get({ "foo", "spam", "real" }), data_type_error); } SECTION("not a double") { REQUIRE_THROWS_AS(data.get({ "foo", "spam", "goo" }), data_type_error); } SECTION("not a string") { REQUIRE_THROWS_AS(data.get({ "foo", "spam", "real" }), 
data_type_error); } SECTION("array mismatches") { SECTION("not an array") { REQUIRE_THROWS_AS( data.get>({ "foo", "spam", "goo" }), data_type_error); } SECTION("mismatch type on array entry") { REQUIRE_THROWS_AS(data.get({ "foo", "spam", "goo" }), data_type_error); } } } } SECTION("it can always return a JsonContainer instance of an entry") { SECTION("scalars") { SECTION("boolean") { REQUIRE(data.get("bool").get() == true); } SECTION("integer") { REQUIRE(data.get("goo").get() == 1); } SECTION("double") { REQUIRE(data.get("real").get() == 3.1415); } SECTION("string") { REQUIRE(data.get("string").get() == "a string"); } } SECTION("object") { REQUIRE(data.get("nested").get("foo") == "bar"); } SECTION("array") { std::vector expected_array {1, 2}; REQUIRE(data.get("vec").get>() == expected_array); } } SECTION("it can access array entries") { SECTION("it throws a data_type_error in case of type mismatch") { SECTION("root entry") { JsonContainer a { "[1, 2, 3]" }; REQUIRE_THROWS_AS(a.get(1), data_type_error); } SECTION("object entry") { REQUIRE_THROWS_AS(data.get("vec", 1), data_type_error); } } SECTION("it throws a data_index_error in case of index out of bounds") { SECTION("root entry") { JsonContainer a { "[1, 2, 3]" }; REQUIRE_THROWS_AS(a.get(10), data_index_error); } SECTION("object entry") { REQUIRE_THROWS_AS(data.get("vec", 10), data_index_error); } } SECTION("it can get a value") { SECTION("boolean") { JsonContainer b { "[false, false, true, false]" }; REQUIRE_FALSE(b.get(3)); } SECTION("integer") { JsonContainer i { "[1, 2, 3]" }; REQUIRE(i.get(1) == 2); } SECTION("double") { JsonContainer d { "[3.14, 2.718]" }; REQUIRE(d.get(1) == 2.718); } SECTION("string") { JsonContainer s { "[\"one\", \"two\"]" }; REQUIRE(s.get(1) == "two"); } SECTION("object") { JsonContainer o { "[ {\"spam\":\"eggs\"}, {\"foo\":\"bar\"} ]" }; auto retrieved_o = o.get(0); REQUIRE(retrieved_o.size() == 1u); REQUIRE(retrieved_o.get("spam") == "eggs"); } SECTION("array") { JsonContainer a { "[ [1, 2], [false, true], [\"ab\", \"cd\"] ]" }; std::vector e_a { false, true }; REQUIRE(a.get>(1) == e_a); } } SECTION("array with values of different types") { JsonContainer a { "[ 1, \"foo\", true, [2.718, 3.14], 42, 42.0, " "{\"spam\":\"eggs\"} ]" }; SECTION("boolean") { REQUIRE(a.get(2)); } SECTION("integer") { REQUIRE(a.get(0) == 1); } SECTION("double") { REQUIRE(a.get(5) == 42.0); } SECTION("string") { REQUIRE(a.get(1) == "foo"); } SECTION("object") { auto retrieved_o = a.get(6); REQUIRE(retrieved_o.size() == 1u); REQUIRE(retrieved_o.get("spam") == "eggs"); } SECTION("array") { std::vector expected_array { 2.718, 3.14 }; REQUIRE(a.get>(3) == expected_array); } } } } TEST_CASE("JsonContainer::getWithDefault", "[data]") { JsonContainer data { JSON }; JsonContainer data_a { "[1, 2, 3]" }; std::vector ints { 1, 2, 3 }; std::vector doubles { 1.0, 2.0, 3.0 }; std::vector bools { false, true, false }; std::vector strings { "foo", "bar", "baz" }; SECTION("it can provide a default value if a root entry key is not found") { REQUIRE(data.getWithDefault("dne", 42) == 42); REQUIRE(data.getWithDefault("dne", 42.0) == 42.0); REQUIRE(data.getWithDefault("dne", true) == true); REQUIRE(data.getWithDefault("dne", "foo") == "foo"); REQUIRE(data.getWithDefault>("dne", ints) == ints); REQUIRE(data.getWithDefault>("dne", doubles) == doubles); REQUIRE(data.getWithDefault>("dne", bools) == bools); REQUIRE(data.getWithDefault>("dne", strings) == strings); } SECTION("throw a data_type_error if the root entry is not an object") { 
REQUIRE_THROWS_AS(data_a.getWithDefault("foo", 42), data_type_error); } SECTION("it can provide a default value if a nested key is not found") { JsonContainer lv_2 {}; lv_2.set("entry_3", data); JsonContainer lv_1 {}; lv_1.set("entry_2", lv_2); std::vector missing_entry { "entry_2", "entry_3", "dne" }; REQUIRE(lv_1.getWithDefault(missing_entry, 42) == 42); REQUIRE(lv_1.getWithDefault(missing_entry, 42.0) == 42.0); REQUIRE(lv_1.getWithDefault(missing_entry, true) == true); REQUIRE(lv_1.getWithDefault(missing_entry, "foo") == "foo"); REQUIRE(lv_1.getWithDefault>(missing_entry, ints) == ints); REQUIRE(lv_1.getWithDefault>(missing_entry, doubles) == doubles); REQUIRE(lv_1.getWithDefault>(missing_entry, bools) == bools); REQUIRE(lv_1.getWithDefault>(missing_entry, strings) == strings); } SECTION("throw a data_type_error if the parent of a nested entry is not an object") { JsonContainer more_data_a {}; more_data_a.set>("ints_entry", ints); REQUIRE_THROWS_AS(more_data_a.getWithDefault({ "ints_entry", "foo" }, 42), data_type_error); } } TEST_CASE("JsonContainer::toString", "[data]") { SECTION("root entry") { SECTION("object") { JsonContainer o {}; o.set("spam", "eggs"); REQUIRE(o.toString() == "{\"spam\":\"eggs\"}"); } SECTION("array") { JsonContainer a { "[1, 2, 3]" }; REQUIRE(a.toString() == "[1,2,3]"); } SECTION("multi type array") { JsonContainer mt_a { "[1, false, \"s\"]" }; REQUIRE(mt_a.toString() == "[1,false,\"s\"]"); } SECTION("scalar") { JsonContainer s { "42" }; REQUIRE(s.toString() == "42"); } } JsonContainer data { JSON }; SECTION("root object entry") { REQUIRE(data.toString("goo") == "1"); } SECTION("nested object entry") { REQUIRE(data.toString({ "nested", "foo" }) == "\"bar\""); } } TEST_CASE("JsonContainer::toPrettyString", "[data]") { SECTION("does not throw when the root is") { SECTION("a string") { JsonContainer data_s { "\"some text\"" }; REQUIRE_NOTHROW(data_s.toPrettyString()); } SECTION("an array") { JsonContainer data_a { "[1, 2, 3]" }; REQUIRE_NOTHROW(data_a.toPrettyString()); } SECTION("an object") { JsonContainer data_o { JSON }; REQUIRE_NOTHROW(data_o.toPrettyString()); } SECTION("an object containing nested objects with an array") { JsonContainer data_ooa {}; JsonContainer tmp {}; tmp.set>("bar", { 1, 2, 3 }); JsonContainer tmp_two {}; tmp_two.set("spam", tmp); tmp_two.set>("beans", { 55, 56, 57 }); JsonContainer tmp_three {}; tmp_three.set("eggs", tmp_two); data_ooa.set("foo", tmp_three); REQUIRE_NOTHROW(data_ooa.toPrettyString()); } } } TEST_CASE("JsonContainer::toPrettyJson", "[data]") { SECTION("it pretty prints valid json") { SECTION("a null value") { std::string EXPECTED_OUTPUT = "null"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a double") { std::string EXPECTED_OUTPUT = "42.5"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a bool") { std::string EXPECTED_OUTPUT = "true"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("an int") { std::string EXPECTED_OUTPUT = "42"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a string") { std::string EXPECTED_OUTPUT = "\"string\""; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a simple array") { std::string 
EXPECTED_OUTPUT = "[\n" " null,\n" " 42.5,\n" " true,\n" " 42,\n" " \"string\"\n" "]"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a simple object") { std::string EXPECTED_OUTPUT = "{\n" " \"null-key\": null,\n" " \"double-key\": 42.5,\n" " \"bool-key\": true,\n" " \"int-key\": 42,\n" " \"string-key\": \"string\"\n" "}"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a nested object") { std::string EXPECTED_OUTPUT = "{\n" " \"object-key\": {\n" " \"null-key\": null,\n" " \"double-key\": 42.5,\n" " \"bool-key\": true,\n" " \"int-key\": 42,\n" " \"string-key\": \"string\"\n" " },\n" " \"array-key\": [\n" " null,\n" " 42.5,\n" " true,\n" " 42,\n" " \"string\"\n" " ]\n" "}"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } SECTION("a nested array") { std::string EXPECTED_OUTPUT = "[\n" " {\n" " \"null-key\": null,\n" " \"double-key\": 42.5,\n" " \"bool-key\": true,\n" " \"int-key\": 42,\n" " \"string-key\": \"string\"\n" " },\n" " [\n" " null,\n" " 42.5,\n" " true,\n" " 42,\n" " \"string\"\n" " ]\n" "]"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(); REQUIRE(pretty_json == EXPECTED_OUTPUT); } } SECTION("handles variable padding") { std::string EXPECTED_OUTPUT = "{\n" " \"object-key\": {\n" " \"null-key\": null,\n" " \"double-key\": 42.5,\n" " \"bool-key\": true,\n" " \"int-key\": 42,\n" " \"string-key\": \"string\"\n" " },\n" " \"array-key\": [\n" " null,\n" " 42.5,\n" " true,\n" " 42,\n" " \"string\"\n" " ]\n" "}"; JsonContainer data(EXPECTED_OUTPUT); auto pretty_json = data.toPrettyJson(2); REQUIRE(pretty_json == EXPECTED_OUTPUT); } } TEST_CASE("JsonContainer::empty", "[data]") { SECTION("works correctly for an empty JsonContainer instance") { JsonContainer data {}; REQUIRE(data.empty()); } SECTION("works correctly if the root is an empty array") { JsonContainer data { "[]" }; REQUIRE(data.empty()); } SECTION("works correctly for an non-empty JsonContainer instance") { JsonContainer data {}; data.set("spam", 1); REQUIRE_FALSE(data.empty()); } SECTION("works correctly if the root is an non-empty array") { JsonContainer data { "[1, 2, 3]" }; REQUIRE_FALSE(data.empty()); } } TEST_CASE("JsonContainer::size", "[data]") { SECTION("works correctly on the root (no key argument)") { SECTION("empty object") { JsonContainer data {}; REQUIRE(data.size() == 0u); } SECTION("the root is an empty array") { JsonContainer data { "[]" }; REQUIRE(data.size() == 0u); } SECTION("non-empty singleton object") { JsonContainer data {}; data.set("spam", 1); REQUIRE(data.size() == 1u); } SECTION("non-empty multi element object") { JsonContainer bigger_data { JSON }; REQUIRE(bigger_data.size() == 10u); } SECTION("non-empty array") { JsonContainer data { "[1, 2, 3]" }; REQUIRE(data.size() == 3u); } } JsonContainer data { JSON }; SECTION("works correctly on an object entry") { SECTION("entry is a scalar") { REQUIRE(data.size("goo") == 0u); } SECTION("entry is an object") { REQUIRE(data.size("foo") == 1u); } SECTION("entry is an array") { REQUIRE(data.size("vec") == 2u); } } SECTION("works correctly on a nested entry") { data.set({ "foo", "spam" }, JsonContainer { JSON }); SECTION("entry is a scalar") { REQUIRE(data.size({ "foo", "spam", "goo" }) == 0u); } SECTION("entry is an object") { REQUIRE(data.size({ "foo", "spam", "nested" }) == 1u); } SECTION("entry 
is an array") { REQUIRE(data.size({ "foo", "spam", "vec" }) == 2u); } } } TEST_CASE("JsonContainer::includes", "[data]") { SECTION("does not throw for an empty JsonContainer instance") { JsonContainer data {}; REQUIRE_FALSE(data.includes("foo")); } SECTION("it should not throw if the root is not a JSON object") { JsonContainer data { "[1, 2, 3]" }; REQUIRE_FALSE(data.includes("foo")); } SECTION("Document/object lookups") { JsonContainer msg { JSON }; REQUIRE(msg.includes("foo") == true); REQUIRE(msg.includes({ "foo", "bar" }) == true); REQUIRE(msg.includes({ "foo", "baz" }) == false); } SECTION("Non object/document lookups") { JsonContainer msg { "\"foo\"" }; REQUIRE(msg.includes({ "bar", "bar" }) == false); REQUIRE(msg.includes("foo") == false); } } TEST_CASE("JsonContainer::set", "[data]") { JsonContainer msg {}; SECTION("it should add a new pair to the root") { msg.set("foo", 4); REQUIRE(msg.get("foo") == 4); } SECTION("it allows the creation of a nested structure") { msg.set({"level1", "level21"}, 0); msg.set("bool1", true); msg.set({"level1", "level22"}, "a string"); msg.set("level11", "different string"); REQUIRE(msg.get({ "level1", "level21" }) == 0); REQUIRE(msg.get("bool1") == true); REQUIRE(msg.get({"level1", "level22"}) == "a string"); REQUIRE(msg.get("level11") == "different string"); } SECTION("it allows resetting an integer value") { msg.set("i entry", 0); REQUIRE(msg.includes("i entry")); REQUIRE(msg.get("i entry") == 0); msg.set("i entry", 5); REQUIRE(msg.get("i entry") == 5); } SECTION("it allows resetting a double value") { msg.set("d entry", 3.14159); REQUIRE(msg.includes("d entry")); REQUIRE(msg.get("d entry") == 3.14159); msg.set("d entry", 2.71828); REQUIRE(msg.get("d entry") == 2.71828); } SECTION("it allows resetting a boolean value") { msg.set("b entry", true); REQUIRE(msg.includes("b entry")); REQUIRE(msg.get("b entry") == true); msg.set("b entry", false); REQUIRE(msg.get("b entry") == false); } SECTION("it allows resetting a string value") { msg.set("s entry", "bar"); REQUIRE(msg.includes("s entry")); REQUIRE(msg.get("s entry") == "bar"); msg.set("s entry", "spam"); REQUIRE(msg.get("s entry") == "spam"); } SECTION("it allows resetting a string vector value") { std::vector s_v { "foo", "bar" }; msg.set>("s_v entry", s_v); REQUIRE(msg.includes("s_v entry")); REQUIRE(msg.get>("s_v entry") == s_v); std::vector s_v_other { "spam", "eggs" }; msg.set>("s_v entry", s_v_other); REQUIRE(msg.get>("s_v entry") == s_v_other); } SECTION("it allows resetting a JsonContainer value") { JsonContainer d {}; d.set("i", 1); msg.set("d_c entry", d); auto i_entry = msg.get("d_c entry"); // Expecting msg = { "d_c entry" : {"i" : 1} } REQUIRE(msg.includes("d_c entry")); REQUIRE(i_entry.get("i") == 1); JsonContainer d_other {}; d_other.set("b", true); msg.set("d_c entry", d_other); auto b_entry = msg.get("d_c entry"); // Expecting msg = { "d_c entry" : {"b" : true} } REQUIRE(b_entry.get("b")); } SECTION("it can set a key to a vector") { std::vector strings { "foo", "bar" }; msg.set>("sv", strings); std::vector ints { 4, 2 }; msg.set>("iv", ints); std::vector bools { true, false }; msg.set>("bv", bools); std::vector doubles { 0.00, 9.99 }; msg.set>("dv", doubles); REQUIRE(msg.get>("sv")[0] == "foo"); REQUIRE(msg.get>("sv")[1] == "bar"); REQUIRE(msg.get>("iv")[0] == 4); REQUIRE(msg.get>("iv")[1] == 2); REQUIRE(msg.get>("bv")[0] == true); REQUIRE(msg.get>("bv")[1] == false); REQUIRE(msg.get>("dv")[0] == 0.00); REQUIRE(msg.get>("dv")[1] == 9.99); } SECTION("it should throw a data_key_error in 
case root is not an object") { std::string json_array { "[1, 2, 3]" }; JsonContainer data_array { json_array }; REQUIRE_THROWS_AS(data_array.set("foo", "bar"), data_key_error); } SECTION("it should throw a data_key_error in case a known inner key is not " "associated with a JSON object") { JsonContainer d_c { JSON }; REQUIRE_THROWS_AS(d_c.set({ "vec", "foo" }, "bar"), data_key_error); } } TEST_CASE("JsonContainer::keys", "[data]") { SECTION("It returns a vector of keys") { JsonContainer data { "{ \"a\" : 1, " " \"b\" : 2, " " \"c\\u0000null\" : 2}" }; std::vector expected_keys { "a", "b", { "c\0null", 6 } }; REQUIRE(data.keys() == expected_keys); } SECTION("It returns an empty vector when the JsonContainer is empty") { JsonContainer data {}; REQUIRE(data.keys().size() == 0u); } SECTION("It returns an empty vector when the JsonContainer is an array") { JsonContainer data_array { "[1, 2, 3]" }; REQUIRE(data_array.keys().size() == 0u); } } TEST_CASE("JsonContainer::type", "[data]") { JsonContainer data {}; SECTION("When no key is passed it retrieves the type of the root value") { SECTION("array") { JsonContainer data_array { "[1, 2, 3]" }; REQUIRE(data_array.type() == DataType::Array); } SECTION("object") { data.set("b_entry", false); REQUIRE(data.type() == DataType::Object); } SECTION("integer") { JsonContainer data_number { "42" }; REQUIRE(data_number.type() == DataType::Int); } } SECTION("When a single key is passed") { SECTION("it throws a data_key_error if the key is unknown") { REQUIRE_THROWS_AS(data.type("foo"), data_key_error); } SECTION("it can distinguish a Bool (false) value") { data.set("b_entry", false); REQUIRE(data.type("b_entry") == DataType::Bool); } SECTION("it can distinguish a Bool (true) value") { data.set("b_entry", true); REQUIRE(data.type("b_entry") == DataType::Bool); } SECTION("it can distinguish an Object (JsonContainer) value") { JsonContainer tmp {}; tmp.set("eggs", "spam"); data.set("obj_entry", tmp); REQUIRE(data.type("obj_entry") == DataType::Object); } SECTION("it can distinguish an Array value") { std::vector tmp { "one", "two", "three" }; data.set>("array_entry", tmp); REQUIRE(data.type("array_entry") == DataType::Array); } SECTION("it can distinguish a String value") { data.set("eggs", "spam"); REQUIRE(data.type("eggs") == DataType::String); } SECTION("it can distinguish an Int value") { data.set("int_entry", 42); REQUIRE(data.type("int_entry") == DataType::Int); } SECTION("it can distinguish a Double value") { SECTION("defined by set") { data.set("d_entry", 2.71828); REQUIRE(data.type("d_entry") == DataType::Double); } SECTION("defined by JSON string given to the ctor") { JsonContainer data_number { "2.71828" }; REQUIRE(data_number.type() == DataType::Double); } } SECTION("it can distinguish a null value") { JsonContainer data_with_null { "{\"the_null\" : null}" }; REQUIRE(data_with_null.type("the_null") == DataType::Null); } } SECTION("When multiple keys are passed") { JsonContainer tmp {}; data.set("stuff", tmp); SECTION("it throws a data_key_error if a key is unknown") { REQUIRE_THROWS_AS(data.type({ "stuff", "bar" }), data_key_error); } SECTION("it can distinguish a Bool (false) value") { data.set({ "stuff", "b_entry" }, false); REQUIRE(data.type({ "stuff", "b_entry" }) == DataType::Bool); } SECTION("it can distinguish a Bool (true) value") { data.set({ "stuff", "b_entry" }, true); REQUIRE(data.type({ "stuff", "b_entry" }) == DataType::Bool); } SECTION("it can distinguish an Object (JsonContainer) value") { JsonContainer tmp {}; tmp.set("eggs", 
"spam"); data.set({ "stuff", "obj_entry" }, tmp); REQUIRE(data.type({ "stuff", "obj_entry" }) == DataType::Object); } SECTION("it can distinguish an Array value") { std::vector tmp { "one", "two", "three" }; data.set>({ "stuff", "array_entry" }, tmp); REQUIRE(data.type({ "stuff", "array_entry" }) == DataType::Array); } SECTION("it can distinguish a String value") { data.set({ "stuff", "eggs" }, "spam"); REQUIRE(data.type({ "stuff", "eggs" }) == DataType::String); } SECTION("it can distinguish an Int value") { data.set({ "stuff", "int_entry" }, 42); REQUIRE(data.type({ "stuff", "int_entry" }) == DataType::Int); } SECTION("it can distinguish a Double value") { data.set({ "stuff", "d_entry" }, 2.71828); REQUIRE(data.type({ "stuff", "d_entry" }) == DataType::Double); } SECTION("it can distinguish a null value") { JsonContainer data_with_null { "{\"the_null\" : null}" }; data.set({ "stuff", "more_stuff" }, data_with_null); auto data_type = data.type({ "stuff", "more_stuff", "the_null" }); REQUIRE(data_type == DataType::Null); } } } TEST_CASE("JsonContainer::type for arrays entries", "[data]") { JsonContainer data { "[false, -42, 3.14, \"spam\", {\"foo\" : [3, true]}, " "[1, 2, 3, 4] ]" }; SECTION("root entry") { SECTION("array") { JsonContainer not_an_aray { JSON }; REQUIRE_THROWS_AS(not_an_aray.type(1), data_type_error); } SECTION("array with values of different types") { JsonContainer data_array { "[1, \"spam\", false]" }; REQUIRE(data_array.type() == DataType::Array); } SECTION("boolean") { REQUIRE(data.type(0) == DataType::Bool); } SECTION("integer") { REQUIRE(data.type(1) == DataType::Int); } SECTION("double") { REQUIRE(data.type(2) == DataType::Double); } SECTION("string") { REQUIRE(data.type(3) == DataType::String); } SECTION("object") { REQUIRE(data.type(4) == DataType::Object); } SECTION("array") { REQUIRE(data.type(5) == DataType::Array); } } SECTION("object entry") { JsonContainer o { JSON }; o.set("multi type array", data); SECTION("container") { REQUIRE(o.type("multi type array") == DataType::Array); } SECTION("double") { REQUIRE(o.type("multi type array", 2) == DataType::Double); } SECTION("string") { REQUIRE(o.type("multi type array", 3) == DataType::String); } } SECTION("nested entry") { JsonContainer o { JSON }; o.set({ "nested", "multi type array" }, data); SECTION("container") { REQUIRE(o.type({ "nested", "multi type array" }) == DataType::Array); } SECTION("double") { REQUIRE(o.type({ "nested", "multi type array" }, 2) == DataType::Double); } SECTION("string") { REQUIRE(o.type({ "nested", "multi type array" }, 3) == DataType::String); } } } leatherman-1.4.2+dfsg/locale/000075500000000000000000000000001332360634000160125ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/CMakeLists.txt000064400000000000000000000025021332360634000205510ustar00rootroot00000000000000if (LEATHERMAN_USE_LOCALES) find_package(Boost 1.54 REQUIRED COMPONENTS locale system) if (BOOST_STATIC AND LEATHERMAN_USE_ICU) find_package(ICU COMPONENTS i18n uc) endif() else() find_package(Boost 1.54 REQUIRED regex) endif() add_leatherman_includes(${Boost_INCLUDE_DIRS}) add_leatherman_deps(${Boost_LIBRARIES}) if (ICU_LIBRARIES) add_leatherman_deps(${ICU_LIBRARIES}) endif() if (LEATHERMAN_USE_LOCALES AND BOOST_STATIC AND APPLE) # Boost.Locale relies on libiconv; if not using shared boost libraries # we need to include the dependency ourselves. So far this is only a # problem on Mac OS X. 
add_leatherman_deps(iconv) endif() leatherman_dependency(util) add_leatherman_headers(inc/leatherman) if (LEATHERMAN_USE_LOCALES) add_leatherman_library(src/locale.cc) if (GETTEXT_ENABLED) # This test relies on translation .mo files being generated. # Projects that don't support localization yet still need # tests to pass, so only enable these tests if gettext is # available. add_leatherman_test(tests/locale.cc) endif() else() add_leatherman_library(disabled/locale.cc) endif() add_leatherman_test(tests/format.cc) if (LEATHERMAN_USE_LOCALES AND BUILDING_LEATHERMAN) project(leatherman_locale) add_subdirectory(locales) endif() leatherman-1.4.2+dfsg/locale/disabled/000075500000000000000000000000001332360634000175615ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/disabled/locale.cc000064400000000000000000000021301332360634000213230ustar00rootroot00000000000000#include namespace leatherman { namespace locale { using namespace std; const std::locale get_locale(string const& id, string const& domain, vector const& paths) { // std::locale is not supported on these platforms throw runtime_error("leatherman::locale::get_locale is not supported on this platform"); } void clear_domain(string const& domain) { throw runtime_error("leatherman::locale::clear_domain is not supported on this platform"); } string translate(string const& msg, string const& domain) { return msg; } string translate_p(string const& context, string const& msg, string const& domain) { return msg; } string translate_n(string const& single, string const& plural, int n, string const& domain) { return n == 1 ? single : plural; } string translate_np(string const& context, string const& single, string const& plural, int n, string const& domain) { return n == 1 ? single : plural; } }} // namespace leatherman::locale leatherman-1.4.2+dfsg/locale/inc/000075500000000000000000000000001332360634000165635ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/inc/leatherman/000075500000000000000000000000001332360634000207035ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/inc/leatherman/locale/000075500000000000000000000000001332360634000221425ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/inc/leatherman/locale/locale.hpp000064400000000000000000000310471332360634000241170ustar00rootroot00000000000000/** * @file * Declares utility functions for setting the locale. * * Boost.Locale is not available on all platforms. This header is implemented * so that it can switch between using boost::locale::format and boost::format * (without localization) by defining LEATHERMAN_I18N. Because these classes * do not use the same substitution characters, but gettext replacement relies * on matching a string, specify that both "%N%" (Boost.Format) and "{N}" * (Boost.Locale) should be considered substitution characters when using * leatherman::locale::format, and "{N}" should be preferred. When i18n is * disabled, it will regex replace "{(\d+)}" to "%\1%" for use with Boost.Format. */ #pragma once #include #include #include #ifdef LEATHERMAN_I18N #include #else #include #include // Unset PROJECT_NAME so we only create a single locale. #undef PROJECT_NAME #define PROJECT_NAME "" #undef PROJECT_DIR #define PROJECT_DIR #endif namespace leatherman { namespace locale { /** * Gets a locale object for the specified locale id. * @param id The locale ID, defaults to a UTF-8 compatible system default. * @param domain The catalog domain to use for i18n via gettext. * @param paths Search paths for localization files. * @return The locale. 
If a locale for the specified domain already exists, it returns * the same locale until clear_domain is called for that domain. * Throws boost::locale::conv::conversion_error if the system locale is invalid or * the catalog for the specified language can't be used with the system locale encoding. * * Unsafe to use with GCC on AIX or Solaris, as std::locale is busted. */ const std::locale get_locale(std::string const& id = "", std::string const& domain = PROJECT_NAME, std::vector const& paths = {PROJECT_DIR}); /** * Clears the locale for a specific domain. * WARNING: This may invalidate existing references, so only use for testing. * @param domain The catalog domain to clear. */ void clear_domain(std::string const& domain = PROJECT_NAME); /** * Translate text using the locale initialized by this library. * If localization encounters an error, the original message will be returned. * @param msg The string to translate. * @param domain The catalog domain to use for i18n via gettext. * @return The translated string. */ std::string translate(std::string const& msg, std::string const& domain = PROJECT_NAME); /** * Translate text in a given context using the locale initialized by this library. * Context can be used to disambiguate the same word used multiple different ways. * If localization encounters an error, the original message will be returned. * @param context The context string. * @param msg The string to translate. * @param domain The catalog domain to use for i18n via gettext. * @return The translated string. */ std::string translate_p(std::string const& context, std::string const& msg, std::string const& domain = PROJECT_NAME); /** * Translate plural text using the locale initialized by this library. * If localization encounters an error, the `single` message will be returned for n == 1, * and the `plural` message will be returned for all other values of n. * @param single The singuar form to translate. * @param plural The plural form to translate. * @param n Number of items, used to choose singular or plural. * @param domain The catalog domain to use for i18n via gettext. * @return The translated string. */ std::string translate_n(std::string const& single, std::string const& plural, int n, std::string const& domain = PROJECT_NAME); /** * Translate plural text in a given context using the locale initialized by this library. * Context can be used to disambiguate the same word used multiple different ways. * If localization encounters an error, the `single` message will be returned for n == 1, * and the `plural` message will be returned for all other values of n. * @param context The context string. * @param single The singuar form to translate. * @param plural The plural form to translate. * @param n Number of items, used to choose singular or plural. * @param domain The catalog domain to use for i18n via gettext. * @return The translated string. */ std::string translate_np(std::string const& context, std::string const& single, std::string const& plural, int n, std::string const& domain = PROJECT_NAME); namespace { /* * Anonymous namespace, limiting access to current namespace */ /** * Translates and formats text using the locale initialized by this library. * @param trans The translation function. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template std::string format_common(std::function&& trans, TArgs... 
args) { // Create and apply formatter here, as we want to guarantee the lifetime of the arguments. // boost::locale::format doesn't make copies, and a common gotcha is using temporary arguments // to build up the formatter. // Technique for the one-liner explained at http://florianjw.de/en/variadic_templates.html. static const std::string domain{PROJECT_NAME}; #ifdef LEATHERMAN_I18N boost::locale::format form{trans(domain)}; (void) std::initializer_list{ ((void)(form % args), 0)... }; return form.str(get_locale("", domain)); #else // When locales are disabled, use boost::format, which expects %N% style formatting static const boost::regex match{"\\{(\\d+)\\}"}; static const std::string repl{"%\\1%"}; boost::format form{boost::regex_replace(trans(domain), match, repl)}; (void) std::initializer_list{ ((void)(form % args), 0)... }; return form.str(); #endif } } /** * Translates and formats text using the locale initialized by this library. * Use the default domain, i.e. PROJECT_NAME. * Replaces the use of boost::format with a variadic function call. * Specialized to the PROJECT_NAME domain. * @param fmt The format string. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template std::string format(std::string const& fmt, TArgs... args) { auto trans = [&fmt](const std::string& domain) {return translate(fmt, domain);}; return format_common(std::move(trans), std::forward(args)...); } /** * Translates and formats text using the locale initialized by this library * Alias for format(...); Convenience function for adding i18n support. * @param fmt The format string. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template inline std::string _(std::string const& fmt, TArgs&&... args) { return format(std::forward(fmt), std::forward(args)...); } /** * Translates and formats text in a given context using the locale initialized by this library. * Use the default domain, i.e. PROJECT_NAME. * Replaces the use of boost::format with a variadic function call. * Specialized to the PROJECT_NAME domain. * @param context The context string. * @param fmt The format string. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template std::string format_p(std::string const& context, std::string const& fmt, TArgs... args) { auto trans = [&context, &fmt](const std::string& domain) {return translate_p(context, fmt, domain);}; return format_common(std::move(trans), std::forward(args)...); } /** * Translates and formats text using the locale initialized by this library * Alias for format_p(...); Convenience function for adding i18n support. * @param context The context string. * @param fmt The format string. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template inline std::string p_(std::string const& context, std::string const& fmt, TArgs&&... args) { return format_p(std::forward(context), std::forward(fmt), std::forward(args)...); } /** * Translates and formats plural text using the locale initialized by this library. * Use the default domain, i.e. PROJECT_NAME. * Replaces the use of boost::format with a variadic function call. * Specialized to the PROJECT_NAME domain. * @param single The singular format string. * @param plural The plural format string. 
* @param n Number of items, used to choose singular or plural. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template std::string format_n(std::string const& single, std::string const& plural, int n, TArgs... args) { auto trans = [&single, &plural, n](const std::string& domain) {return translate_n(single, plural, n, domain);}; return format_common(std::move(trans), std::forward(args)...); } /** * Translates and formats plural text using the locale initialized by this library. * Alias for format_n(...); Convenience function for adding i18n support. * @param single The singular format string. * @param plural The plural format string. * @param n Number of items, used to choose singular or plural. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template inline std::string n_(std::string const& single, std::string const& plural, int n, TArgs&&... args) { return format_n(std::forward(single), std::forward(plural), std::forward(n), std::forward(args)...); } /** * Translates and formats plural text in a given context using the locale initialized by this library. * Replaces the use of boost::format with a variadic function call. * Use the default domain, i.e. PROJECT_NAME. * Specialized to the PROJECT_NAME domain. * @param context The context string. * @param single The singular format string. * @param plural The plural format string. * @param n Number of items, used to choose singular or plural. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template std::string format_np(std::string const& context, std::string const& single, std::string const& plural, int n, TArgs... args) { auto trans = [&context, &single, &plural, n](const std::string& domain) {return translate_np(context, single, plural, n, domain);}; return format_common(std::move(trans), std::forward(args)...); } /** * Translates and formats plural text in a given context using the locale initialized by this library. * Alias for format_np(...); Convenience function for adding i18n support. * @param context The context string. * @param single The singular format string. * @param plural The plural format string. * @param n Number of items, used to choose singular or plural. * @param args Format arguments. * @return The string generated by translating the format string, then applying the arguments. */ template inline std::string np_(std::string const& context, std::string const& single, std::string const& plural, int n, TArgs&&... args) { return format_np(std::forward(context), std::forward(single), std::forward(plural), std::forward(n), std::forward(args)...); } }} // namespace leatherman::locale leatherman-1.4.2+dfsg/locale/locales/000075500000000000000000000000001332360634000174345ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/locales/CMakeLists.txt000064400000000000000000000003701332360634000221740ustar00rootroot00000000000000if (LEATHERMAN_TOPLEVEL) gettext_templates(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/../tests/locale.cc) endif() gettext_compile(${CMAKE_CURRENT_SOURCE_DIR} share/locale) SET_DIRECTORY_PROPERTIES(PROPERTIES CLEAN_NO_CUSTOM TRUE) leatherman-1.4.2+dfsg/locale/locales/fr.po000064400000000000000000000023041332360634000204020ustar00rootroot00000000000000# French translations for leatherman_locale package. 
# Copyright (C) 2016 Puppet # This file is distributed under the same license as the leatherman_locale package. # Automatically generated, 2016. # msgid "" msgstr "" "Project-Id-Version: leatherman_locale \n" "Report-Msgid-Bugs-To: docs@puppet.com\n" "POT-Creation-Date: \n" "PO-Revision-Date: \n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" #: locale/locales/../tests/locale.cc msgid "requesting {1,number}." msgstr "demande {1,number}." #: locale/locales/../tests/locale.cc msgctxt "foo" msgid "requesting {1,number}." msgstr "demandé {1,number}." #: locale/locales/../tests/locale.cc msgid "requesting {1,number} item." msgid_plural "requesting {1,number} items." msgstr[0] "demande {1,number} objet." msgstr[1] "demande {1,number} objets." #: locale/locales/../tests/locale.cc msgctxt "foo" msgid "requesting {1,number} item." msgid_plural "requesting {1,number} items." msgstr[0] "demandé {1,number} objet." msgstr[1] "demandé {1,number} objets." leatherman-1.4.2+dfsg/locale/locales/leatherman_locale.pot000064400000000000000000000021261332360634000236200ustar00rootroot00000000000000# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR Puppet # This file is distributed under the same license as the leatherman_locale package. # FIRST AUTHOR , YEAR. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: leatherman_locale \n" "Report-Msgid-Bugs-To: docs@puppet.com\n" "POT-Creation-Date: \n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n" #: locale/locales/../tests/locale.cc msgid "requesting {1,number}." msgstr "" #: locale/locales/../tests/locale.cc msgctxt "foo" msgid "requesting {1,number}." msgstr "" #: locale/locales/../tests/locale.cc msgid "requesting {1,number} item." msgid_plural "requesting {1,number} items." msgstr[0] "" msgstr[1] "" #: locale/locales/../tests/locale.cc msgctxt "foo" msgid "requesting {1,number} item." msgid_plural "requesting {1,number} items." msgstr[0] "" msgstr[1] "" leatherman-1.4.2+dfsg/locale/src/000075500000000000000000000000001332360634000166015ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/src/locale.cc000064400000000000000000000062351332360634000203550ustar00rootroot00000000000000#include #include #include // boost includes are not always warning-clean. Disable warnings that // cause problems before including the headers, then re-enable the warnings. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include #pragma GCC diagnostic pop namespace leatherman { namespace locale { using namespace std; static map g_locales; const std::locale get_locale(string const& id, string const& domain, vector const& paths) { auto it = g_locales.find(domain); if (it != g_locales.end()) { return it->second; } // The system default locale is set with id == "", except on Windows boost::locale's // generator uses a compatible UTF-8 equivalent. Using boost results in UTF-8 being // the default on all platforms. boost::locale::generator gen; if (!domain.empty()) { // Setup so we can find installed locales. Expects a default path unless // an environment variable is specified. 
#ifdef LEATHERMAN_LOCALE_VAR string locale_path; if (util::environment::get(LEATHERMAN_LOCALE_VAR, locale_path)) { gen.add_messages_path(locale_path+'/'+LEATHERMAN_LOCALE_INSTALL); } #else gen.add_messages_path(LEATHERMAN_LOCALE_INSTALL); #endif for (auto& path : paths) { gen.add_messages_path(path); } gen.add_messages_domain(domain); } // Ensure creating and adding a new locale is thread-safe. try { return g_locales.insert(make_pair(domain, gen(id))).first->second; } catch(boost::locale::conv::conversion_error &e) { return g_locales.insert(make_pair(domain, std::locale())).first->second; } } void clear_domain(string const& domain) { g_locales.erase(domain); } string translate(string const& msg, string const& domain) { try { return boost::locale::translate(msg).str(get_locale("", domain)); } catch (exception const&) { return msg; } } string translate_p(string const& context, string const& msg, string const& domain) { try { return boost::locale::translate(context, msg).str(get_locale("", domain)); } catch (exception const&) { return msg; } } string translate_n(string const& single, string const& plural, int n, string const& domain) { try { return boost::locale::translate(single, plural, n).str(get_locale("", domain)); } catch (exception const&) { return n == 1 ? single : plural; } } string translate_np(string const& context, string const& single, string const& plural, int n, string const& domain) { try { return boost::locale::translate(context, single, plural, n).str(get_locale("", domain)); } catch (exception const&) { return n == 1 ? single : plural; } } }} // namespace leatherman::locale leatherman-1.4.2+dfsg/locale/tests/000075500000000000000000000000001332360634000171545ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locale/tests/format.cc000064400000000000000000000071001332360634000207510ustar00rootroot00000000000000#ifdef LEATHERMAN_I18N #undef LEATHERMAN_I18N #endif #include #include using namespace std; using namespace leatherman::locale; SCENARIO("a format string", "[locale]") { auto literal = "requesting {1} item."; GIVEN("basic leatherman::locale::translate") { THEN("messages should not be translated") { REQUIRE(translate(literal) == literal); } THEN("messages with context should not be translated") { REQUIRE(translate_p("foo", literal) == literal); } } GIVEN("plural leatherman::locale::translate") { auto plural = "requesting {1} items."; THEN("1 item should be singular") { REQUIRE(translate_n(literal, plural, 1) == literal); } THEN("0 items should be plural") { REQUIRE(translate_n(literal, plural, 0) == plural); } THEN("2 items should be plural") { REQUIRE(translate_n(literal, plural, 2) == plural); } THEN("1 item with context should be singular") { REQUIRE(translate_np("foo", literal, plural, 1) == literal); } THEN("2 items with context should be plural") { REQUIRE(translate_np("foo", literal, plural, 2) == plural); } } GIVEN("leatherman::locale::format") { THEN("messages should perform substitution") { REQUIRE(format(literal, 1.25) == "requesting 1.25 item."); } THEN("messages with context should perform substitution") { REQUIRE(format_p("foo", literal, 1.25) == "requesting 1.25 item."); } /* * Apply same tests with *_(...) 
convenience functions */ THEN("messages should perform substitution") { REQUIRE(_(literal, 1.25) == "requesting 1.25 item."); } THEN("messages with context should perform substitution") { REQUIRE(p_("foo", literal, 1.25) == "requesting 1.25 item."); } } GIVEN("plural leatherman::locale::format") { auto plural = "requesting {1} items."; THEN("1 item should be singular") { REQUIRE(format_n(literal, plural, 1, 3.7) == "requesting 3.7 item."); } THEN("0 item should be plural") { REQUIRE(format_n(literal, plural, 0, 3.7) == "requesting 3.7 items."); } THEN("2 items should be plural") { REQUIRE(format_n(literal, plural, 2, 3.7) == "requesting 3.7 items."); } THEN("1 item with context should be singular") { REQUIRE(format_np("foo", literal, plural, 1, 3.7) == "requesting 3.7 item."); } THEN("2 items with context should be plural") { REQUIRE(format_np("foo", literal, plural, 2, 3.7) == "requesting 3.7 items."); } /* * Apply same tests with *_(...) convenience functions */ THEN("1 item should be singular") { REQUIRE(n_(literal, plural, 1, 3.7) == "requesting 3.7 item."); } THEN("0 item should be plural") { REQUIRE(n_(literal, plural, 0, 3.7) == "requesting 3.7 items."); } THEN("2 items should be plural") { REQUIRE(n_(literal, plural, 2, 3.7) == "requesting 3.7 items."); } THEN("1 item with context should be singular") { REQUIRE(np_("foo", literal, plural, 1, 3.7) == "requesting 3.7 item."); } THEN("2 items with context should be plural") { REQUIRE(np_("foo", literal, plural, 2, 3.7) == "requesting 3.7 items."); } } } leatherman-1.4.2+dfsg/locale/tests/locale.cc000064400000000000000000000172261332360634000207320ustar00rootroot00000000000000#ifndef LEATHERMAN_I18N #define LEATHERMAN_I18N #endif #undef PROJECT_NAME #define PROJECT_NAME "leatherman_locale" #include #include using namespace std; using namespace leatherman::locale; SCENARIO("a default locale", "[locale]") { GIVEN("basic leatherman::locale::translate") { THEN("messages should not be translated") { REQUIRE(translate("requesting {1,number}.") == "requesting {1,number}."); } THEN("messages with context should not be translated") { REQUIRE(translate_p("foo", "requesting {1,number}.") == "requesting {1,number}."); } } GIVEN("plural leatherman::locale::translate") { THEN("1 item should be singular") { REQUIRE(translate_n("requesting {1,number} item.", "requesting {1,number} items.", 1) == "requesting {1,number} item."); } THEN("0 items should be plural") { REQUIRE(translate_n("requesting {1,number} item.", "requesting {1,number} items.", 0) == "requesting {1,number} items."); } THEN("2 items should be plural") { REQUIRE(translate_n("requesting {1,number} item.", "requesting {1,number} items.", 2) == "requesting {1,number} items."); } THEN("1 item with context should be singular") { REQUIRE(translate_np("foo", "requesting {1,number} item.", "requesting {1,number} items.", 1) == "requesting {1,number} item."); } THEN("2 items with context should be plural") { REQUIRE(translate_np("foo", "requesting {1,number} item.", "requesting {1,number} items.", 2) == "requesting {1,number} items."); } } GIVEN("leatherman::locale::format") { THEN("messages should not be translated") { REQUIRE(format("requesting {1,number}.", 1.25) == "requesting 1.25."); } } clear_domain(); } SCENARIO("a french locale", "[locale]") { auto loc = get_locale("fr.UTF-8"); GIVEN("basic leatherman::locale::translate") { THEN("messages should be translated") { REQUIRE(translate("requesting {1,number}.") == "demande {1,number}."); } THEN("messages with context should be translated") { 
REQUIRE(translate_p("foo", "requesting {1,number}.") == "demandé {1,number}."); } } GIVEN("plural leatherman::locale::translate") { THEN("1 item should be singular") { REQUIRE(translate_n("requesting {1,number} item.", "requesting {1,number} items.", 1) == "demande {1,number} objet."); } THEN("0 items should be singular") { REQUIRE(translate_n("requesting {1,number} item.", "requesting {1,number} items.", 0) == "demande {1,number} objet."); } THEN("2 items should be plural") { REQUIRE(translate_n("requesting {1,number} item.", "requesting {1,number} items.", 2) == "demande {1,number} objets."); } THEN("1 item with context should be singular") { REQUIRE(translate_np("foo", "requesting {1,number} item.", "requesting {1,number} items.", 1) == "demandé {1,number} objet."); } THEN("2 items with context should be plural") { REQUIRE(translate_np("foo", "requesting {1,number} item.", "requesting {1,number} items.", 2) == "demandé {1,number} objets."); } } GIVEN("leatherman::locale::format") { THEN("messages should be translated") { auto formatted = format("requesting {1,number}.", 1.25); // This doesn't seem to be treated consistently anywhere. Leave it as // flexible until we can resolve why. CAPTURE(formatted); REQUIRE((formatted == "demande 1.25." || formatted == "demande 1,25.")); } THEN("messages with context should be translated") { auto formatted = format_p("foo", "requesting {1,number}.", 1.25); CAPTURE(formatted); REQUIRE((formatted == "demandé 1.25." || formatted == "demandé 1,25.")); } /* * Apply same tests with *_(...) convenience functions */ THEN("messages should be translated") { auto formatted = _("requesting {1,number}.", 1.25); CAPTURE(formatted); REQUIRE((formatted == "demande 1.25." || formatted == "demande 1,25.")); } THEN("messages with context should be translated") { auto formatted = p_("foo", "requesting {1,number}.", 1.25); CAPTURE(formatted); REQUIRE((formatted == "demandé 1.25." || formatted == "demandé 1,25.")); } } GIVEN("plural leatherman::locale::format") { THEN("1 item should be singular") { auto formatted = format_n("requesting {1,number} item.", "requesting {1,number} items.", 1, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demande 3.7 objet." || formatted == "demande 3,7 objet.")); } THEN("0 items should be singular") { auto formatted = format_n("requesting {1,number} item.", "requesting {1,number} items.", 0, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demande 3.7 objet." || formatted == "demande 3,7 objet.")); } THEN("2 items should be plural") { auto formatted = format_n("requesting {1,number} item.", "requesting {1,number} items.", 2, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demande 3.7 objets." || formatted == "demande 3,7 objets.")); } THEN("1 item with context should be singular") { auto formatted = format_np("foo", "requesting {1,number} item.", "requesting {1,number} items.", 1, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demandé 3.7 objet." || formatted == "demandé 3,7 objet.")); } THEN("2 items with context should be plural") { auto formatted = format_np("foo", "requesting {1,number} item.", "requesting {1,number} items.", 2, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demandé 3.7 objets." || formatted == "demandé 3,7 objets.")); } /* * Apply same tests with *_(...) convenience functions */ THEN("1 item should be singular") { auto formatted = n_("requesting {1,number} item.", "requesting {1,number} items.", 1, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demande 3.7 objet." 
|| formatted == "demande 3,7 objet.")); } THEN("0 items should be singular") { auto formatted = n_("requesting {1,number} item.", "requesting {1,number} items.", 0, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demande 3.7 objet." || formatted == "demande 3,7 objet.")); } THEN("2 items should be plural") { auto formatted = n_("requesting {1,number} item.", "requesting {1,number} items.", 2, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demande 3.7 objets." || formatted == "demande 3,7 objets.")); } THEN("1 item with context should be singular") { auto formatted = np_("foo", "requesting {1,number} item.", "requesting {1,number} items.", 1, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demandé 3.7 objet." || formatted == "demandé 3,7 objet.")); } THEN("2 items with context should be plural") { auto formatted = np_("foo", "requesting {1,number} item.", "requesting {1,number} items.", 2, 3.7); CAPTURE(formatted); REQUIRE((formatted == "demandé 3.7 objets." || formatted == "demandé 3,7 objets.")); } } clear_domain(); } leatherman-1.4.2+dfsg/locales/000075500000000000000000000000001332360634000161755ustar00rootroot00000000000000leatherman-1.4.2+dfsg/locales/CMakeLists.txt000064400000000000000000000003771332360634000207440ustar00rootroot00000000000000if (LEATHERMAN_TOPLEVEL) gettext_templates(${CMAKE_CURRENT_SOURCE_DIR} ${ALL_LEATHERMAN_SOURCES}) endif() gettext_compile(${CMAKE_CURRENT_SOURCE_DIR} share/locale) SET_DIRECTORY_PROPERTIES(PROPERTIES CLEAN_NO_CUSTOM TRUE) export_var(GETTEXT_ENABLED) leatherman-1.4.2+dfsg/locales/leatherman.pot000064400000000000000000000362421332360634000210500ustar00rootroot00000000000000# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR Puppet # This file is distributed under the same license as the leatherman package. # FIRST AUTHOR , YEAR. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: leatherman 1.5.0\n" "Report-Msgid-Bugs-To: docs@puppet.com\n" "POT-Creation-Date: \n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #: curl/inc/leatherman/curl/client.hpp msgid "Failed setting up libcurl. Reason: {1}" msgstr "" #: curl/src/client.cc msgid "curl_easy_escape failed to escape string." msgstr "" #: curl/src/client.cc msgid "File operation error: {1}" msgstr "" #: curl/src/client.cc msgid "failed to open temporary file for writing" msgstr "" #: curl/src/client.cc msgid "failed to modify permissions of temporary file" msgstr "" #. debug #: curl/src/client.cc msgid "Download completed, now writing result to file {1}" msgstr "" #. warning #: curl/src/client.cc msgid "" "Failed to write the results of the temporary file to the actual file {1}" msgstr "" #: curl/src/client.cc msgid "failed to move over the temporary file's downloaded contents" msgstr "" #. debug #: curl/src/client.cc msgid "Writing the temp file's contents to the response body" msgstr "" #. warning #: curl/src/client.cc msgid "" "Failed to write the contents of the temporary file to the response body." msgstr "" #: curl/src/client.cc msgid "failed to write the temporary file's contents to the response body" msgstr "" #. warning #: curl/src/client.cc msgid "Failed to properly clean-up the temporary file {1}" msgstr "" #: curl/src/client.cc msgid "failed to create cURL handle." msgstr "" #. debug #: curl/src/client.cc msgid "request completed (status {1})." 
msgstr "" #: curl/src/client.cc msgid "failed to write to the temporary file during download" msgstr "" #: curl/src/client.cc msgid "File download server side error: {1}" msgstr "" #: curl/src/client.cc msgid "unexpected HTTP method specified." msgstr "" #. debug #: curl/src/client.cc msgid "requesting {1}." msgstr "" #. warning #: curl/src/client.cc msgid "unexpected HTTP response header: {1}." msgstr "" #. debug #: dynamic_library/src/posix/dynamic_library.cc msgid "library {1} not found {2} ({3})." msgstr "" #. debug #: dynamic_library/src/posix/dynamic_library.cc #: dynamic_library/src/windows/dynamic_library.cc msgid "library {1} is not loaded when attempting to load symbol {2}." msgstr "" #. debug #: dynamic_library/src/posix/dynamic_library.cc #: dynamic_library/src/windows/dynamic_library.cc msgid "symbol {1} not found in library {2}, trying alias {3}." msgstr "" #: dynamic_library/src/posix/dynamic_library.cc #: dynamic_library/src/windows/dynamic_library.cc msgid "symbol {1} was not found in {2}." msgstr "" #. debug #: dynamic_library/src/posix/dynamic_library.cc #: dynamic_library/src/windows/dynamic_library.cc msgid "symbol {1} not found in library {2}." msgstr "" #. debug #: dynamic_library/src/windows/dynamic_library.cc msgid "" "library matching pattern {1} not found, CreateToolhelp32Snapshot failed: {2}." msgstr "" #. debug #: dynamic_library/src/windows/dynamic_library.cc msgid "library matching pattern {1} not found, Module32First failed: {2}." msgstr "" #. debug #: dynamic_library/src/windows/dynamic_library.cc msgid "library {1} found from pattern {2}" msgstr "" #. debug #: dynamic_library/src/windows/dynamic_library.cc msgid "" "library {1} found from pattern {2}, but unloaded before handle was acquired" msgstr "" #. debug #: dynamic_library/src/windows/dynamic_library.cc msgid "no loaded libraries found matching pattern {1}" msgstr "" #. debug #: dynamic_library/src/windows/dynamic_library.cc msgid "library {1} not found {2}." msgstr "" #: dynamic_library/src/windows/dynamic_library.cc msgid "library is not loaded" msgstr "" #. debug #: execution/src/execution.cc msgid "executing command: {1}" msgstr "" #: execution/src/execution.cc msgid "failed to open output file {1}" msgstr "" #: execution/src/execution.cc msgid "failed to modify permissions on output file {1} to {2,num,oct}: {3}" msgstr "" #: execution/src/execution.cc msgid "failed to open error file {1}" msgstr "" #: execution/src/execution.cc msgid "failed to modify permissions on error file {1} to {2,num,oct}: {3}" msgstr "" #. debug #: execution/src/execution.cc msgid "completed processing output: closing child pipes." msgstr "" #: execution/src/posix/execution.cc #: windows/src/system_error.cc msgid "{1} ({2})" msgstr "" #: execution/src/posix/execution.cc msgid "{1}: {2} ({3})." msgstr "" #: execution/src/posix/execution.cc msgid "select call failed waiting for child i/o" msgstr "" #. debug #: execution/src/posix/execution.cc msgid "select call was interrupted and will be retried." msgstr "" #. debug #: execution/src/posix/execution.cc msgid "{1} pipe i/o was closed early, process may have ignored input." msgstr "" #. debug #: execution/src/posix/execution.cc msgid "{1} pipe i/o was interrupted and will be retried." msgstr "" #: execution/src/posix/execution.cc msgid "{1} pipe i/o failed: {2}" msgstr "" #: execution/src/posix/execution.cc #: execution/src/windows/execution.cc msgid "command timed out after {1} seconds." msgstr "" #: execution/src/posix/execution.cc msgid "{1}={2}" msgstr "" #. 
debug #: execution/src/posix/execution.cc #: execution/src/windows/execution.cc msgid "{1} was not found on the PATH." msgstr "" #: execution/src/posix/execution.cc #: execution/src/windows/execution.cc msgid "child process returned non-zero exit status." msgstr "" #: execution/src/posix/execution.cc msgid "failed to allocate pipe for stdin redirection" msgstr "" #: execution/src/posix/execution.cc msgid "failed to allocate pipe for stdout redirection" msgstr "" #: execution/src/posix/execution.cc msgid "failed to allocate pipe for stderr redirection" msgstr "" #: execution/src/posix/execution.cc msgid "waitpid failed" msgstr "" #: execution/src/posix/execution.cc msgid "sigaction failed while setting up timeout" msgstr "" #: execution/src/posix/execution.cc msgid "setitimer failed while setting up timeout" msgstr "" #. debug #: execution/src/posix/execution.cc msgid "process was signaled with signal {1}." msgstr "" #. debug #: execution/src/posix/execution.cc msgid "process exited with status code {1}." msgstr "" #: execution/src/posix/execution.cc msgid "child process returned non-zero exit status ({1})." msgstr "" #: execution/src/posix/execution.cc msgid "child process was terminated by signal ({1})." msgstr "" #: execution/src/posix/generic/platform.cc #: execution/src/posix/solaris/platform.cc msgid "failed to fork child process" msgstr "" #: execution/src/posix/solaris/platform.cc msgid "failed to create process contract template" msgstr "" #: execution/src/posix/solaris/platform.cc msgid "failed to lookup the latest child process contract" msgstr "" #: execution/src/posix/solaris/platform.cc msgid "failed to abandon contract created for a child process" msgstr "" #: execution/src/posix/solaris/platform.cc msgid "failed to deactivate contract template created for a child process" msgstr "" #: execution/src/windows/execution.cc msgid "\\\\.\\Pipe\\leatherman.{1}.{2}.{3}.{4}" msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to create read pipe: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to create read pipe." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to create write pipe: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to create write pipe." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to create {1} read event: {2}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to create read event." msgstr "" #. error #: execution/src/windows/execution.cc msgid "{1} pipe i/o failed: {2}." msgstr "" #: execution/src/windows/execution.cc msgid "child i/o failed." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to wait for child process i/o: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to wait for child process i/o." msgstr "" #. error #: execution/src/windows/execution.cc msgid "asynchronous i/o on {1} failed: {2}." msgstr "" #: execution/src/windows/execution.cc msgid "asynchronous i/o failed." msgstr "" #: execution/src/windows/execution.cc msgid "could not determine if the parent process is running in a job object" msgstr "" #. debug #: execution/src/windows/execution.cc msgid "child environment {1}={2}" msgstr "" #: execution/src/windows/execution.cc msgid "pipe could not be modified" msgstr "" #: execution/src/windows/execution.cc msgid "cannot open NUL device for redirecting stderr." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to create process: {1}." 
msgstr "" #: execution/src/windows/execution.cc msgid "failed to create child process." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to create job object: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to create job object." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to associate process with job object: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to associate process with job object." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to terminate process: {1}." msgstr "" #. warning #: execution/src/windows/execution.cc msgid "could not terminate process {1} because a job object could not be used." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to create waitable timer: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to create waitable timer." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to set waitable timer: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to set waitable timer." msgstr "" #. error #: execution/src/windows/execution.cc msgid "failed to wait for child process to terminate: {1}." msgstr "" #: execution/src/windows/execution.cc msgid "failed to wait for child process to terminate." msgstr "" #: execution/src/windows/execution.cc msgid "error retrieving exit code of completed process" msgstr "" #. debug #: execution/src/windows/execution.cc msgid "process exited with exit code {1}." msgstr "" #. warning #: file_util/src/file.cc msgid "file path is an empty string" msgstr "" #. debug #: file_util/src/file.cc msgid "Error reading file: {1}" msgstr "" #: file_util/src/file.cc msgid "failed to open {1}" msgstr "" #. warning #: file_util/src/file.cc msgid "{1} has not been set" msgstr "" #: json_container/inc/leatherman/json_container/json_container.hpp #: json_container/src/json_container.cc msgid "not an object" msgstr "" #: json_container/inc/leatherman/json_container/json_container.hpp msgid "root is not a valid JSON object" msgstr "" #: json_container/inc/leatherman/json_container/json_container.hpp msgid "invalid key supplied; cannot navigate the provided path" msgstr "" #: json_container/src/json_container.cc msgid "invalid json" msgstr "" #: json_container/src/json_container.cc msgid "unknown object entry with key: {1}" msgstr "" #: json_container/src/json_container.cc msgid "not an array" msgstr "" #: json_container/src/json_container.cc msgid "array index out of bounds" msgstr "" #: json_container/src/json_container.cc msgid "not an integer" msgstr "" #: json_container/src/json_container.cc msgid "not a boolean" msgstr "" #: json_container/src/json_container.cc msgid "not a string" msgstr "" #: json_container/src/json_container.cc msgid "not a double" msgstr "" #: logging/src/logging.cc msgid "" "invalid log level '{1}': expected none, trace, debug, info, warn, error, or " "fatal." msgstr "" #: ruby/src/api.cc msgid "could not locate a ruby library" msgstr "" #. info #: ruby/src/api.cc msgid "ruby loaded from \"{1}\"." msgstr "" #. info #: ruby/src/api.cc msgid "ruby was already loaded." msgstr "" #. info #: ruby/src/api.cc msgid "using ruby version {1}" msgstr "" #: ruby/src/api.cc msgid "size_t maximum exceeded, requested size was {1}" msgstr "" #: ruby/src/api.cc msgid "maximum array size exceeded, reported size was {1}" msgstr "" #. warning #: ruby/src/api.cc msgid "preferred ruby library \"{1}\" could not be loaded." msgstr "" #. 
warning #: ruby/src/api.cc msgid "ruby library \"{1}\" could not be loaded." msgstr "" #. debug #: ruby/src/api.cc msgid "ruby could not be found on the PATH." msgstr "" #. debug #: ruby/src/api.cc msgid "ruby was found at \"{1}\"." msgstr "" #. warning #: ruby/src/api.cc msgid "ruby failed to run: {1}" msgstr "" #. debug #: ruby/src/api.cc msgid "" "ruby library \"{1}\" was not found: ensure ruby was built with the --enable-" "shared configuration option." msgstr "" #: windows/src/file_util.cc msgid "error finding FOLDERID_ProgramData: {1}" msgstr "" #. debug #: windows/src/process.cc #: windows/src/user.cc msgid "OpenProcessToken call failed: {1}" msgstr "" #. debug #: windows/src/process.cc msgid "GetTokenInformation call failed: {1}" msgstr "" #: windows/src/registry.cc msgid "invalid HKEY specified" msgstr "" #: windows/src/registry.cc msgid "error reading registry key {1} {2}: {3}" msgstr "" #: windows/src/system_error.cc msgid "unknown error ({1})" msgstr "" #. debug #: windows/src/user.cc msgid "Failed to create administrators SID: {1}" msgstr "" #. debug #: windows/src/user.cc msgid "Invalid SID" msgstr "" #. debug #: windows/src/user.cc msgid "Failed to check membership: {1}" msgstr "" #. debug #: windows/src/user.cc msgid "GetUserProfileDirectoryW call returned unexpectedly" msgstr "" #. debug #: windows/src/user.cc msgid "GetUserProfileDirectoryW call failed: {1}" msgstr "" #. LOCALE: format a pointer as hex for printing an error message. #: windows/src/wmi.cc msgid "{1} (0x{2,num=hex})" msgstr "" #: windows/src/wmi.cc msgid "%1% (0x%2$#x)" msgstr "" #. debug #: windows/src/wmi.cc msgid "initializing WMI" msgstr "" #. debug #: windows/src/wmi.cc msgid "using prior COM concurrency model" msgstr "" #: windows/src/wmi.cc msgid "failed to initialize COM library" msgstr "" #. debug #: windows/src/wmi.cc msgid "COM single-threaded apartment not supported, using multi-threaded" msgstr "" #: windows/src/wmi.cc msgid "failed to create IWbemLocator object" msgstr "" #: windows/src/wmi.cc msgid "could not connect to WMI server" msgstr "" #: windows/src/wmi.cc msgid "could not set proxy blanket" msgstr "" #. debug #: windows/src/wmi.cc msgid "ignoring {1}-dimensional array in query {2}.{3}" msgstr "" #. debug #: windows/src/wmi.cc msgid "" "WMI query {1}.{2} result could not be converted from type {3} to a string" msgstr "" #. debug #: windows/src/wmi.cc msgid "query {1} failed" msgstr "" #. debug #: windows/src/wmi.cc msgid "query {1}.{2} could not be found" msgstr "" #. debug #: windows/src/wmi.cc msgid "only single value requested from array for key {1}" msgstr "" #. 
debug #: windows/src/wmi.cc msgid "only single entry requested from array of entries for key {1}" msgstr "" #: windows/src/wmi.cc msgid "unable to get from empty array of objects" msgstr "" #: windows/src/wmi.cc msgid "unable to get_range from empty array of objects" msgstr "" leatherman-1.4.2+dfsg/logging/000075500000000000000000000000001332360634000162015ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/CMakeLists.txt000064400000000000000000000023671332360634000207510ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS log log_setup thread date_time filesystem system chrono regex) find_package(Threads) add_leatherman_deps(${Boost_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(nowide) leatherman_dependency(locale) if (CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "SunOS") add_leatherman_deps(rt) endif() if (BUILDING_LEATHERMAN) leatherman_logging_namespace("leatherman.logging") leatherman_logging_line_numbers() endif() if(WIN32) set(PLATFORM_SRCS "src/windows/logging.cc") set(PLATFORM_TEST_SRCS "tests/windows/logging.cc") else() set(PLATFORM_SRCS "src/posix/logging.cc") set(PLATFORM_TEST_SRCS "tests/posix/logging.cc") endif() if (LEATHERMAN_USE_LOCALES AND GETTEXT_ENABLED) list(APPEND PLATFORM_TEST_SRCS tests/logging_i18n.cc) endif() add_leatherman_library(src/logging.cc ${PLATFORM_SRCS}) add_leatherman_test( tests/logging.cc tests/logging_stream.cc tests/logging_stream_lines.cc tests/logging_on_message.cc ${PLATFORM_TEST_SRCS}) add_leatherman_headers(inc/leatherman) if (LEATHERMAN_USE_LOCALES AND BUILDING_LEATHERMAN) project(leatherman_logging) add_subdirectory(locales) endif() leatherman-1.4.2+dfsg/logging/inc/000075500000000000000000000000001332360634000167525ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/inc/leatherman/000075500000000000000000000000001332360634000210725ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/inc/leatherman/logging/000075500000000000000000000000001332360634000225205ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/inc/leatherman/logging/logging.hpp000064400000000000000000000230551332360634000246640ustar00rootroot00000000000000/** * @file * Declares the logging functions and macros. */ #pragma once // To use this header, you must: // - Have Boost on the include path // - Link in Boost.Log // - Configure Boost.Log at runtime before any logging takes place /** * See Boost.Log's documentation. */ #include #include #include #include #include /** * Defines the logging namespace. */ #ifndef LEATHERMAN_LOGGING_NAMESPACE #error "LEATHERMAN_LOGGING_NAMESPACE must be set. This is typically done via CMake." #else #define LOG_NAMESPACE LEATHERMAN_LOGGING_NAMESPACE #endif /** * Logs a message. * @param level The logging level for the message. * @param line_num The source line number of the logging call. * @param format The format message. * @param ... The format message parameters. */ #ifdef LEATHERMAN_LOGGING_LINE_NUMBERS #define LOG_MESSAGE(level, line_num, format, ...) \ if (leatherman::logging::is_enabled(level)) { \ leatherman::logging::log(LOG_NAMESPACE, level, line_num, format, ##__VA_ARGS__); \ } #else #define LOG_MESSAGE(level, line_num, format, ...) \ if (leatherman::logging::is_enabled(level)) { \ leatherman::logging::log(LOG_NAMESPACE, level, 0, format, ##__VA_ARGS__); \ } #endif /** * Logs a trace message. * @param format The format message. * @param ... The format message parameters. */ #define LOG_TRACE(format, ...) 
LOG_MESSAGE(leatherman::logging::log_level::trace, __LINE__, format, ##__VA_ARGS__) /** * Logs a debug message. * @param format The format message. * @param ... The format message parameters. */ #define LOG_DEBUG(format, ...) LOG_MESSAGE(leatherman::logging::log_level::debug, __LINE__, format, ##__VA_ARGS__) /** * Logs an info message. * @param format The format message. * @param ... The format message parameters. */ #define LOG_INFO(format, ...) LOG_MESSAGE(leatherman::logging::log_level::info, __LINE__, format, ##__VA_ARGS__) /** * Logs a warning message. * @param format The format message. * @param ... The format message parameters. */ #define LOG_WARNING(format, ...) LOG_MESSAGE(leatherman::logging::log_level::warning, __LINE__, format, ##__VA_ARGS__) /** * Logs an error message. * @param format The format message. * @param ... The format message parameters. */ #define LOG_ERROR(format, ...) LOG_MESSAGE(leatherman::logging::log_level::error, __LINE__, format, ##__VA_ARGS__) /** * Logs a fatal message. * @param format The format message. * @param ... The format message parameters. */ #define LOG_FATAL(format, ...) LOG_MESSAGE(leatherman::logging::log_level::fatal, __LINE__, format, ##__VA_ARGS__) /** * Determines if the trace logging level is enabled. * @returns Returns true if trace logging is enabled or false if it is not enabled. */ #define LOG_IS_TRACE_ENABLED() leatherman::logging::is_enabled(leatherman::logging::log_level::trace) /** * Determines if the debug logging level is enabled. * @returns Returns true if debug logging is enabled or false if it is not enabled. */ #define LOG_IS_DEBUG_ENABLED() leatherman::logging::is_enabled(leatherman::logging::log_level::debug) /** * Determines if the info logging level is enabled. * @returns Returns true if info logging is enabled or false if it is not enabled. */ #define LOG_IS_INFO_ENABLED() leatherman::logging::is_enabled(leatherman::logging::log_level::info) /** * Determines if the warning logging level is enabled. * @returns Returns true if warning logging is enabled or false if it is not enabled. */ #define LOG_IS_WARNING_ENABLED() leatherman::logging::is_enabled(leatherman::logging::log_level::warning) /** * Determines if the error logging level is enabled. * @returns Returns true if error logging is enabled or false if it is not enabled. */ #define LOG_IS_ERROR_ENABLED() leatherman::logging::is_enabled(leatherman::logging::log_level::error) /** * Determines if the fatal logging level is enabled. * @returns Returns true if fatal logging is enabled or false if it is not enabled. */ #define LOG_IS_FATAL_ENABLED() leatherman::logging::is_enabled(leatherman::logging::log_level::fatal) namespace leatherman { namespace logging { /** * Represents the supported logging levels. */ enum class log_level { none, trace, debug, info, warning, error, fatal }; /** * Reads a log level from an input stream. * This is used in boost::lexical_cast. * @param in The input stream. * @param level The returned log level. * @returns Returns the input stream. */ std::istream& operator>>(std::istream& in, log_level& level); /** * Produces the printed representation of logging level. * @param strm The stream to write. * @param level The logging level to print. * @return Returns the stream after writing to it. */ std::ostream& operator<<(std::ostream& strm, log_level level); /** * Sets up logging for the given stream. * The logging level is set to warning by default. * @param dst Destination stream for logging output. 
* @param locale The locale identifier to use for logging. * @param domain The catalog domain to use for i18n via gettext. * @param use_locale Whether to use locales in logging setup. If locales are disabled this parameter is ignored. */ void setup_logging(std::ostream &dst, std::string locale = "", std::string domain = PROJECT_NAME, bool use_locale = true); /** * Sets the current log level. * @param level The new current log level to set. */ void set_level(log_level level); /** * Gets the current log level. * @return Returns the current log level. */ log_level get_level(); /** * Sets whether or not log output is colorized. * @param color Pass true if log output is colorized or false if it is not colorized. */ void set_colorization(bool color); /** * Gets whether or not the log output is colorized. * @return Returns true if log output is colorized or false if it is not colorized. */ bool get_colorization(); /** * Provides a callback for when a message is logged. * If the callback returns false, the message will not be logged. * @param callback The callback to call when a message is about to be logged. */ void on_message(std::function<bool (log_level, std::string const&)> callback); /** * Determines if the given log level is enabled for the given logger. * @param level The logging level to check. * @return Returns true if the logging level is enabled or false if it is not. */ bool is_enabled(log_level level); /** * Determine if an error has been logged * @return Returns true if an error or critical message has been logged */ bool error_has_been_logged(); /** * Clear the flag that indicates an error has been logged. * This is necessary for testing the flagging functionality. This function should * not be used by library consumers. */ void clear_error_logged_flag(); /** * Logs a given message to the given logger with the specified line number (if > 0). * Does no translation on the message. * @param logger The logger to log the message to. * @param level The logging level to log with. * @param line_num The source line number of the logging call. * @param message The message to log. */ void log_helper(const std::string &logger, log_level level, int line_num, std::string const& message); /** * Logs a given message to the given logger with the specified line number (if > 0). * If LEATHERMAN_I18N is specified it does translation on the message. * @param logger The logger to log to. * @param level The logging level to log with. * @param line_num The source line number of the logging call. * @param msg The message format. */ static inline void log(const std::string &logger, log_level level, int line_num, std::string const& msg) { log_helper(logger, level, line_num, leatherman::locale::translate(msg)); } /** * Logs a given format message to the given logger with the specified line number (if > 0). * If LEATHERMAN_I18N is specified, does translation on the format string, but not following arguments. * @tparam TArgs The types of the arguments to format the message with. * @param logger The logger to log to. * @param level The logging level to log with. * @param line_num The source line number of the logging call. * @param fmt The message format. * @param args The remaining arguments to the message. */ template <typename... TArgs> static void log(const std::string &logger, log_level level, int line_num, std::string const& fmt, TArgs... args) { log_helper(logger, level, line_num, leatherman::locale::format(fmt, std::forward<TArgs>(args)...)); } /** * Starts colorizing for the given log level.
* This is a no-op on platforms that don't natively support terminal colors. * @param dst The stream to colorize. * @param level The log level to colorize for. Defaults to none, which resets colorization. */ void colorize(std::ostream &dst, log_level level = log_level::none); /** * Returns whether terminal colors are supported. * @param dst The stream to check. * @return True if terminal colors are supported for the specified stream on this platform, else false. */ bool color_supported(std::ostream &dst); }} // namespace leatherman::logging leatherman-1.4.2+dfsg/logging/locales/000075500000000000000000000000001332360634000176235ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/locales/CMakeLists.txt000064400000000000000000000003761332360634000223710ustar00rootroot00000000000000if (LEATHERMAN_TOPLEVEL) gettext_templates(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/../tests/logging_i18n.cc) endif() gettext_compile(${CMAKE_CURRENT_SOURCE_DIR} share/locale) SET_DIRECTORY_PROPERTIES(PROPERTIES CLEAN_NO_CUSTOM TRUE) leatherman-1.4.2+dfsg/logging/locales/fr.po000064400000000000000000000035141332360634000205750ustar00rootroot00000000000000# French translations for leatherman_logging package. # Copyright (C) 2016 Puppet # This file is distributed under the same license as the leatherman_logging package. # Automatically generated, 2016. # msgid "" msgstr "" "Project-Id-Version: leatherman_logging \n" "Report-Msgid-Bugs-To: docs@puppet.com\n" "POT-Creation-Date: \n" "PO-Revision-Date: \n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" #. debug #: logging/locales/../tests/logging_i18n.cc msgid "debug logging" msgstr "l'enregistrement de débogage" #. debug #: logging/locales/../tests/logging_i18n.cc msgid "debug logging is {1}" msgstr "l'enregistrement de débogage est {1}" #. info #: logging/locales/../tests/logging_i18n.cc msgid "info logging" msgstr "info exploitation forestière" #. info #: logging/locales/../tests/logging_i18n.cc msgid "info logging is {1}" msgstr "info exploitation forestière est {1}" #. warning #: logging/locales/../tests/logging_i18n.cc msgid "warning logging" msgstr "journalisation d'avertissement" #. warning #: logging/locales/../tests/logging_i18n.cc msgid "warning logging is {1}" msgstr "journalisation d'avertissement est {1}" #. error #: logging/locales/../tests/logging_i18n.cc msgid "error message" msgstr "message d'erreur" #. error #: logging/locales/../tests/logging_i18n.cc msgid "error message is {1}" msgstr "un message d'erreur est {1}" #. fatal #: logging/locales/../tests/logging_i18n.cc msgid "fatal message" msgstr "un message fatal" #. fatal #: logging/locales/../tests/logging_i18n.cc msgid "fatal message is {1}" msgstr "un message fatal est {1}" #: logging/locales/../tests/logging_i18n.cc msgid "™❄Λ" msgstr "Λ❄™" leatherman-1.4.2+dfsg/logging/locales/leatherman_logging.pot000064400000000000000000000030221332360634000241720ustar00rootroot00000000000000# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR Puppet # This file is distributed under the same license as the leatherman_logging package. # FIRST AUTHOR , YEAR. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: leatherman_logging \n" "Report-Msgid-Bugs-To: docs@puppet.com\n" "POT-Creation-Date: \n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. debug #: logging/locales/../tests/logging_i18n.cc msgid "debug logging" msgstr "" #. debug #: logging/locales/../tests/logging_i18n.cc msgid "debug logging is {1}" msgstr "" #. info #: logging/locales/../tests/logging_i18n.cc msgid "info logging" msgstr "" #. info #: logging/locales/../tests/logging_i18n.cc msgid "info logging is {1}" msgstr "" #. warning #: logging/locales/../tests/logging_i18n.cc msgid "warning logging" msgstr "" #. warning #: logging/locales/../tests/logging_i18n.cc msgid "warning logging is {1}" msgstr "" #. error #: logging/locales/../tests/logging_i18n.cc msgid "error message" msgstr "" #. error #: logging/locales/../tests/logging_i18n.cc msgid "error message is {1}" msgstr "" #. fatal #: logging/locales/../tests/logging_i18n.cc msgid "fatal message" msgstr "" #. fatal #: logging/locales/../tests/logging_i18n.cc msgid "fatal message is {1}" msgstr "" #: logging/locales/../tests/logging_i18n.cc msgid "™❄Λ" msgstr "" leatherman-1.4.2+dfsg/logging/src/000075500000000000000000000000001332360634000167705ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/src/logging.cc000064400000000000000000000161011332360634000207240ustar00rootroot00000000000000#include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; // boost includes are not always warning-clean. Disable warnings that // cause problems before including the headers, then re-enable the warnings. 
#pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wextra" #include #include #include #include #include #include #include #include #include #pragma GCC diagnostic pop using namespace std; namespace expr = boost::log::expressions; namespace src = boost::log::sources; namespace attrs = boost::log::attributes; namespace keywords = boost::log::keywords; namespace sinks = boost::log::sinks; namespace leatherman { namespace logging { static function<bool (log_level, string const&)> g_callback; static log_level g_level = log_level::none; static bool g_colorize = false; static bool g_error_logged = false; namespace lth_locale = leatherman::locale; class color_writer : public sinks::basic_sink_backend<sinks::synchronized_feeding> { public: color_writer(ostream *dst); void consume(boost::log::record_view const& rec); private: ostream &_dst; }; color_writer::color_writer(ostream *dst) : _dst(*dst) {} void color_writer::consume(boost::log::record_view const& rec) { auto level = boost::log::extract<log_level>("Severity", rec); if (!is_enabled(*level)) { return; } auto line_num = boost::log::extract<int>("LineNum", rec); auto name_space = boost::log::extract<string>("Namespace", rec); auto timestamp = boost::log::extract<boost::posix_time::ptime>("TimeStamp", rec); auto message = rec[expr::smessage]; _dst << boost::gregorian::to_iso_extended_string(timestamp->date()); _dst << " " << boost::posix_time::to_simple_string(timestamp->time_of_day()); _dst << " " << left << setfill(' ') << setw(5) << level << " " << *name_space; if (line_num) { _dst << ":" << *line_num; } _dst << " - "; colorize(_dst, *level); _dst << *message; colorize(_dst); _dst << endl; } // cppcheck-suppress passedByValue void setup_logging(ostream &dst, string locale, string domain, bool use_locale) { // Remove existing sinks before adding a new one auto core = boost::log::core::get(); core->remove_all_sinks(); using sink_t = sinks::synchronous_sink<color_writer>; boost::shared_ptr<sink_t> sink = boost::make_shared<sink_t>(boost::make_shared<color_writer>(&dst)); core->add_sink(sink); #ifdef LEATHERMAN_USE_LOCALES // Imbue the logging sink with the requested locale. // Locale in GCC is busted on Solaris, so skip it. // TODO: Imbue may not be useful, as setup_logging can be called multiple times // with different domains for the same ostream. // Note that this creates a locale that's not usable for testing, as it // only includes paths for install locations. This is intentional, to avoid leaving // searching paths that have unknown permissions. if (use_locale) { dst.imbue(lth_locale::get_locale(locale, domain, {})); } #endif boost::log::add_common_attributes(); // Default to the warning level set_level(log_level::warning); // Set whether or not to use colorization depending if the destination is a tty g_colorize = color_supported(dst); } // This version exists for binary compatibility only.
void setup_logging(ostream &dst, string locale, string domain) { setup_logging(dst, move(locale), move(domain), true); } void set_level(log_level level) { auto core = boost::log::core::get(); core->set_logging_enabled(level != log_level::none); g_level = level; } log_level get_level() { return g_level; } void set_colorization(bool color) { g_colorize = color; } bool get_colorization() { return g_colorize; } bool is_enabled(log_level level) { return g_level != log_level::none && static_cast(level) >= static_cast(g_level); } bool error_has_been_logged() { return g_error_logged; } void clear_error_logged_flag() { g_error_logged = false; } // cppcheck-suppress passedByValue void on_message(function callback) { g_callback = callback; } void log_helper(const string &logger, log_level level, int line_num, string const& message) { if (level >= log_level::error) { g_error_logged = true; } if (!is_enabled(level) || (g_callback && !g_callback(level, message))) { return; } src::logger slg; slg.add_attribute("Severity", attrs::constant(level)); slg.add_attribute("Namespace", attrs::constant(logger)); if (line_num > 0) { slg.add_attribute("LineNum", attrs::constant(line_num)); } BOOST_LOG(slg) << message; } istream& operator>>(istream& in, log_level& level) { string value; if (in >> value) { boost::algorithm::to_lower(value); if (value == "none") { level = log_level::none; return in; } if (value == "trace") { level = log_level::trace; return in; } if (value == "debug") { level = log_level::debug; return in; } if (value == "info") { level = log_level::info; return in; } if (value == "warn") { level = log_level::warning; return in; } if (value == "error") { level = log_level::error; return in; } if (value == "fatal") { level = log_level::fatal; return in; } } throw runtime_error(_("invalid log level '{1}': expected none, trace, debug, info, warn, error, or fatal.", value)); } ostream& operator<<(ostream& strm, log_level level) { static const vector strings = {"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"}; if (level != log_level::none) { size_t index = static_cast(level) - 1; if (index < strings.size()) { strm << strings[index]; } } return strm; } }} // namespace leatherman::logging leatherman-1.4.2+dfsg/logging/src/posix/000075500000000000000000000000001332360634000201325ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/src/posix/logging.cc000064400000000000000000000021461332360634000220720ustar00rootroot00000000000000#include #include #include using namespace std; namespace leatherman { namespace logging { void colorize(ostream& dst, log_level level) { if (!get_colorization()) { return; } static const string cyan = "\33[0;36m"; static const string green = "\33[0;32m"; static const string yellow = "\33[0;33m"; static const string red = "\33[0;31m"; static const string reset = "\33[0m"; if (level == log_level::trace || level == log_level::debug) { dst << cyan; } else if (level == log_level::info) { dst << green; } else if (level == log_level::warning) { dst << yellow; } else if (level == log_level::error || level == log_level::fatal) { dst << red; } else { dst << reset; } } bool color_supported(ostream& dst) { return (&dst == &cout && isatty(fileno(stdout))) || (&dst == &cerr && isatty(fileno(stderr))); } }} // namespace leatherman::logging 
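The header and platform sources above describe the consumer-facing workflow for Leatherman.logging: define the logging namespace, call setup_logging() on the destination stream, choose a level, and emit records through the LOG_* macros, optionally intercepting them with on_message. The short sketch below illustrates that flow; it is not part of the library sources. It assumes leatherman.logging and Boost.Log are linked, and it defines LEATHERMAN_LOGGING_NAMESPACE by hand (normally the leatherman_logging_namespace() CMake helper injects it); the "example.main" namespace string, the callback body, and the messages are illustrative only.

// Minimal usage sketch (not part of leatherman); assumes leatherman.logging and Boost.Log are linked.
#define LEATHERMAN_LOGGING_NAMESPACE "example.main"  // hypothetical namespace; normally injected by CMake
#include <leatherman/logging/logging.hpp>
#include <boost/nowide/iostream.hpp>

int main() {
    using namespace leatherman::logging;

    // Attach the color_writer sink to stdout; an empty locale selects the default,
    // and the domain/use_locale arguments only matter when i18n support is compiled in.
    setup_logging(boost::nowide::cout, "", "example", false);

    // setup_logging defaults to the warning level; lower it so debug records pass the filter.
    set_level(log_level::debug);

    // Optional hook: a callback that returns false suppresses the record.
    on_message([](log_level lvl, std::string const&) {
        return lvl >= log_level::warning;  // let only WARN and above reach the sink
    });

    LOG_DEBUG("debug detail {1}", 42);                            // dropped by the callback above
    LOG_WARNING("something looks off: {1}", "disk nearly full");  // formatted and written to the sink

    return error_has_been_logged() ? 1 : 0;
}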
leatherman-1.4.2+dfsg/logging/src/windows/000075500000000000000000000000001332360634000204625ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/src/windows/logging.cc000064400000000000000000000035171332360634000224250ustar00rootroot00000000000000#include #include #include using namespace std; namespace leatherman { namespace logging { static HANDLE stdHandle; static WORD originalAttributes; void colorize(ostream& dst, log_level level) { if (!get_colorization()) { return; } // The ostream may have buffered data, and changing the console color will affect any buffered data written // later. Ensure the buffer is flushed before changing the console color. dst.flush(); if (level == log_level::trace || level == log_level::debug) { SetConsoleTextAttribute(stdHandle, FOREGROUND_BLUE | FOREGROUND_GREEN); } else if (level == log_level::info) { SetConsoleTextAttribute(stdHandle, FOREGROUND_GREEN); } else if (level == log_level::warning) { SetConsoleTextAttribute(stdHandle, FOREGROUND_RED | FOREGROUND_GREEN); } else if (level == log_level::error || level == log_level::fatal) { SetConsoleTextAttribute(stdHandle, FOREGROUND_RED); } else { SetConsoleTextAttribute(stdHandle, originalAttributes); } } bool color_supported(ostream& dst) { bool colorize = false; if (&dst == &cout || &dst == &boost::nowide::cout) { stdHandle = GetStdHandle(STD_OUTPUT_HANDLE); colorize = true; } else if (&dst == &cerr || &dst == &boost::nowide::cerr) { stdHandle = GetStdHandle(STD_ERROR_HANDLE); colorize = true; } if (colorize) { CONSOLE_SCREEN_BUFFER_INFO csbiInfo; GetConsoleScreenBufferInfo(stdHandle, &csbiInfo); originalAttributes = csbiInfo.wAttributes; } return colorize; } }} // namespace leatherman::logging leatherman-1.4.2+dfsg/logging/tests/000075500000000000000000000000001332360634000173435ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/tests/logging.cc000064400000000000000000000064531332360634000213100ustar00rootroot00000000000000#include "logging.hpp" #include #include #include namespace leatherman { namespace test { static bool all_spaces(string const& s) { return boost::algorithm::all(s, [](char c) { return c == ' '; }); } std::streamsize colored_tokenizing_stringbuf::xsputn(char_type const* s, std::streamsize count) { auto str = string(s, count); if (all_spaces(str) && !tokens.empty() && all_spaces(tokens.back())) { // Lump all white space strings together. 
tokens.back() += move(str); } else { tokens.emplace_back(move(str)); } return stringbuf::xsputn(s, count); } logging_context::logging_context(log_level lvl) { set_level(lvl); REQUIRE(get_level() == lvl); clear_error_logged_flag(); } logging_context::~logging_context() { set_level(log_level::none); REQUIRE(get_level() == log_level::none); on_message(nullptr); clear_error_logged_flag(); } logging_format_context::logging_format_context(log_level lvl, string ns, int line_num) : logging_context(lvl) { _strm_buf = boost::nowide::cout.rdbuf(); boost::nowide::cout.rdbuf(&_buf); setup_logging(boost::nowide::cout); set_level(lvl); REQUIRE(get_level() == lvl); set_colorization(true); static const boost::regex rdate("\\d{4}-\\d{2}-\\d{2}"); static const boost::regex rtime("[0-2]\\d:[0-5]\\d:\\d{2}\\.\\d{6}"); _expected = {rdate, " ", rtime, " ", lvl, boost::regex("[ ]+"), ns}; if (line_num > 0) { _expected.emplace_back(":"); _expected.emplace_back(to_string(line_num)); } _expected.emplace_back(" - "); auto color = get_color(lvl); if (!color.empty()) { _expected.emplace_back(color); } _expected.emplace_back("testing 1 2 3"); if (!color.empty()) { _expected.emplace_back(get_color(log_level::none)); } } logging_format_context::~logging_format_context() { boost::nowide::cout.rdbuf(_strm_buf); auto core = boost::log::core::get(); core->reset_filter(); core->remove_all_sinks(); } vector const& logging_format_context::tokens() const { return _buf.tokens; } string logging_format_context::message() const { return _buf.str(); } vector const& logging_format_context::expected() const { return _expected; } }} // namespace leatherman::test namespace boost { bool operator== (std::string const& lhs, leatherman::test::matcher const& rhs) { using leatherman::logging::log_level; if (auto *expected = boost::get(&rhs)) { return boost::regex_match(lhs, *expected); } else if (auto *expected = boost::get(&rhs)) { std::stringstream ss{lhs}; log_level lvl = log_level::none; ss >> lvl; return lvl == *expected; } else if (auto *expected = boost::get(&rhs)) { return lhs == *expected; } else { return false; } } } // namespace boost leatherman-1.4.2+dfsg/logging/tests/logging.hpp000064400000000000000000000050061332360634000215030ustar00rootroot00000000000000/** * @file Utilities for testing logging */ #pragma once #include #include #include #include #include #include namespace leatherman { namespace test { using namespace std; using namespace leatherman::logging; using matcher = boost::variant; /** * Declare colors. */ constexpr char cyan[] = "\33[0;36m"; constexpr char green[] = "\33[0;32m"; constexpr char yellow[] = "\33[0;33m"; constexpr char red[] = "\33[0;31m"; constexpr char reset[] = "\33[0m"; /** * Zip view for iterating over multiple containers at once */ template auto zip_view(T const&... containers) -> boost::iterator_range> { auto zip_begin = boost::make_zip_iterator(boost::make_tuple(std::begin(containers)...)); auto zip_end = boost::make_zip_iterator(boost::make_tuple(std::end(containers)...)); return boost::make_iterator_range(zip_begin, zip_end); } /** * Stringbuf for capturing tokens written to the attached stream, and insert color * codes on Windows that match to the current console attributes. */ class colored_tokenizing_stringbuf : public stringbuf { public: vector tokens; protected: virtual std::streamsize xsputn(char_type const* s, std::streamsize count); }; /** * Context for simple logging tests. 
*/ struct logging_context { logging_context(log_level lvl = log_level::trace); virtual ~logging_context(); }; /** * Context for capturing the format of log messages as they would appear on cout/cerr. */ struct logging_format_context : logging_context { logging_format_context(log_level lvl = log_level::trace, string ns = LOG_NAMESPACE, int line_num = 0); ~logging_format_context() final; vector const& tokens() const; string message() const; vector const& expected() const; private: string get_color(log_level lvl) const; colored_tokenizing_stringbuf _buf; streambuf *_strm_buf; vector _expected; }; }} // namespace leatherman::test namespace boost { bool operator== (std::string const& lhs, leatherman::test::matcher const& rhs); } // namespace boost leatherman-1.4.2+dfsg/logging/tests/logging_i18n.cc000064400000000000000000000072051332360634000221430ustar00rootroot00000000000000#ifndef LEATHERMAN_I18N #define LEATHERMAN_I18N #endif #undef PROJECT_NAME #define PROJECT_NAME "leatherman_logging" #include #include #include #include #include #include "logging.hpp" using namespace std; using namespace leatherman::logging; #define _(x) x TEST_CASE("logging i18n with on_message") { leatherman::locale::get_locale("fr.UTF-8", PROJECT_NAME, {PROJECT_DIR}); leatherman::test::logging_context ctx(log_level::trace); string message; log_level level; on_message([&](log_level lvl, string const& msg) { level = lvl; message = msg; return false; }); SECTION("a TRACE message to log is not translated") { LOG_TRACE("trace logging"); REQUIRE(level == log_level::trace); REQUIRE(message == "trace logging"); } SECTION("a TRACE message with substitution is not translated") { LOG_TRACE("trace logging is {1}", "trace"); REQUIRE(level == log_level::trace); REQUIRE(message == "trace logging is trace"); } SECTION("a DEBUG message to log is translated") { LOG_DEBUG("debug logging"); REQUIRE(level == log_level::debug); REQUIRE(message == "l'enregistrement de débogage"); } SECTION("a DEBUG message with substitution is translated") { LOG_DEBUG("debug logging is {1}", "debug"); REQUIRE(level == log_level::debug); REQUIRE(message == "l'enregistrement de débogage est debug"); } SECTION("a INFO message to log is translated") { LOG_INFO("info logging"); REQUIRE(level == log_level::info); REQUIRE(message == "info exploitation forestière"); } SECTION("a INFO message with substitution is translated") { LOG_INFO("info logging is {1}", "info"); REQUIRE(level == log_level::info); REQUIRE(message == "info exploitation forestière est info"); } SECTION("a WARNING message to log is translated") { LOG_WARNING("warning logging"); REQUIRE(level == log_level::warning); REQUIRE(message == "journalisation d'avertissement"); } SECTION("a WARNING message with substitution is translated") { LOG_WARNING("warning logging is {1}", "warning"); REQUIRE(level == log_level::warning); REQUIRE(message == "journalisation d'avertissement est warning"); } SECTION("a ERROR message to log is translated") { LOG_ERROR("error message"); REQUIRE(level == log_level::error); REQUIRE(message == "message d'erreur"); } SECTION("a ERROR message with substitution is translated") { LOG_ERROR("error message is {1}", "error"); REQUIRE(level == log_level::error); REQUIRE(message == "un message d'erreur est error"); } SECTION("a FATAL message to log is translated") { LOG_FATAL("fatal message"); REQUIRE(level == log_level::fatal); REQUIRE(message == "un message fatal"); } SECTION("a FATAL message with substitution is translated") { LOG_FATAL("fatal message is {1}", "fatal"); 
REQUIRE(level == log_level::fatal); REQUIRE(message == "un message fatal est fatal"); } SECTION("a unicode characters to log") { wstring symbols = _(L"\u2122\u2744\u039b"); auto utf8 = boost::nowide::narrow(symbols); reverse(symbols.begin(), symbols.end()); auto utf8_reverse = boost::nowide::narrow(symbols); LOG_INFO(utf8); REQUIRE(level == log_level::info); REQUIRE(message == utf8_reverse); } leatherman::locale::clear_domain(); } leatherman-1.4.2+dfsg/logging/tests/logging_on_message.cc000064400000000000000000000036211332360634000235020ustar00rootroot00000000000000#include #include #include #include "logging.hpp" using namespace std; using namespace leatherman::logging; TEST_CASE("logging with on_message") { leatherman::test::logging_context ctx(log_level::trace); string message; log_level level; on_message([&](log_level lvl, string const& msg) { level = lvl; message = msg; return false; }); SECTION("a TRACE message is logged to on_message") { LOG_TRACE("trace message"); REQUIRE(level == log_level::trace); REQUIRE(message == "trace message"); } SECTION("a DEBUG message is logged to on_message") { LOG_DEBUG("debug message"); REQUIRE(level == log_level::debug); REQUIRE(message == "debug message"); } SECTION("an INFO message is logged to on_message") { LOG_INFO("info message"); REQUIRE(level == log_level::info); REQUIRE(message == "info message"); } SECTION("a WARNING message is logged to on_message") { LOG_WARNING("warning message"); REQUIRE(level == log_level::warning); REQUIRE(message == "warning message"); } SECTION("an ERROR message is logged to on_message") { LOG_ERROR("error message"); REQUIRE(level == log_level::error); REQUIRE(message == "error message"); } SECTION("a FATAL message is logged to on_message") { LOG_FATAL("fatal message"); REQUIRE(level == log_level::fatal); REQUIRE(message == "fatal message"); } #if 0 SECTION("a unicode characters to log") { const wstring symbols[] = {L"\u2122", L"\u2744", L"\u039b"}; for (auto const& s : symbols) { auto utf8 = boost::nowide::narrow(s); LOG_INFO(utf8); REQUIRE(level == log_level::info); REQUIRE(message == utf8); } } #endif } leatherman-1.4.2+dfsg/logging/tests/logging_stream.cc000064400000000000000000000133401332360634000226540ustar00rootroot00000000000000#include "logging.hpp" using namespace leatherman::test; SCENARIO("formatting with a TRACE level macro") { logging_format_context context(log_level::trace, LOG_NAMESPACE); REQUIRE(LOG_IS_TRACE_ENABLED()); LOG_TRACE("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a TRACE level directly") { logging_format_context context(log_level::trace, "test"); REQUIRE(LOG_IS_TRACE_ENABLED()); log("test", log_level::trace, 0, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a DEBUG level macro") { logging_format_context context(log_level::debug, LOG_NAMESPACE); REQUIRE(LOG_IS_DEBUG_ENABLED()); LOG_DEBUG("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { 
REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a DEBUG level directly") { logging_format_context context(log_level::debug, "test"); REQUIRE(LOG_IS_DEBUG_ENABLED()); log("test", log_level::debug, 0, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a INFO level macro") { logging_format_context context(log_level::info, LOG_NAMESPACE); REQUIRE(LOG_IS_INFO_ENABLED()); LOG_INFO("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a INFO level directly") { logging_format_context context(log_level::info, "test"); REQUIRE(LOG_IS_INFO_ENABLED()); log("test", log_level::info, 0, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a WARNING level macro") { logging_format_context context(log_level::warning, LOG_NAMESPACE); REQUIRE(LOG_IS_WARNING_ENABLED()); LOG_WARNING("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a WARNING level directly") { logging_format_context context(log_level::warning, "test"); REQUIRE(LOG_IS_WARNING_ENABLED()); log("test", log_level::warning, 0, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with a ERROR level macro") { logging_format_context context(log_level::error, LOG_NAMESPACE); REQUIRE(LOG_IS_ERROR_ENABLED()); LOG_ERROR("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } SCENARIO("formatting with a ERROR level directly") { logging_format_context context(log_level::error, "test"); REQUIRE(LOG_IS_ERROR_ENABLED()); log("test", log_level::error, 0, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } SCENARIO("formatting with a FATAL level macro") { logging_format_context context(log_level::fatal, LOG_NAMESPACE); REQUIRE(LOG_IS_FATAL_ENABLED()); LOG_FATAL("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { 
REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } SCENARIO("formatting with a FATAL level directly") { logging_format_context context(log_level::fatal, "test"); REQUIRE(LOG_IS_FATAL_ENABLED()); log("test", log_level::fatal, 0, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } leatherman-1.4.2+dfsg/logging/tests/logging_stream_lines.cc000064400000000000000000000146721332360634000240570ustar00rootroot00000000000000#define LEATHERMAN_LOGGING_LINE_NUMBERS #include "logging.hpp" using namespace leatherman::test; SCENARIO("formatting with lines with a TRACE level macro") { int line_num = __LINE__ + 4; logging_format_context context(log_level::trace, LOG_NAMESPACE, line_num); REQUIRE(LOG_IS_TRACE_ENABLED()); LOG_TRACE("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a TRACE level directly") { int line_num = __LINE__ + 4; logging_format_context context(log_level::trace, "test", line_num); REQUIRE(LOG_IS_TRACE_ENABLED()); log("test", log_level::trace, line_num, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a DEBUG level macro") { int line_num = __LINE__ + 4; logging_format_context context(log_level::debug, LOG_NAMESPACE, line_num); REQUIRE(LOG_IS_DEBUG_ENABLED()); LOG_DEBUG("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a DEBUG level directly") { int line_num = __LINE__ + 4; logging_format_context context(log_level::debug, "test", line_num); REQUIRE(LOG_IS_DEBUG_ENABLED()); log("test", log_level::debug, line_num, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a INFO level macro") { int line_num = __LINE__ + 4; logging_format_context context(log_level::info, LOG_NAMESPACE, line_num); REQUIRE(LOG_IS_INFO_ENABLED()); LOG_INFO("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a INFO level directly") { int line_num = __LINE__ + 4; logging_format_context context(log_level::info, "test", line_num); REQUIRE(LOG_IS_INFO_ENABLED()); log("test", log_level::info, line_num, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == 
context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a WARNING level macro") { int line_num = __LINE__ + 4; logging_format_context context(log_level::warning, LOG_NAMESPACE, line_num); REQUIRE(LOG_IS_WARNING_ENABLED()); LOG_WARNING("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a WARNING level directly") { int line_num = __LINE__ + 4; logging_format_context context(log_level::warning, "test", line_num); REQUIRE(LOG_IS_WARNING_ENABLED()); log("test", log_level::warning, line_num, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE_FALSE(error_has_been_logged()); } SCENARIO("formatting with lines with a ERROR level macro") { int line_num = __LINE__ + 4; logging_format_context context(log_level::error, LOG_NAMESPACE, line_num); REQUIRE(LOG_IS_ERROR_ENABLED()); LOG_ERROR("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } SCENARIO("formatting with lines with a ERROR level directly") { int line_num = __LINE__ + 4; logging_format_context context(log_level::error, "test", line_num); REQUIRE(LOG_IS_ERROR_ENABLED()); log("test", log_level::error, line_num, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } SCENARIO("formatting with lines with a FATAL level macro") { int line_num = __LINE__ + 4; logging_format_context context(log_level::fatal, LOG_NAMESPACE, line_num); REQUIRE(LOG_IS_FATAL_ENABLED()); LOG_FATAL("testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } SCENARIO("formatting with lines with a FATAL level directly") { int line_num = __LINE__ + 4; logging_format_context context(log_level::fatal, "test", line_num); REQUIRE(LOG_IS_FATAL_ENABLED()); log("test", log_level::fatal, line_num, "testing {1} {2} {3}", 1, "2", 3.0); CAPTURE(context.message()); REQUIRE(context.expected().size() == context.tokens().size()); for (auto tup : zip_view(context.tokens(), context.expected())) { REQUIRE(get<0>(tup) == get<1>(tup)); } REQUIRE(error_has_been_logged()); } leatherman-1.4.2+dfsg/logging/tests/posix/000075500000000000000000000000001332360634000205055ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/tests/posix/logging.cc000064400000000000000000000011221332360634000224360ustar00rootroot00000000000000#include "../logging.hpp" namespace leatherman { namespace test { string logging_format_context::get_color(log_level lvl) const { switch (lvl) { case 
log_level::trace: case log_level::debug: return cyan; case log_level::info: return green; case log_level::warning: return yellow; case log_level::error: case log_level::fatal: return red; default: return reset; } } }} // namespace leatherman::test leatherman-1.4.2+dfsg/logging/tests/windows/000075500000000000000000000000001332360634000210355ustar00rootroot00000000000000leatherman-1.4.2+dfsg/logging/tests/windows/logging.cc000064400000000000000000000005731332360634000227770ustar00rootroot00000000000000#include "../logging.hpp" #include namespace leatherman { namespace test { string logging_format_context::get_color(log_level lvl) const { // So far I've found no way to successfully test console color when output is redirected, // as happens when running with ctest or make test. return ""; } }} // namespace leatherman::test leatherman-1.4.2+dfsg/nowide/000075500000000000000000000000001332360634000160405ustar00rootroot00000000000000leatherman-1.4.2+dfsg/nowide/CMakeLists.txt000064400000000000000000000004201332360634000205740ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED) add_leatherman_includes(${Boost_INCLUDE_DIRS} "${CMAKE_CURRENT_SOURCE_DIR}/../vendor/nowide/include") add_leatherman_headers(../vendor/nowide/include/boost) if(WIN32) add_leatherman_library(../vendor/nowide/src/iostream.cpp) endif() leatherman-1.4.2+dfsg/rapidjson/000075500000000000000000000000001332360634000165445ustar00rootroot00000000000000leatherman-1.4.2+dfsg/rapidjson/CMakeLists.txt000064400000000000000000000001051332360634000213000ustar00rootroot00000000000000add_leatherman_vendored("rapidjson-1.0.2.zip" "rapidjson" "include") leatherman-1.4.2+dfsg/ruby/000075500000000000000000000000001332360634000155345ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/CMakeLists.txt000064400000000000000000000012571332360634000203010ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS regex filesystem system) add_leatherman_deps("${Boost_LIBRARIES}") add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(dynamic_library) leatherman_dependency(util) leatherman_dependency(execution) leatherman_dependency(locale) leatherman_dependency(logging) if (BUILDING_LEATHERMAN) leatherman_logging_namespace("leatherman.ruby") leatherman_logging_line_numbers() endif() add_leatherman_headers(inc/leatherman) if(WIN32) set(PLATFORM_SRCS src/windows/api.cc) else() set(PLATFORM_SRCS src/posix/api.cc) endif() add_leatherman_library(src/api.cc ${PLATFORM_SRCS}) add_leatherman_test(tests/api-test.cc) leatherman-1.4.2+dfsg/ruby/inc/000075500000000000000000000000001332360634000163055ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/inc/leatherman/000075500000000000000000000000001332360634000204255ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/inc/leatherman/ruby/000075500000000000000000000000001332360634000214065ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/inc/leatherman/ruby/api.hpp000064400000000000000000000565511332360634000227040ustar00rootroot00000000000000/** * @file * Declares the API imported from Ruby. */ #pragma once #include #include #include #include #include #include #include #include #ifndef _WIN32 #include #endif namespace leatherman { namespace ruby { /* * Parts of the MRI (Matz's Ruby Interpreter; a.k.a. CRuby) we use is documented here: * https://github.com/ruby/ruby/blob/trunk/README.EXT * * Otherwise, the canonical documentation is unfortunately the MRI source code itself. 
* A useful index of the various MRI versions can be found here: * http://rxr.whitequark.org/mri/source * */ /** * Represents a MRI VALUE (a Ruby object). * VALUEs can be constants denoting things like true, false, or nil. * They can also be encoded numerical values (Integer, for example). * They can also be pointers to a heap-allocated Ruby object (class, module, etc). * The Ruby garbage collector scans the main thread's stack for VALUEs to mark during garbage collection. * Therefore, you may encounter "volatile" VALUES. These are marked simply to ensure the compiler * does not do any optimizations that may prevent the garbage collector from finding them. * This is likely not needed, but it isn't hurting us to do. */ typedef uintptr_t VALUE; /** * See MRI documentation. */ typedef intptr_t SIGNED_VALUE; /** * See MRI documentation. */ using LONG_LONG = int64_t; using ULONG_LONG = uint64_t; /** * See MRI documentation. */ typedef uintptr_t ID; /** * See MRI documentation. This is a complex struct type; we only use it as an opaque object. */ typedef void * rb_encoding_p; #ifdef _WIN32 typedef int rb_pid_t; #else typedef pid_t rb_pid_t; #endif /** * Macro to cast function pointers to a Ruby method. */ #define RUBY_METHOD_FUNC(x) reinterpret_cast(x) /** * Exception thrown when ruby library could not be loaded. */ struct library_not_loaded_exception : std::runtime_error { /** * Constructs a library_not_loaded_exception. * @param message The exception message. */ explicit library_not_loaded_exception(std::string const& message); }; /** * Exception thrown when Ruby to C type conversions fail. */ struct invalid_conversion : std::runtime_error { /** * Constructs an invalid_conversion exception. * @param message The exception message. */ explicit invalid_conversion(std::string const& message); }; /** * Contains utility functions and the pointers to the Ruby API. */ struct api { /** * Destructs the Ruby API. */ ~api(); /** * Prevents the API from being copied. */ api(api const&) = delete; /** * Prevents the API from being copied. * @returns Returns this API. */ api& operator=(api const&) = delete; /** * Prevents the API from being moved. */ api(api&&) = delete; /** * Prevents the API from being moved. * @return Returns this API. */ api& operator=(api&&) = delete; /** * Gets the Ruby API instance. * Throws a library_not_loaded_exception if the API instance can't be created. * @return Returns the Ruby API instance. */ static api& instance(); /** * Called to initialize the API. * This should be done at the same stack frame where code is loaded into the Ruby VM. */ void initialize(); /** * Gets whether or not the API has been initialized. * @return Returns true if the API has been initialized or false if it has not been initialized. */ bool initialized() const; /** * Called to uninitialize the API. * Called during destruction, but can also be called earlier to cleanup Ruby, avoiding potential * ordering conflicts between unloading the libfacter DLL and libruby DLL. */ void uninitialize(); /** * Gets whether or not exception stack traces are included when formatting exception messages. * @return Returns true if stack traces will be included in exception messages or false if they will not be. */ bool include_stack_trace() const; /** * Sets whether or not exception stack traces are included when formatting exception messages. * @param value True if stack traces will be included in exception messages or false if they will not be. */ void include_stack_trace(bool value); /** * See MRI documentation. 
*/ ID (* const rb_intern)(char const*); /** * See MRI documentation. */ VALUE (* const rb_const_get)(VALUE, ID); /** * See MRI documentation. */ void (* const rb_const_set)(VALUE, ID, VALUE); /** * See MRI documentation. */ VALUE (* const rb_const_remove)(VALUE, ID); /** * See MRI documentation. */ int (* const rb_const_defined)(VALUE, ID); /** * See MRI documentation. */ VALUE (* const rb_define_module)(char const*); /** * See MRI documentation. */ VALUE (* const rb_define_module_under)(VALUE, char const*); /** * See MRI documentation. */ VALUE (* const rb_define_class_under)(VALUE, char const*, VALUE super); /** * See MRI documentation. */ void (* const rb_define_method)(VALUE, char const*, VALUE(*)(...), int); /** * See MRI documentation. */ void (* const rb_define_singleton_method)(VALUE, char const*, VALUE(*)(...), int); /** * See MRI documentation. */ VALUE (* const rb_class_new_instance)(int, VALUE*, VALUE); /** * See MRI documentation. */ VALUE (* const rb_gv_get)(char const*); /** * See MRI documentation. */ VALUE (* const rb_eval_string)(char const*); /** * See MRI documentation. */ VALUE (* const rb_funcall)(VALUE, ID, int, ...); /** * See MRI documentation. */ VALUE (* const rb_funcallv)(VALUE, ID, int, VALUE const*); /** * See MRI documentation. */ VALUE (* const rb_proc_new)(VALUE (*)(...), VALUE); /** * See MRI documentation. */ VALUE (* const rb_block_call)(VALUE, ID, int, VALUE*, VALUE(*)(...), VALUE); /** * See MRI documentation. */ VALUE (* const rb_funcall_passing_block)(VALUE, ID, int, VALUE const *); /** * See MRI documentation. */ ULONG_LONG (* const rb_num2ull)(VALUE); /** * See MRI documentation. */ LONG_LONG (* const rb_num2ll)(VALUE); /** * See MRI documentation. */ double (* const rb_num2dbl)(VALUE); /** * See MRI documentation. */ char const* (* const rb_string_value_ptr)(volatile VALUE*); /** * See MRI documentation. */ VALUE (* const rb_rescue2)(VALUE(*)(...), VALUE, VALUE(*)(...), VALUE, ...); /** * See MRI documentation. */ VALUE (* const rb_protect)(VALUE (*)(VALUE), VALUE, int*); /** * See MRI documentation. */ void (* const rb_jump_tag)(int); /** * See MRI documentation. */ VALUE (* const rb_int2inum)(SIGNED_VALUE); /** * See MRI documentation. */ VALUE (* const rb_ll2inum)(LONG_LONG); /** * See MRI documentation. */ VALUE (* const rb_enc_str_new)(char const*, long, rb_encoding_p); /** * See MRI documentation. */ rb_encoding_p (* const rb_utf8_encoding)(void); /** * See MRI documentation. */ VALUE (* const rb_str_encode)(VALUE, VALUE, int, VALUE); /** * See MRI documentation. */ VALUE (* const rb_load)(VALUE, int); /** * See MRI documentation. */ void (* const rb_raise)(VALUE, char const* fmt, ...); /** * See MRI documentation. */ VALUE (* const rb_block_proc)(); /** * See MRI documentation. */ int (* const rb_block_given_p)(); /** * See MRI documentation. */ void (* const rb_gc_register_address)(VALUE*); /** * See MRI documentation. */ void (* const rb_gc_unregister_address)(VALUE*); /** * See MRI documentation. */ void (* const rb_hash_foreach)(VALUE, int (*)(...), VALUE); /** * See MRI documentation. */ void (* const rb_define_attr)(VALUE, char const*, int, int); /** * See MRI documentation. */ VALUE (* const rb_ivar_set)(VALUE, ID, VALUE); /** * See MRI documentation. */ VALUE (* const rb_ivar_get)(VALUE, ID); /** * See MRI documentation. */ VALUE (* const rb_float_new_in_heap)(double); /** * See MRI documentation. */ VALUE (* const rb_ary_new_capa)(long); /** * See MRI documentation. 
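 *
 * Illustrative use of the array helpers declared here (assumes initialize() was called):
 *
 *   auto& ruby = api::instance();
 *   auto ary = ruby.rb_ary_new_capa(2);
 *   ruby.rb_ary_push(ary, ruby.utf8_value("a"));
 *   ruby.rb_ary_push(ary, ruby.utf8_value("b"));
 *   auto first = ruby.rb_ary_entry(ary, 0);   // the first element, "a"
 *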
*/ VALUE (* const rb_ary_push)(VALUE, VALUE); /** * See MRI documentation. */ VALUE (* const rb_ary_entry)(VALUE, long); /** * See MRI documentation. */ VALUE (* const rb_hash_new)(); /** * See MRI documentation. */ VALUE (* const rb_hash_aset)(VALUE, VALUE, VALUE); /** * See MRI documentation. */ VALUE (* const rb_hash_lookup)(VALUE, VALUE); /** * See MRI documentation. */ VALUE (* const rb_hash_lookup2)(VALUE, VALUE, VALUE); /** * See MRI documentation. */ VALUE (* const rb_sym_to_s)(VALUE); /** * See MRI documentation. */ ID (* const rb_to_id)(VALUE); /** * See MRI documentation. */ char const* (* const rb_id2name)(ID); /** * See MRI documentation. */ void (* const rb_define_alloc_func)(VALUE, VALUE (*)(VALUE)); /** * See MRI documentation. */ typedef void (*RUBY_DATA_FUNC)(void*); /** * See MRI documentation. */ VALUE (* const rb_data_object_alloc)(VALUE, void*, RUBY_DATA_FUNC, RUBY_DATA_FUNC); /** * See MRI documentation. */ void (* const rb_gc_mark)(VALUE); /** * See MRI documentation. */ VALUE (* const rb_yield_values)(int n, ...); /** * See MRI documentation. */ VALUE (* const rb_require)(char const*); /** * Intern MRI method. We're being naughty */ VALUE(* const rb_last_status_set)(int, rb_pid_t); /** * See MRI documentation. */ VALUE* const rb_cObject; /** * See MRI documentation. */ VALUE* const rb_cArray; /** * See MRI documentation. */ VALUE* const rb_cHash; /** * See MRI documentation. */ VALUE* const rb_cString; /** * See MRI documentation. */ VALUE* const rb_cSymbol; /** * See MRI documentation. */ VALUE* const rb_cFloat; /** * See MRI documentation. */ VALUE* const rb_cInteger; /** * See MRI documentation. */ VALUE* const rb_eException; /** * See MRI documentation. */ VALUE* const rb_eArgError; /** * See MRI documentation. */ VALUE* const rb_eTypeError; /** * See MRI documentation. */ VALUE* const rb_eStandardError; /** * See MRI documentation. */ VALUE* const rb_eRuntimeError; /** * See MRI documentation. */ VALUE* const rb_eLoadError; /** * Gets the load path being used by Ruby. * @return Returns the load path being used by Ruby. */ std::vector get_load_path() const; /** * Converts a Ruby number into a size_t integer, with checking for overflow. * Throws an invalid_conversion if overflow is detected. * @param v The Ruby value to convert. * @return Returns the Ruby value as a size_t integer. */ size_t num2size_t(VALUE v) const; /** * Converts a Ruby value into a C++ string. * @param v The Ruby value to convert. * @return Returns the Ruby value as a string. */ std::string to_string(VALUE v) const; /** * Converts the given string to a Ruby symbol. * @param s The string to convert to a symbol. * @return Returns the symbol. */ VALUE to_symbol(std::string const& s) const; /** * Converts a C string to a Ruby UTF-8 encoded string value. * @param s The C string. * @param len The number of bytes in the C string. * @return Returns the string as a UTF-8 encoded Ruby value. */ VALUE utf8_value(char const* s, size_t len) const; /** * Converts a C string to a Ruby UTF-8 encoded string value. * @param s The C string. * @return Returns the string as a UTF-8 encoded Ruby value. */ VALUE utf8_value(char const* s) const; /** * Converts a C++ string to a Ruby UTF-8 encoded string value. * @param s The C++ string. * @return Returns the string as a UTF-8 encoded Ruby value. */ VALUE utf8_value(std::string const& s) const; /** * A utility function for wrapping a callback with a rescue clause. * @param callback The callback to call in the context of the rescue clause. 
* @param rescue The rescue function to call if there is an exception. * @return Returns the VALUE returned from either the callback or the rescue function. */ VALUE rescue(std::function callback, std::function rescue) const; /** * A utility function for wrapping a callback with protection. * @param tag The returned jump tag. An exception occurred if the jump tag is non-zero. * @param callback The callback to call in the context of protection. * @return Returns the VALUE returned from the callback if successful or nil otherwise. */ VALUE protect(int& tag, std::function callback) const; /** * Enumerates an array. * @param array The array to enumerate. * @param callback The callback to call for every element in the array. */ void array_for_each(VALUE array, std::function callback) const; /** * Enumerates a hash. * @param hash The hash to enumerate. * @param callback The callback to call for every element in the hash. */ void hash_for_each(VALUE hash, std::function callback) const; /** * Converts the given exception into a string. * @param ex The exception to get the string representation of. * @param message The optional message to use instead of the exception's message. * @return Returns the string representation of the exception. */ std::string exception_to_string(VALUE ex, std::string const& message = std::string()) const; /** * Determines if the given value is an instance of the given class (or superclass). * @param value The value to check. * @param klass The class to check. * @return Returns true if the value is an instance of the given class (or a superclass) or false if it is not. */ bool is_a(VALUE value, VALUE klass) const; /** * Determines if the given value is nil. * @param value The value to check. * @return Returns true if the given value is nil or false if it is not. */ bool is_nil(VALUE value) const; /** * Determines if the given value is true. * @param value The value to check. * @return Returns true if the given value is true or false if it is not. */ bool is_true(VALUE value) const; /** * Determines if the given value is false. * @param value The value to check. * @return Returns true if the given value is false or false if it is not. */ bool is_false(VALUE value) const; /** * Determines if the given value is a hash. * @param value The value to check. * @return Returns true if the given value is a hash or false if it is not. */ bool is_hash(VALUE value) const; /** * Determines if the given value is an array. * @param value The value to check. * @return Returns true if the given value is an array or false if it is not. */ bool is_array(VALUE value) const; /** * Determines if the given value is a string. * @param value The value to check. * @return Returns true if the given value is a string or false if it is not. */ bool is_string(VALUE value) const; /** * Determines if the given value is a symbol. * @param value The value to check. * @return Returns true if the given value is a symbol or false if it is not. */ bool is_symbol(VALUE value) const; /** * Determines if the given value is an Integer. * @param value The value to check. * @return Returns true if the given value is an integer (Integer) or false if it is not. */ bool is_integer(VALUE value) const; /** * Determines if the given value is a float. * @param value The value to check. * @return Returns true if the given value is a float or false if it is not. */ bool is_float(VALUE value) const; /** * Gets the VALUE for nil. * @return Returns the VALUE for nil. */ VALUE nil_value() const; /** * Gets the VALUE for true. 
* @return Returns the VALUE for true. */ VALUE true_value() const; /** * Gets the VALUE for false. * @return Returns the VALUE for false. */ VALUE false_value() const; /** * Get the length of a ruby array. * Throws an invalid_conversion if teh array length can not be represented by a long. * @return Returns the length of the array. */ long array_len(VALUE array) const; /** * Looks up a constant based on the given names. The individual entries correspond to * namespaces, as in {"A", "B", "C"} => A::B::C. * @param names The names to lookup. * @return Returns the value or raises a NameError. */ VALUE lookup(std::initializer_list const& names) const; /** * Determines if two values are equal. * @param first The first value to compare. * @param second The second value to compare. * @return Returns true if eql? returns true for the first and second values. */ bool equals(VALUE first, VALUE second) const; /** * Determines if the first value has case equality (===) with the second value. * @param first The first value to compare. * @param second The second value to compare. * @return Returns true if === returns true for the first and second values. */ bool case_equals(VALUE first, VALUE second) const; /** * Evalutes a ASCII string as ruby code. * Any exception raised will be propagated as a C++ runtime_error * @param code the ruby code to execute */ VALUE eval(const std::string& code); /** * Gets the underlying native instance from a Ruby data object. * The Ruby object must have been allocated with rb_data_object_alloc. * @tparam T The underlying native type. * @param obj The Ruby data object to get the native instance for. * @return Returns a pointer to the underlying native type. */ template T* to_native(VALUE obj) const { return reinterpret_cast(reinterpret_cast(obj)->data); } /** * Registers a data object for cleanup when the API is destructed. * The object must have been created with rb_data_object_alloc. * @param obj The data object to register. */ void register_data_object(VALUE obj) const { _data_objects.insert(obj); } /** * Unregisters a data object. * @param obj The data object to unregister. */ void unregister_data_object(VALUE obj) const { _data_objects.erase(obj); } /** * Specifies the location of the preferred Ruby library. * Defaults to empty, must be specified before api::instance() is called. 
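 * For example (the path below is hypothetical, for illustration only):
 *
 *   api::ruby_lib_location = "/opt/rubies/2.4/lib/libruby.so";
 *   auto& ruby = api::instance();
 *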
*/ static std::string ruby_lib_location; private: explicit api(leatherman::dynamic_library::dynamic_library library); // Imported Ruby functions that should not be called externally int (* const ruby_setup)(); void (* const ruby_init)(); void* (* const ruby_options)(int, char**); int (* const ruby_cleanup)(volatile int); static leatherman::dynamic_library::dynamic_library create(); static leatherman::dynamic_library::dynamic_library find_library(); static leatherman::dynamic_library::dynamic_library find_loaded_library(); static VALUE callback_thunk(VALUE parameter); static VALUE rescue_thunk(VALUE parameter, VALUE exception); static VALUE protect_thunk(VALUE parameter); static int hash_for_each_thunk(VALUE key, VALUE value, VALUE arg); static std::set _data_objects; // Represents object data // This definition comes from Ruby (unfortunately) struct RData { VALUE flags; const VALUE klass; void (*dmark)(void*); void (*dfree)(void*); void *data; #ifdef __GNUC__ } __attribute__((aligned(sizeof(VALUE)))); #else }; #endif leatherman::dynamic_library::dynamic_library _library; VALUE _nil = 0u; VALUE _true = 0u; VALUE _false = 0u; bool _initialized = false; bool _include_stack_trace = false; }; }} // namespace leatherman::ruby leatherman-1.4.2+dfsg/ruby/src/000075500000000000000000000000001332360634000163235ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/src/api.cc000064400000000000000000000373321332360634000174130ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; using namespace leatherman::util; using namespace leatherman::execution; using namespace boost::filesystem; namespace lth_lib = leatherman::dynamic_library; namespace leatherman { namespace ruby { string api::ruby_lib_location = ""; set api::_data_objects; library_not_loaded_exception::library_not_loaded_exception(string const& message) : runtime_error(message) { } invalid_conversion::invalid_conversion(string const& message) : runtime_error(message) { } #define LOAD_SYMBOL(x) x(reinterpret_cast(library.find_symbol(#x, true))) #define LOAD_ALIASED_SYMBOL(x, y) x(reinterpret_cast(library.find_symbol(#x, true, #y))) #define LOAD_OPTIONAL_SYMBOL(x) x(reinterpret_cast(library.find_symbol(#x))) api::api(lth_lib::dynamic_library library) : LOAD_SYMBOL(rb_intern), LOAD_SYMBOL(rb_const_get), LOAD_SYMBOL(rb_const_set), LOAD_SYMBOL(rb_const_remove), LOAD_SYMBOL(rb_const_defined), LOAD_SYMBOL(rb_define_module), LOAD_SYMBOL(rb_define_module_under), LOAD_SYMBOL(rb_define_class_under), LOAD_SYMBOL(rb_define_method), LOAD_SYMBOL(rb_define_singleton_method), LOAD_SYMBOL(rb_class_new_instance), LOAD_SYMBOL(rb_gv_get), LOAD_SYMBOL(rb_eval_string), LOAD_SYMBOL(rb_funcall), LOAD_ALIASED_SYMBOL(rb_funcallv, rb_funcall2), LOAD_SYMBOL(rb_proc_new), LOAD_SYMBOL(rb_block_call), LOAD_SYMBOL(rb_funcall_passing_block), LOAD_SYMBOL(rb_num2ull), LOAD_SYMBOL(rb_num2ll), LOAD_SYMBOL(rb_num2dbl), LOAD_SYMBOL(rb_string_value_ptr), LOAD_SYMBOL(rb_rescue2), LOAD_SYMBOL(rb_protect), LOAD_SYMBOL(rb_jump_tag), LOAD_SYMBOL(rb_int2inum), LOAD_SYMBOL(rb_ll2inum), LOAD_SYMBOL(rb_enc_str_new), LOAD_SYMBOL(rb_utf8_encoding), LOAD_SYMBOL(rb_str_encode), LOAD_SYMBOL(rb_load), LOAD_SYMBOL(rb_raise), LOAD_SYMBOL(rb_block_proc), LOAD_SYMBOL(rb_block_given_p), LOAD_SYMBOL(rb_gc_register_address), LOAD_SYMBOL(rb_gc_unregister_address), LOAD_SYMBOL(rb_hash_foreach), LOAD_SYMBOL(rb_define_attr), 
LOAD_SYMBOL(rb_ivar_set), LOAD_SYMBOL(rb_ivar_get), LOAD_ALIASED_SYMBOL(rb_float_new_in_heap, rb_float_new), LOAD_ALIASED_SYMBOL(rb_ary_new_capa, rb_ary_new2), LOAD_SYMBOL(rb_ary_push), LOAD_SYMBOL(rb_ary_entry), LOAD_SYMBOL(rb_hash_new), LOAD_SYMBOL(rb_hash_aset), LOAD_SYMBOL(rb_hash_lookup), LOAD_SYMBOL(rb_hash_lookup2), LOAD_SYMBOL(rb_sym_to_s), LOAD_SYMBOL(rb_to_id), LOAD_SYMBOL(rb_id2name), LOAD_SYMBOL(rb_define_alloc_func), LOAD_ALIASED_SYMBOL(rb_data_object_alloc, rb_data_object_wrap), LOAD_SYMBOL(rb_gc_mark), LOAD_SYMBOL(rb_yield_values), LOAD_SYMBOL(rb_require), LOAD_SYMBOL(rb_last_status_set), LOAD_SYMBOL(rb_cObject), LOAD_SYMBOL(rb_cArray), LOAD_SYMBOL(rb_cHash), LOAD_SYMBOL(rb_cString), LOAD_SYMBOL(rb_cSymbol), LOAD_SYMBOL(rb_cFloat), LOAD_SYMBOL(rb_cInteger), LOAD_SYMBOL(rb_eException), LOAD_SYMBOL(rb_eArgError), LOAD_SYMBOL(rb_eTypeError), LOAD_SYMBOL(rb_eStandardError), LOAD_SYMBOL(rb_eRuntimeError), LOAD_SYMBOL(rb_eLoadError), LOAD_OPTIONAL_SYMBOL(ruby_setup), LOAD_SYMBOL(ruby_init), LOAD_SYMBOL(ruby_options), LOAD_SYMBOL(ruby_cleanup), _library(move(library)) { } api::~api() { uninitialize(); } api& api::instance() { static api instance { create() }; return instance; } lth_lib::dynamic_library api::create() { lth_lib::dynamic_library library = find_library(); if (!library.loaded()) { throw library_not_loaded_exception(_("could not locate a ruby library")); } else if (library.first_load()) { LOG_INFO("ruby loaded from \"{1}\".", library.name()); } else { LOG_INFO("ruby was already loaded."); } return library; } void api::initialize() { if (_initialized) { return; } // Prefer ruby_setup over ruby_init if present (2.0+) // If ruby is already initialized, this is a no-op if (ruby_setup) { ruby_setup(); } else { ruby_init(); } if (_library.first_load()) { // Run an empty script evaluation // ruby_options is a required call as it sets up some important stuff (unfortunately) char const* opts[] = { "ruby", "-e", "" }; // Check for bundler; this is the only ruby option we support string ruby_opt; if (environment::get("RUBYOPT", ruby_opt) && boost::starts_with(ruby_opt, "-rbundler/setup")) { environment::set("RUBYOPT", "-rbundler/setup"); } else { // Clear RUBYOPT so that only our options are used. environment::set("RUBYOPT", ""); } ruby_options(sizeof(opts) / sizeof(opts[0]), const_cast(opts)); } // Get the values for nil, true, and false // We do this because these are not constant across ruby versions _nil = rb_ivar_get(*rb_cObject, rb_intern("@expected_to_be_nil")); _true = rb_funcall(_nil, rb_intern("nil?"), 0); _false = rb_funcall(_true, rb_intern("nil?"), 0); // Delay logging until now; to_string depends on _nil. LOG_INFO("using ruby version {1}", to_string(rb_const_get(*rb_cObject, rb_intern("RUBY_VERSION")))); // Set SIGINT handling to system default // This prevents ruby from raising an interrupt exception. 
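        // Equivalent Ruby for the call below: trap('INT', 'SYSTEM_DEFAULT')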
rb_funcall(*rb_cObject, rb_intern("trap"), 2, utf8_value("INT"), utf8_value("SYSTEM_DEFAULT")); _initialized = true; } bool api::initialized() const { return _initialized; } void api::uninitialize() { if (_initialized && _library.first_load()) { ruby_cleanup(0); _initialized = false; } // API is shutting down; free all remaining data objects // Destructors may unregister the data object, so increment the iterator before freeing for (auto it = _data_objects.begin(); it != _data_objects.end();) { auto data = reinterpret_cast(*it); ++it; if (data->dfree) { data->dfree(data->data); data->dfree = nullptr; data->dmark = nullptr; } } _data_objects.clear(); } bool api::include_stack_trace() const { return _include_stack_trace; } void api::include_stack_trace(bool value) { _include_stack_trace = value; } vector api::get_load_path() const { vector directories; array_for_each(rb_gv_get("$LOAD_PATH"), [&](VALUE value) { string path = to_string(value); // Ignore "." as a load path (present in 1.8.7) if (path == ".") { return false; } directories.emplace_back(move(path)); return true; }); return directories; } size_t api::num2size_t(VALUE v) const { auto size = rb_num2ull(v); if (size > numeric_limits::max()) { throw invalid_conversion(_("size_t maximum exceeded, requested size was {1}", to_string(size))); } return static_cast(size); } string api::to_string(VALUE v) const { v = rb_funcall(v, rb_intern("to_s"), 0); v = rb_str_encode(v, utf8_value("UTF-8"), 0, _nil); return string(rb_string_value_ptr(&v), num2size_t(rb_funcall(v, rb_intern("bytesize"), 0))); } VALUE api::to_symbol(string const& s) const { return rb_funcall(utf8_value(s), rb_intern("to_sym"), 0); } VALUE api::utf8_value(char const* s, size_t len) const { return rb_enc_str_new(s, len, rb_utf8_encoding()); } VALUE api::utf8_value(char const* s) const { return utf8_value(s, strlen(s)); } VALUE api::utf8_value(std::string const& s) const { return utf8_value(s.c_str(), s.size()); } VALUE api::rescue(function callback, function rescue) const { return rb_rescue2( RUBY_METHOD_FUNC(callback_thunk), reinterpret_cast(&callback), RUBY_METHOD_FUNC(rescue_thunk), reinterpret_cast(&rescue), *rb_eException, 0); } VALUE api::protect(int& tag, function callback) const { return rb_protect( callback_thunk, reinterpret_cast(&callback), &tag); } VALUE api::callback_thunk(VALUE parameter) { auto callback = reinterpret_cast*>(parameter); return (*callback)(); } VALUE api::rescue_thunk(VALUE parameter, VALUE exception) { auto rescue = reinterpret_cast*>(parameter); return (*rescue)(exception); } void api::array_for_each(VALUE array, std::function callback) const { long size = array_len(array); for (long i = 0; i < size; ++i) { if (!callback(rb_ary_entry(array, i))) { break; } } } void api::hash_for_each(VALUE hash, function callback) const { rb_hash_foreach(hash, reinterpret_cast(hash_for_each_thunk), reinterpret_cast(&callback)); } int api::hash_for_each_thunk(VALUE key, VALUE value, VALUE arg) { auto callback = reinterpret_cast*>(arg); return (*callback)(key, value) ? 
0 /* continue */ : 1 /* stop */; } string api::exception_to_string(VALUE ex, string const& message) const { ostringstream result; if (message.empty()) { result << to_string(ex); } else { result << message; } if (_include_stack_trace) { result << "\nbacktrace:\n"; // Append ex.backtrace.join('\n') result << to_string(rb_funcall(rb_funcall(ex, rb_intern("backtrace"), 0), rb_intern("join"), 1, utf8_value("\n"))); } return result.str(); } bool api::is_a(VALUE value, VALUE klass) const { return rb_funcall(value, rb_intern("is_a?"), 1, klass) != 0; } bool api::is_nil(VALUE value) const { return value == _nil; } bool api::is_true(VALUE value) const { return value == _true; } bool api::is_false(VALUE value) const { return value == _false; } bool api::is_hash(VALUE value) const { return is_a(value, *rb_cHash); } bool api::is_array(VALUE value) const { return is_a(value, *rb_cArray); } bool api::is_string(VALUE value) const { return is_a(value, *rb_cString); } bool api::is_symbol(VALUE value) const { return is_a(value, *rb_cSymbol); } bool api::is_integer(VALUE value) const { return is_a(value, *rb_cInteger); } bool api::is_float(VALUE value) const { return is_a(value, *rb_cFloat); } VALUE api::nil_value() const { return _nil; } VALUE api::true_value() const { return _true; } VALUE api::false_value() const { return _false; } long api::array_len(VALUE array) const { // This is used for rb_ary_entry, which only accepts a 'long'. So we only expect to // encounter long values here. auto size = rb_num2ull(rb_funcall(array, rb_intern("size"), 0)); if (size > numeric_limits::max()) { throw invalid_conversion(_("maximum array size exceeded, reported size was {1}", to_string(size))); } return static_cast(size); } VALUE api::lookup(std::initializer_list const& names) const { volatile VALUE current = *rb_cObject; for (auto const& name : names) { current = rb_const_get(current, rb_intern(name.c_str())); } return current; } bool api::equals(VALUE first, VALUE second) const { return is_true(rb_funcall(first, rb_intern("eql?"), 1, second)); } bool api::case_equals(VALUE first, VALUE second) const { return is_true(rb_funcall(first, rb_intern("==="), 1, second)); } VALUE api::eval(const string& code) { std::string exception; VALUE result = rescue( [&]() { return rb_eval_string(code.c_str()); }, [&](VALUE exc) { exception = exception_to_string(exc); return nil_value(); }); if (!exception.empty()) { throw runtime_error(exception); } return result; } lth_lib::dynamic_library api::find_library() { // First search for an already loaded Ruby. auto library = find_loaded_library(); if (library.loaded()) { return library; } if (!ruby_lib_location.empty()) { // Ruby lib location was specified by the user, fix to that. if (library.load(ruby_lib_location)) { return library; } LOG_WARNING("preferred ruby library \"{1}\" could not be loaded.", ruby_lib_location); } // Next try an environment variable. // This allows users to directly specify the ruby version to use. string value; if (environment::get("LEATHERMAN_RUBY", value)) { if (library.load(value)) { return library; } else { LOG_WARNING("ruby library \"{1}\" could not be loaded.", value); } } // Search the path for ruby.exe and query it for the location of its library. 
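        // The one-liner passed to ruby below checks libdir, archlibdir, sitearchlibdir and bindir
        // for RbConfig::CONFIG['LIBRUBY_SO'] and prints the first file that exists, e.g. a path
        // such as /usr/lib/libruby.so.2.4 (example path only).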
string ruby = execution::which("ruby"); if (ruby.empty()) { LOG_DEBUG("ruby could not be found on the PATH."); return library; } LOG_DEBUG("ruby was found at \"{1}\".", ruby); auto exec = execute(ruby, { "-e", "print(['libdir', 'archlibdir', 'sitearchlibdir', 'bindir'].find do |name|" "dir = RbConfig::CONFIG[name];" "next unless dir;" "file = File.join(dir, RbConfig::CONFIG['LIBRUBY_SO']);" "break file if File.exist? file;" "false end)" }); if (!exec.success) { LOG_WARNING("ruby failed to run: {1}", exec.output); return library; } boost::system::error_code ec; if (!exists(exec.output, ec) || is_directory(exec.output, ec)) { LOG_DEBUG("ruby library \"{1}\" was not found: ensure ruby was built with the --enable-shared configuration option.", exec.output); return library; } library.load(exec.output); return library; } }} // namespace leatherman::ruby leatherman-1.4.2+dfsg/ruby/src/posix/000075500000000000000000000000001332360634000174655ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/src/posix/api.cc000064400000000000000000000005021332360634000205420ustar00rootroot00000000000000#include using namespace std; namespace lth_lib = leatherman::dynamic_library; namespace leatherman { namespace ruby { lth_lib::dynamic_library api::find_loaded_library() { return lth_lib::dynamic_library::find_by_symbol("ruby_init"); } }} // namespace leatherman::ruby leatherman-1.4.2+dfsg/ruby/src/windows/000075500000000000000000000000001332360634000200155ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/src/windows/api.cc000064400000000000000000000012601332360634000210740ustar00rootroot00000000000000#include using namespace std; namespace lth_lib = leatherman::dynamic_library; namespace leatherman { namespace ruby { lth_lib::dynamic_library api::find_loaded_library() { // Ruby DLL's follow a pattern of // ruby.dll, libruby.dll, ruby210.dll, libruby210.dll // msvcrt-ruby193.dll, x64-msvcrt-ruby210.dll, etc // To avoid detecting leatherman_ruby.dll as a Ruby DLL, look for // anything except an underscore. 
const string libruby_pattern = "^[^_]*ruby(\\d)?(\\d)?(\\d)?\\.dll$"; return lth_lib::dynamic_library::find_by_pattern(libruby_pattern); } }} // namespace leatherman::ruby leatherman-1.4.2+dfsg/ruby/tests/000075500000000000000000000000001332360634000166765ustar00rootroot00000000000000leatherman-1.4.2+dfsg/ruby/tests/api-test.cc000064400000000000000000000153471332360634000207450ustar00rootroot00000000000000#include #include #include using namespace std; using namespace leatherman::ruby; TEST_CASE("api::eval", "[ruby-api]") { SECTION("can load api and evaluate ruby code") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); REQUIRE(ruby.get_load_path().size() > 0u); REQUIRE(ruby.to_string(ruby.eval("'foo'")) == "foo"); } } TEST_CASE("api::is_*", "[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can correctly identify nil values") { REQUIRE(ruby.is_nil(ruby.nil_value())); REQUIRE_FALSE(ruby.is_nil(ruby.true_value())); } SECTION("can correctly identify true and false values") { REQUIRE(ruby.is_true(ruby.true_value())); REQUIRE_FALSE(ruby.is_true(ruby.false_value())); REQUIRE(ruby.is_false(ruby.false_value())); REQUIRE_FALSE(ruby.is_false(ruby.true_value())); } SECTION("can correctly identify strings") { REQUIRE(ruby.is_string(ruby.utf8_value("'I'm a string'"))); REQUIRE_FALSE(ruby.is_string(ruby.true_value())); } SECTION("can correctly identify symbols") { REQUIRE(ruby.is_symbol(ruby.to_symbol("mysymbol"))); REQUIRE_FALSE(ruby.is_symbol(ruby.false_value())); } SECTION("can correctly identify numbers") { REQUIRE(ruby.is_float(ruby.eval("1.5"))); REQUIRE_FALSE(ruby.is_float(ruby.utf8_value("foo"))); REQUIRE(ruby.is_integer(ruby.eval("2"))); REQUIRE_FALSE(ruby.is_integer(ruby.eval("1.5"))); } SECTION("can correctly identify hashes") { REQUIRE(ruby.is_hash(ruby.eval("{ 'red' => 2 }"))); REQUIRE_FALSE(ruby.is_hash(ruby.utf8_value("foo"))); } SECTION("can correctly identify type") { REQUIRE(ruby.is_a(ruby.eval("1"), ruby.eval("Integer"))); REQUIRE_FALSE(ruby.is_a(ruby.eval("'1'"), ruby.eval("Integer"))); } SECTION("can correctly identify arrays") { REQUIRE(ruby.is_array(ruby.eval("[1, 2, 3]"))); REQUIRE_FALSE(ruby.is_array(ruby.false_value())); } } TEST_CASE("api::equals", "[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can correctly test boolean values for equality") { REQUIRE(ruby.equals(ruby.true_value(), ruby.true_value())); REQUIRE_FALSE(ruby.equals(ruby.true_value(), ruby.false_value())); } SECTION("can correctly test strings for equality") { REQUIRE(ruby.equals(ruby.utf8_value("foo"), ruby.utf8_value("foo"))); REQUIRE_FALSE(ruby.equals(ruby.utf8_value("foo"), ruby.utf8_value("bar"))); } SECTION("can correctly test numbers for equality") { REQUIRE(ruby.equals(ruby.eval("1"), ruby.eval("1"))); REQUIRE_FALSE(ruby.equals(ruby.eval("1"), ruby.eval("3"))); REQUIRE(ruby.equals(ruby.eval("1.5"), ruby.eval("1.5"))); REQUIRE_FALSE(ruby.equals(ruby.eval("1.5"), ruby.eval("1"))); } SECTION("can correctly test Ruby hashes for equality") { REQUIRE(ruby.equals(ruby.eval("{ 'red' => 'blue' }"), ruby.eval("{ 'red' => 'blue' }"))); REQUIRE_FALSE(ruby.equals(ruby.eval("{ 'red' => 'blue' }"), ruby.eval("{ 'red' => 'green' }"))); } SECTION("can correctly test symbols for equality") { REQUIRE(ruby.equals(ruby.to_symbol("mysymbol"), ruby.eval(":mysymbol"))); REQUIRE_FALSE(ruby.equals(ruby.to_symbol("mysymbol"), ruby.to_symbol("notmysymbol"))); } } TEST_CASE("api::case_equals", 
"[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can detect class membership") { REQUIRE(ruby.case_equals(ruby.eval("Integer"), ruby.eval("1"))); REQUIRE_FALSE(ruby.case_equals(ruby.eval("String"), ruby.eval("4"))); } } VALUE test_func(VALUE self) { auto& ruby = api::instance(); ruby.initialize(); return ruby.utf8_value("test function"); } TEST_CASE("api::rb_define_singleton_method", "[ruby-api]") { SECTION("can define a new module with a new method") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); auto module = ruby.rb_define_module("Test"); REQUIRE(module); ruby.rb_define_singleton_method(module, "test_func", RUBY_METHOD_FUNC(test_func), 0); REQUIRE(ruby.to_string(ruby.eval("Test.test_func")) == "test function"); } } TEST_CASE("api::exception_to_string", "[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can print exception details") { try { ruby.eval("raise 'test_exception'"); } catch (runtime_error exc) { REQUIRE(string(exc.what()) == "test_exception"); } } SECTION("can print exception details with stack trace") { ruby.include_stack_trace(true); try { ruby.eval("raise 'test_exception'"); } catch (runtime_error exc) { REQUIRE(string(exc.what()).find("backtrace") != string::npos); } } } TEST_CASE("api::lookup", "[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can find module by name") { auto foo_module = ruby.rb_define_module("Foo"); ruby.rb_define_module_under(foo_module, "Bar"); REQUIRE(ruby.to_string(ruby.lookup({ "Foo", "Bar" })) == "Foo::Bar"); } } TEST_CASE("api::to_string", "[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can normalize encodings") { string john {"J\xc3\xb6hn"}; auto obj = ruby.utf8_value(john); auto encoded = ruby.rb_funcall(obj, ruby.rb_intern("encode"), 1, ruby.utf8_value("Windows-1252")); REQUIRE(ruby.to_string(encoded) == john); } } TEST_CASE("api::num2size_t", "[ruby-api]") { auto& ruby = api::instance(); ruby.initialize(); REQUIRE(ruby.initialized()); SECTION("can convert Ruby number to size_t") { auto fixednum = ruby.eval("1"); auto num = ruby.num2size_t(fixednum); REQUIRE(1u == num); } SECTION("can convert large Ruby number to size_t") { auto expected = numeric_limits::max(); auto largenum = ruby.eval(to_string(expected)); auto num = ruby.num2size_t(largenum); REQUIRE(expected == num); } #if 0 // Can't use this test yet, because Ruby SIGSEGVs on calling rb_num2ull. SECTION("throws exception on Ruby numbers exceeding size_t") { auto largenum = ruby.eval("184467440737095516150"); REQUIRE_THROWS_AS(ruby.num2size_t(largenum), runtime_error); } #endif } leatherman-1.4.2+dfsg/scripts/000075500000000000000000000000001332360634000162425ustar00rootroot00000000000000leatherman-1.4.2+dfsg/scripts/cpplint.py000075500000000000000000007303471332360634000203060ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright (c) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. 
# * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Does google-lint on c++ files. The goal of this script is to identify places in the code that *may* be in non-compliance with google style. It does not attempt to fix up these problems -- the point is to educate. It does also not attempt to find all problems, or to ensure that everything it does find is legitimately a problem. In particular, we can get very confused by /* and // inside strings! We do a small hack, which is to ignore //'s with "'s after them on the same line, but it is far from perfect (in either direction). """ import codecs import copy import getopt import math # for log import os import re import sre_compile import string import sys import unicodedata _USAGE = """ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] [--counting=total|toplevel|detailed] [--root=subdir] [--linelength=digits] [file] ... The style guidelines this tries to follow are those in http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml Every problem is given a confidence score from 1-5, with 5 meaning we are certain of the problem, and 1 meaning it could be a legitimate construct. This will miss some errors, and is not a substitute for a code review. To suppress false-positive errors of a certain category, add a 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) suppresses errors of all categories on that line. The files passed in will be linted; at least one file must be provided. Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the extensions with the --extensions flag. Flags: output=vs7 By default, the output is formatted to ease emacs parsing. Visual Studio compatible output (vs7) may also be used. Other formats are unsupported. verbose=# Specify a number 0-5 to restrict errors to certain verbosity levels. filter=-x,+y,... Specify a comma-separated list of category-filters to apply: only error messages whose category names pass the filters will be printed. (Category names are printed with the message and look like "[whitespace/indent]".) Filters are evaluated left to right. "-FOO" and "FOO" means "do not print categories that start with FOO". "+FOO" means "do print categories that start with FOO". 
Examples: --filter=-whitespace,+whitespace/braces --filter=whitespace,runtime/printf,+runtime/printf_format --filter=-,+build/include_what_you_use To see a list of all the categories used in cpplint, pass no arg: --filter= counting=total|toplevel|detailed The total number of errors found is always printed. If 'toplevel' is provided, then the count of errors in each of the top-level categories like 'build' and 'whitespace' will also be printed. If 'detailed' is provided, then a count is provided for each category like 'build/class'. root=subdir The root directory used for deriving header guard CPP variable. By default, the header guard CPP variable is calculated as the relative path to the directory that contains .git, .hg, or .svn. When this flag is specified, the relative path is calculated from the specified directory. If the specified directory does not exist, this flag is ignored. Examples: Assuming that src/.git exists, the header guard CPP variables for src/chrome/browser/ui/browser.h are: No flag => CHROME_BROWSER_UI_BROWSER_H_ --root=chrome => BROWSER_UI_BROWSER_H_ --root=chrome/browser => UI_BROWSER_H_ linelength=digits This is the allowed line length for the project. The default value is 80 characters. Examples: --linelength=120 extensions=extension,extension,... The allowed file extensions that cpplint will check Examples: --extensions=hpp,cpp cpplint.py supports per-directory configurations specified in CPPLINT.cfg files. CPPLINT.cfg file can contain a number of key=value pairs. Currently the following options are supported: set noparent filter=+filter1,-filter2,... exclude_files=regex linelength=80 "set noparent" option prevents cpplint from traversing directory tree upwards looking for more .cfg files in parent directories. This option is usually placed in the top-level project directory. The "filter" option is similar in function to --filter flag. It specifies message filters in addition to the |_DEFAULT_FILTERS| and those specified through --filter command-line flag. "exclude_files" allows to specify a regular expression to be matched against a file name. If the expression matches, the file is skipped and not run through liner. "linelength" allows to specify the allowed line length for the project. CPPLINT.cfg has an effect on files in the same directory and all sub-directories, unless overridden by a nested configuration file. Example file: filter=-build/include_order,+build/include_alpha exclude_files=.*\.cc The above example disables build/include_order warning and enables build/include_alpha as well as excludes all .cc from being processed by linter, in the current directory (where the .cfg file is located) and all sub-directories. """ # We categorize each error message we print. Here are the categories. # We want an explicit list so we can list them all in cpplint --filter=. # If you add a new error message with a new category, add it to the list # here! cpplint_unittest.py should tell you if you forget to do this. 
_ERROR_CATEGORIES = [ 'build/class', 'build/c++11', 'build/deprecated', 'build/endif_comment', 'build/explicit_make_pair', 'build/forward_decl', 'build/header_guard', 'build/include', 'build/include_alpha', 'build/include_order', 'build/include_what_you_use', 'build/namespaces', 'build/printf_format', 'build/storage_class', 'legal/copyright', 'readability/alt_tokens', 'readability/braces', 'readability/casting', 'readability/check', 'readability/constructors', 'readability/fn_size', 'readability/function', 'readability/inheritance', 'readability/multiline_comment', 'readability/multiline_string', 'readability/namespace', 'readability/nolint', 'readability/nul', 'readability/strings', 'readability/todo', 'readability/utf8', 'runtime/arrays', 'runtime/casting', 'runtime/explicit', 'runtime/int', 'runtime/init', 'runtime/invalid_increment', 'runtime/member_string_references', 'runtime/memset', 'runtime/indentation_namespace', 'runtime/operator', 'runtime/printf', 'runtime/printf_format', 'runtime/references', 'runtime/string', 'runtime/threadsafe_fn', 'runtime/vlog', 'whitespace/blank_line', 'whitespace/braces', 'whitespace/comma', 'whitespace/comments', 'whitespace/empty_conditional_body', 'whitespace/empty_loop_body', 'whitespace/end_of_line', 'whitespace/ending_newline', 'whitespace/forcolon', 'whitespace/indent', 'whitespace/line_length', 'whitespace/newline', 'whitespace/operators', 'whitespace/parens', 'whitespace/semicolon', 'whitespace/tab', 'whitespace/todo', ] # These error categories are no longer enforced by cpplint, but for backwards- # compatibility they may still appear in NOLINT comments. _LEGACY_ERROR_CATEGORIES = [ 'readability/streams', ] # The default state of the category filter. This is overridden by the --filter= # flag. By default all errors are on, so only add here categories that should be # off by default (i.e., categories that must be enabled by the --filter= flags). # All entries here should start with a '-' or '+', as in the --filter= flag. _DEFAULT_FILTERS = ['-build/include_alpha'] # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. 
# C++ headers _CPP_HEADERS = frozenset([ # Legacy 'algobase.h', 'algo.h', 'alloc.h', 'builtinbuf.h', 'bvector.h', 'complex.h', 'defalloc.h', 'deque.h', 'editbuf.h', 'fstream.h', 'function.h', 'hash_map', 'hash_map.h', 'hash_set', 'hash_set.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip.h', 'iostream.h', 'istream.h', 'iterator.h', 'list.h', 'map.h', 'multimap.h', 'multiset.h', 'ostream.h', 'pair.h', 'parsestream.h', 'pfstream.h', 'procbuf.h', 'pthread_alloc', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', 'set.h', 'slist', 'slist.h', 'stack.h', 'stdiostream.h', 'stl_alloc.h', 'stl_relops.h', 'streambuf.h', 'stream.h', 'strfile.h', 'strstream.h', 'tempbuf.h', 'tree.h', 'type_traits.h', 'vector.h', # 17.6.1.2 C++ library headers 'algorithm', 'array', 'atomic', 'bitset', 'chrono', 'codecvt', 'complex', 'condition_variable', 'deque', 'exception', 'forward_list', 'fstream', 'functional', 'future', 'initializer_list', 'iomanip', 'ios', 'iosfwd', 'iostream', 'istream', 'iterator', 'limits', 'list', 'locale', 'map', 'memory', 'mutex', 'new', 'numeric', 'ostream', 'queue', 'random', 'ratio', 'regex', 'set', 'sstream', 'stack', 'stdexcept', 'streambuf', 'string', 'strstream', 'system_error', 'thread', 'tuple', 'typeindex', 'typeinfo', 'type_traits', 'unordered_map', 'unordered_set', 'utility', 'valarray', 'vector', # 17.6.1.2 C++ headers for C library facilities 'cassert', 'ccomplex', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes', 'ciso646', 'climits', 'clocale', 'cmath', 'csetjmp', 'csignal', 'cstdalign', 'cstdarg', 'cstdbool', 'cstddef', 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctgmath', 'ctime', 'cuchar', 'cwchar', 'cwctype', ]) # These headers are excluded from [build/include] and [build/include_order] # checks: # - Anything not following google file name conventions (containing an # uppercase character, such as Python.h or nsStringAPI.h, for example). # - Lua headers. _THIRD_PARTY_HEADERS_PATTERN = re.compile( r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') # Assertion macros. These are defined in base/logging.h and # testing/base/gunit.h. Note that the _M versions need to come first # for substring matching to work. _CHECK_MACROS = [ 'DCHECK', 'CHECK', 'EXPECT_TRUE_M', 'EXPECT_TRUE', 'ASSERT_TRUE_M', 'ASSERT_TRUE', 'EXPECT_FALSE_M', 'EXPECT_FALSE', 'ASSERT_FALSE_M', 'ASSERT_FALSE', ] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'), ('<=', 'LE'), ('<', 'LT')]: _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]: _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. 
# # Digraphs (such as '%:') are not included here since it's a mess to # match those on a word boundary. _ALT_TOKEN_REPLACEMENT = { 'and': '&&', 'bitor': '|', 'or': '||', 'xor': '^', 'compl': '~', 'bitand': '&', 'and_eq': '&=', 'or_eq': '|=', 'xor_eq': '^=', 'not': '!', 'not_eq': '!=' } # Compile regular expression that matches all the above keywords. The "[ =()]" # bit is meant to avoid matching these keywords outside of boolean expressions. # # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') # These constants define types of headers for use with # _IncludeState.CheckNextIncludeOrder(). _C_SYS_HEADER = 1 _CPP_SYS_HEADER = 2 _LIKELY_MY_HEADER = 3 _POSSIBLE_MY_HEADER = 4 _OTHER_HEADER = 5 # These constants define the current inline assembly state _NO_ASM = 0 # Outside of inline assembly block _INSIDE_ASM = 1 # Inside inline assembly block _END_ASM = 2 # Last line of inline assembly block _BLOCK_ASM = 3 # The whole block is an inline assembly block # Match start of assembly blocks _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' r'(?:\s+(volatile|__volatile__))?' r'\s*[{(]') _regexp_compile_cache = {} # {str, set(int)}: a map from error categories to sets of linenumbers # on which those errors are expected and should be suppressed. _error_suppressions = {} # The root directory used for deriving header guard CPP variable. # This is set by --root flag. _root = None # The allowed line length of files. # This is set by --linelength flag. _line_length = 80 # The allowed extensions for file names # This is set by --extensions flag. _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) def ParseNolintSuppressions(filename, raw_line, linenum, error): """Updates the global list of error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler. """ matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) if matched: if matched.group(1): suppressed_line = linenum + 1 else: suppressed_line = linenum category = matched.group(2) if category in (None, '(*)'): # => "suppress all" _error_suppressions.setdefault(None, set()).add(suppressed_line) else: if category.startswith('(') and category.endswith(')'): category = category[1:-1] if category in _ERROR_CATEGORIES: _error_suppressions.setdefault(category, set()).add(suppressed_line) elif category not in _LEGACY_ERROR_CATEGORIES: error(filename, linenum, 'readability/nolint', 5, 'Unknown NOLINT error category: %s' % category) def ResetNolintSuppressions(): """Resets the set of NOLINT suppressions to empty.""" _error_suppressions.clear() def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment. 
""" return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set())) def Match(pattern, s): """Matches the string with the pattern, caching the compiled regexp.""" # The regexp compilation caching is inlined in both Match and Search for # performance reasons; factoring it out into a separate function turns out # to be noticeably expensive. if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].match(s) def ReplaceAll(pattern, rep, s): """Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements) """ if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].sub(rep, s) def Search(pattern, s): """Searches the string for the pattern, caching the compiled regexp.""" if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].search(s) class _IncludeState(object): """Tracks line numbers for includes, and the order in which includes appear. include_list contains list of lists of (header, line number) pairs. It's a lists of lists rather than just one flat list to make it easier to update across preprocessor boundaries. Call CheckNextIncludeOrder() once for each header in the file, passing in the type constants defined above. Calls in an illegal order will raise an _IncludeError with an appropriate error message. """ # self._section will move monotonically through this set. If it ever # needs to move backwards, CheckNextIncludeOrder will raise an error. _INITIAL_SECTION = 0 _MY_H_SECTION = 1 _C_SECTION = 2 _CPP_SECTION = 3 _OTHER_H_SECTION = 4 _TYPE_NAMES = { _C_SYS_HEADER: 'C system header', _CPP_SYS_HEADER: 'C++ system header', _LIKELY_MY_HEADER: 'header this file implements', _POSSIBLE_MY_HEADER: 'header this file may implement', _OTHER_HEADER: 'other header', } _SECTION_NAMES = { _INITIAL_SECTION: "... nothing. (This can't be an error.)", _MY_H_SECTION: 'a header this file implements', _C_SECTION: 'C system header', _CPP_SECTION: 'C++ system header', _OTHER_H_SECTION: 'other header', } def __init__(self): self.include_list = [[]] self.ResetSection('') def FindHeader(self, header): """Check if a header has already been included. Args: header: header to check. Returns: Line number of previous occurrence, or -1 if the header has not been seen before. """ for section_list in self.include_list: for f in section_list: if f[0] == header: return f[1] return -1 def ResetSection(self, directive): """Reset section checking for preprocessor directive. Args: directive: preprocessor directive (e.g. "if", "else"). """ # The name of the current section. self._section = self._INITIAL_SECTION # The path of last found header. self._last_header = '' # Update list of includes. Note that we never pop from the # include list. if directive in ('if', 'ifdef', 'ifndef'): self.include_list.append([]) elif directive in ('else', 'elif'): self.include_list[-1] = [] def SetLastHeader(self, header_path): self._last_header = header_path def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. 
- removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. """ return header_path.replace('-inl.h', '.h').replace('-', '_').lower() def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): """Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order. """ # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): return False return True def CheckNextIncludeOrder(self, header_type): """Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong. """ error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return '' class _CppLintState(object): """Maintains module-wide state..""" def __init__(self): self.verbose_level = 1 # global setting. self.error_count = 0 # global count of reported errors # filters to apply when emitting error messages self.filters = _DEFAULT_FILTERS[:] # backup of filter list. Used to restore the state after each file. self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? 
self.errors_by_category = {} # string to int dict storing error counts # output format: # "emacs" - format that emacs can parse (default) # "vs7" - format that Microsoft Visual Studio 7 can parse self.output_format = 'emacs' def SetOutputFormat(self, output_format): """Sets the output format for errors.""" self.output_format = output_format def SetVerboseLevel(self, level): """Sets the module's verbosity, and returns the previous setting.""" last_verbose_level = self.verbose_level self.verbose_level = level return last_verbose_level def SetCountingStyle(self, counting_style): """Sets the module's counting options.""" self.counting = counting_style def SetFilters(self, filters): """Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" """ # Default filters always have less priority than the flag ones. self.filters = _DEFAULT_FILTERS[:] self.AddFilters(filters) def AddFilters(self, filters): """ Adds more filters to the existing list of error-message filters. """ for filt in filters.split(','): clean_filt = filt.strip() if clean_filt: self.filters.append(clean_filt) for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' ' (%s does not)' % filt) def BackupFilters(self): """ Saves the current filter list to backup storage.""" self._filters_backup = self.filters[:] def RestoreFilters(self): """ Restores filters previously backed up.""" self.filters = self._filters_backup[:] def ResetErrorCounts(self): """Sets the module's error statistic back to zero.""" self.error_count = 0 self.errors_by_category = {} def IncrementErrorCount(self, category): """Bumps the module's error statistic.""" self.error_count += 1 if self.counting in ('toplevel', 'detailed'): if self.counting != 'detailed': category = category.split('/')[0] if category not in self.errors_by_category: self.errors_by_category[category] = 0 self.errors_by_category[category] += 1 def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count) _cpplint_state = _CppLintState() def _OutputFormat(): """Gets the module's output format.""" return _cpplint_state.output_format def _SetOutputFormat(output_format): """Sets the module's output format.""" _cpplint_state.SetOutputFormat(output_format) def _VerboseLevel(): """Returns the module's verbosity setting.""" return _cpplint_state.verbose_level def _SetVerboseLevel(level): """Sets the module's verbosity, and returns the previous setting.""" return _cpplint_state.SetVerboseLevel(level) def _SetCountingStyle(level): """Sets the module's counting options.""" _cpplint_state.SetCountingStyle(level) def _Filters(): """Returns the module's list of output filters, as a list.""" return _cpplint_state.filters def _SetFilters(filters): """Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). 
Each filter should start with + or -; else we die. """ _cpplint_state.SetFilters(filters) def _AddFilters(filters): """Adds more filter overrides. Unlike _SetFilters, this function does not reset the current list of filters available. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.AddFilters(filters) def _BackupFilters(): """ Saves the current filter list to backup storage.""" _cpplint_state.BackupFilters() def _RestoreFilters(): """ Restores filters previously backed up.""" _cpplint_state.RestoreFilters() class _FunctionState(object): """Tracks current function name and the number of lines in its body.""" _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. def __init__(self): self.in_a_function = False self.lines_in_function = 0 self.current_function = '' def Begin(self, function_name): """Start analyzing function body. Args: function_name: The name of the function being tracked. """ self.in_a_function = True self.lines_in_function = 0 self.current_function = function_name def Count(self): """Count line in current function body.""" if self.in_a_function: self.lines_in_function += 1 def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger)) def End(self): """Stop analyzing function body.""" self.in_a_function = False class _IncludeError(Exception): """Indicates a problem with the include order in a file.""" pass class FileInfo(object): """Provides utility functions for filenames. FileInfo provides easy access to the components of a file's path relative to the project root. """ def __init__(self, filename): self._filename = filename def FullName(self): """Make Windows paths like Unix.""" return os.path.abspath(self._filename).replace('\\', '/') def RepositoryName(self): """FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. 
""" fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname def Split(self): """Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension). """ googlename = self.RepositoryName() project, rest = os.path.split(googlename) return (project,) + os.path.splitext(rest) def BaseName(self): """File base name - text after the final slash, before the final period.""" return self.Split()[1] def Extension(self): """File extension - text following the final period.""" return self.Split()[2] def NoExtension(self): """File has no source file extension.""" return '/'.join(self.Split()[0:2]) def IsSource(self): """File has a source file extension.""" return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') def _ShouldPrintError(category, confidence, linenum): """If confidence >= verbose, category passes filter and is not suppressed.""" # There are three ways we might decide not to print an error message: # a "NOLINT(category)" comment appears in the source, # the verbosity level isn't high enough, or the filters filter it out. if IsErrorSuppressedByNolint(category, linenum): return False if confidence < _cpplint_state.verbose_level: return False is_filtered = False for one_filter in _Filters(): if one_filter.startswith('-'): if category.startswith(one_filter[1:]): is_filtered = True elif one_filter.startswith('+'): if category.startswith(one_filter[1:]): is_filtered = False else: assert False # should have been checked for in SetFilter. if is_filtered: return False return True def Error(filename, linenum, category, confidence, message): """Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". 
Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message. """ if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) else: sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') # Match a single C style comment on the same line. _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/' # Matches multi-line C style comments. # This RE is a little bit more complicated than one might expect, because we # have to take care of space removals tools so we can handle comments inside # statements better. # The current rule is: We only clear spaces from both sides when we're at the # end of the line. Otherwise, we try to remove spaces from the right side, # if this doesn't work we try on left side but only if there's a non-character # on the right. _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' + _RE_PATTERN_C_COMMENTS + r'\s+|' + r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' + _RE_PATTERN_C_COMMENTS + r')') def IsCppString(line): """Does line terminate so, that the next symbol is in string constant. This function does not consider single-line nor multi-line comments. Args: line: is a partial line of code starting from the 0..n. Returns: True, if next character appended to 'line' is inside a string constant. """ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 def CleanseRawStrings(raw_lines): """Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings. """ delimiter = None lines_without_raw_strings = [] for line in raw_lines: if delimiter: # Inside a raw string, look for the end end = line.find(delimiter) if end >= 0: # Found the end of the string, match leading space for this # line and resume copying the original lines, and also insert # a "" on the last line. leading_space = Match(r'^(\s*)\S', line) line = leading_space.group(1) + '""' + line[end + len(delimiter):] delimiter = None else: # Haven't found the end yet, append a blank line. line = '""' # Look for beginning of a raw string, and replace them with # empty strings. This is done in a loop to handle multiple raw # strings on the same line. while delimiter is None: # Look for beginning of a raw string. # See 2.14.15 [lex.string] for syntax. 
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) if matched: delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) if end >= 0: # Raw string ended on same line line = (matched.group(1) + '""' + matched.group(3)[end + len(delimiter):]) delimiter = None else: # Start of a multi-line raw string line = matched.group(1) + '""' else: break lines_without_raw_strings.append(line) # TODO(unknown): if delimiter is not None here, we might want to # emit a warning for unterminated string. return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): """Find the beginning marker for a multiline comment.""" while lineix < len(lines): if lines[lineix].strip().startswith('/*'): # Only return this marker if the comment goes beyond this line if lines[lineix].strip().find('*/', 2) < 0: return lineix lineix += 1 return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '/**/' def RemoveMultiLineComments(filename, lines, error): """Removes multiline (c-style) comments from lines.""" lineix = 0 while lineix < len(lines): lineix_begin = FindNextMultiLineCommentStart(lines, lineix) if lineix_begin >= len(lines): return lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) if lineix_end >= len(lines): error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) lineix = lineix_end + 1 def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) class CleansedLines(object): """Holds 4 copies of all lines with different preprocessing applied to them. 1) elided member contains lines without strings and comments. 2) lines member contains lines without comments. 3) raw_lines member contains all the lines without processing. 4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw strings removed. All these members are of , and of the same length. """ def __init__(self, lines): self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) for linenum in range(len(self.lines_without_raw_strings)): self.lines.append(CleanseComments( self.lines_without_raw_strings[linenum])) elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) self.elided.append(CleanseComments(elided)) def NumLines(self): """Returns the number of lines represented.""" return self.num_lines @staticmethod def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. 
""" if _RE_PATTERN_INCLUDE.match(elided): return elided # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) # Replace quoted strings and digit separators. Both single quotes # and double quotes are processed in the same loop, otherwise # nested quotes wouldn't work. collapsed = '' while True: # Find the first quote character match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break head, quote, tail = match.groups() if quote == '"': # Collapse double quoted strings second_quote = tail.find('"') if second_quote >= 0: collapsed += head + '""' elided = tail[second_quote + 1:] else: # Unmatched double quote, don't bother processing the rest # of the line since this is probably a multiline string. collapsed += elided break else: # Found single quote, check nearby text to eliminate digit separators. # # There is no special handling for floating point here, because # the integer/fractional/exponent parts would all be parsed # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: second_quote = tail.find('\'') if second_quote >= 0: collapsed += head + "''" elided = tail[second_quote + 1:] else: # Unmatched single quote collapsed += elided break return collapsed def FindEndOfExpressionInLine(line, startpos, stack): """Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line) """ for i in xrange(startpos, len(line)): char = line[i] if char in '([{': # Found start of parenthesized expression, push to expression stack stack.append(char) elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator if stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) elif i > 0 and Search(r'\boperator\s*$', line[0:i]): # operator<, don't add to stack continue else: # Tentative start of template argument list stack.append('<') elif char in ')]}': # Found end of parenthesized expression. # # If we are currently expecting a matching '>', the pending '<' # must have been an operator. Remove them from expression stack. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) if ((stack[-1] == '(' and char == ')') or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}')): stack.pop() if not stack: return (i + 1, None) else: # Mismatched parentheses return (-1, None) elif char == '>': # Found potential end of template argument list. # Ignore "->" and operator functions if (i > 0 and (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): continue # Pop the stack if there is a matching '<'. Otherwise, ignore # this '>' since it must be an operator. 
if stack: if stack[-1] == '<': stack.pop() if not stack: return (i + 1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '>', the matching '<' must have been an operator, since # template argument list should not contain statements. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) # Did not find end of expression or unbalanced parentheses on this line return (-1, stack) def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) # Check first line (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) if end_pos > -1: return (line, linenum, end_pos) # Continue scanning forward while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) if end_pos > -1: return (line, linenum, end_pos) # Did not find end of expression before end of file, give up return (line, clean_lines.NumLines(), -1) def FindStartOfExpressionInLine(line, endpos, stack): """Find position at the matching start of current expression. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. stack: nesting stack at endpos. Returns: On finding matching start: (index at matching start, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at beginning of this line) """ i = endpos while i >= 0: char = line[i] if char in ')]}': # Found end of expression, push to expression stack stack.append(char) elif char == '>': # Found potential end of template argument list. # # Ignore it if it's a "->" or ">=" or "operator>" if (i > 0 and (line[i - 1] == '-' or Match(r'\s>=\s', line[i - 1:]) or Search(r'\boperator\s*$', line[0:i]))): i -= 1 else: stack.append('>') elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator i -= 1 else: # If there is a matching '>', we can pop the expression stack. # Otherwise, ignore this '<' since it must be an operator. if stack and stack[-1] == '>': stack.pop() if not stack: return (i, None) elif char in '([{': # Found start of expression. # # If there are any unmatched '>' on the stack, they must be # operators. Remove those. 
while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) if ((char == '(' and stack[-1] == ')') or (char == '[' and stack[-1] == ']') or (char == '{' and stack[-1] == '}')): stack.pop() if not stack: return (i, None) else: # Mismatched parentheses return (-1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '<', the matching '>' must have been an operator, since # template argument list should not contain statements. while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) i -= 1 return (-1, stack) def ReverseCloseExpression(clean_lines, linenum, pos): """If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if line[pos] not in ')}]>': return (line, 0, -1) # Check last line (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) if start_pos > -1: return (line, linenum, start_pos) # Continue scanning backward while stack and linenum > 0: linenum -= 1 line = clean_lines.elided[linenum] (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) if start_pos > -1: return (line, linenum, start_pos) # Did not find start of expression before beginning of file, give up return (line, 0, -1) def CheckForCopyright(filename, lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a # dummy line at the front. for line in xrange(1, min(len(lines), 11)): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found error(filename, 0, 'legal/copyright', 5, 'No copyright message found. ' 'You should have a line: "Copyright [year] "') def GetIndentLevel(line): """Return the number of leading spaces in line. Args: line: A string to check. Returns: An integer count of leading spaces, possibly zero. """ indent = Match(r'^( *)\S', line) if indent: return len(indent.group(1)) else: return 0 def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores original filename in case that cpplint is invoked from Emacs's # flymake. filename = re.sub(r'_flymake\.h$', '.h', filename) filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) # Replace 'c++' with 'cpp'. filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' def CheckForHeaderGuard(filename, clean_lines, error): """Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. 
clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found. """ # Don't check for header guards if there are error suppression # comments somewhere in this file. # # Because this is silencing a warning for a nonexistent line, we # only support the very specific NOLINT(build/header_guard) syntax, # and not the general NOLINT or NOLINT(*) syntax. raw_lines = clean_lines.lines_without_raw_strings for i in raw_lines: if Search(r'//\s*NOLINT\(build/header_guard\)', i): return cppvar = GetHeaderGuardCPPVariable(filename) ifndef = '' ifndef_linenum = 0 define = '' endif = '' endif_linenum = 0 for linenum, line in enumerate(raw_lines): linesplit = line.split() if len(linesplit) >= 2: # find the first occurrence of #ifndef and #define, save arg if not ifndef and linesplit[0] == '#ifndef': # set ifndef to the header guard presented on the #ifndef line. ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == '#define': define = linesplit[1] # find the last occurrence of #endif, save entire line if line.startswith('#endif'): endif = line endif_linenum = linenum if not ifndef or not define or ifndef != define: error(filename, 0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ # for backward compatibility. if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, '#ifndef header guard has wrong style, please use: %s' % cppvar) # Check for "//" comments on endif line. ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error) match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) if match: if match.group(1) == '_': # Issue low severity warning for deprecated double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, '#endif line should be "#endif // %s"' % cppvar) return # Didn't find the corresponding "//" comment. If this file does not # contain any "//" comments at all, it could be that the compiler # only wants "/**/" comments, look for those instead. 
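  # Illustrative note (not part of upstream cpplint): in a file that contains
  # no "//" comments at all, a guard closed as
  #   #endif  /* FOO_BAR_H_ */
  # (guard name hypothetical) is accepted by the check below instead of the
  # usual "// FOO_BAR_H_" form.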
no_single_line_comments = True for i in xrange(1, len(raw_lines) - 1): line = raw_lines[i] if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): no_single_line_comments = False break if no_single_line_comments: match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) if match: if match.group(1) == '_': # Low severity warning for double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, '#endif line should be "#endif /* %s */"' % cppvar) return # Didn't find anything error(filename, endif_linenum, 'build/header_guard', 5, '#endif line should be "#endif // %s"' % cppvar) def CheckHeaderFileIncluded(filename, include_state, error): """Logs an error if a .cc file does not include its header.""" # Do not check test files if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'): return fileinfo = FileInfo(filename) headerfile = filename[0:len(filename) - 2] + 'h' if not os.path.exists(headerfile): return headername = FileInfo(headerfile).RepositoryName() first_include = 0 for section_list in include_state.include_list: for f in section_list: if headername in f[0] or f[0] in headername: return if not first_include: first_include = f[1] error(filename, first_include, 'build/include', 5, '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)) def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') def CheckForNewlineAtEOF(filename, lines, error): """Logs an error if there is no newline char at the end of the file. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ # The array lines() was created by adding two newlines to the # original file (go figure), then splitting on \n. # To verify that the file ends in \n, we just have to make sure the # last-but-two element of lines() exists and is empty. if len(lines) < 3 or lines[-2]: error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.') def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. 
clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. ' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.') # (non-threadsafe name, thread-safe alternative, validation pattern) # # The validation pattern is used to eliminate false positives such as: # _rand(); // false positive due to substring match. # ->rand(); // some member function rand(). # ACMRandom rand(seed); // some variable named rand. # ISAACRandom rand(); // another variable named rand. # # Basically we require the return value of these functions to be used # in some expression context on the same line by matching on some # operator before the function name. This eliminates constructors and # member function calls. _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' _THREADING_LIST = ( ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), ) def CheckPosixThreading(filename, clean_lines, linenum, error): """Checks for calls to thread-unsafe functions. Much code has been originally written without consideration of multi-threading. Also, engineers are relying on their old experience; they have learned posix before threading extensions were added. These tests guide the engineers to use thread-safe functions (when using posix directly). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: # Additional pattern matching check to confirm that this is the # function we are looking for if Search(pattern, line): error(filename, linenum, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_func + '...) instead of ' + single_thread_func + '...) 
for improved thread safety.') def CheckVlogArguments(filename, clean_lines, linenum, error): """Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.') # Matches invalid increment: *count++, which moves pointer instead of # incrementing a value. _RE_PATTERN_INVALID_INCREMENT = re.compile( r'^\s*\*\w+(\+\+|--);') def CheckInvalidIncrement(filename, clean_lines, linenum, error): """Checks for invalid increment *count++. For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if _RE_PATTERN_INVALID_INCREMENT.match(line): error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).') def IsMacroDefinition(clean_lines, linenum): if Search(r'^#define', clean_lines[linenum]): return True if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): return True return False def IsForwardClassDeclaration(clean_lines, linenum): return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) class _BlockInfo(object): """Stores information about a generic block of code.""" def __init__(self, seen_open_brace): self.seen_open_brace = seen_open_brace self.open_parentheses = 0 self.inline_asm = _NO_ASM self.check_namespace_indentation = False def CheckBegin(self, filename, clean_lines, linenum, error): """Run checks that applies to text up to the opening brace. This is mostly for checking the text after the class identifier and the "{", usually where the base class is specified. For other blocks, there isn't much to check, so we always pass. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def CheckEnd(self, filename, clean_lines, linenum, error): """Run checks that applies to text after the closing brace. This is mostly used for checking end of namespace comments. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def IsBlockInfo(self): """Returns true if this block is a _BlockInfo. This is convenient for verifying that an object is an instance of a _BlockInfo, but not an instance of any of the derived classes. Returns: True for this class, False for derived classes. 
""" return self.__class__ == _BlockInfo class _ExternCInfo(_BlockInfo): """Stores information about an 'extern "C"' block.""" def __init__(self): _BlockInfo.__init__(self, True) class _ClassInfo(_BlockInfo): """Stores information about a class.""" def __init__(self, name, class_or_struct, clean_lines, linenum): _BlockInfo.__init__(self, False) self.name = name self.starting_linenum = linenum self.is_derived = False self.check_namespace_indentation = True if class_or_struct == 'struct': self.access = 'public' self.is_struct = True else: self.access = 'private' self.is_struct = False # Remember initial indentation level for this class. Using raw_lines here # instead of elided to account for leading comments. self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) # Try to find the end of the class. This will be confused by things like: # class A { # } *x = { ... # # But it's still good enough for CheckSectionSpacing. self.last_line = 0 depth = 0 for i in range(linenum, clean_lines.NumLines()): line = clean_lines.elided[i] depth += line.count('{') - line.count('}') if not depth: self.last_line = i break def CheckBegin(self, filename, clean_lines, linenum, error): # Look for a bare ':' if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): self.is_derived = True def CheckEnd(self, filename, clean_lines, linenum, error): # If there is a DISALLOW macro, it should appear near the end of # the class. seen_last_thing_in_class = False for i in xrange(linenum - 1, self.starting_linenum, -1): match = Search( r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' + self.name + r'\)', clean_lines.elided[i]) if match: if seen_last_thing_in_class: error(filename, i, 'readability/constructors', 3, match.group(1) + ' should be the last thing in the class') break if not Match(r'^\s*$', clean_lines.elided[i]): seen_last_thing_in_class = True # Check that closing brace is aligned with beginning of the class. # Only do this if the closing brace is indented by only whitespaces. # This means we will not check single-line class definitions. indent = Match(r'^( *)\}', clean_lines.elided[linenum]) if indent and len(indent.group(1)) != self.class_indent: if self.is_struct: parent = 'struct ' + self.name else: parent = 'class ' + self.name error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent) class _NamespaceInfo(_BlockInfo): """Stores information about a namespace.""" def __init__(self, name, linenum): _BlockInfo.__init__(self, False) self.name = name or '' self.starting_linenum = linenum self.check_namespace_indentation = True def CheckEnd(self, filename, clean_lines, linenum, error): """Check end of namespace comments.""" line = clean_lines.raw_lines[linenum] # Check how many lines is enclosed in this namespace. Don't issue # warning for missing namespace comments if there aren't enough # lines. However, do apply checks if there is already an end of # namespace comment and it's incorrect. # # TODO(unknown): We always want to check end of namespace comments # if a namespace is large, but sometimes we also want to apply the # check if a short namespace contained nontrivial things (something # other than forward declarations). There is currently no logic on # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. if (linenum - self.starting_linenum < 10 and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. 
# # Note that we accept C style "/* */" comments for terminating # namespaces, so that code that terminate namespaces inside # preprocessor macros can be cpplint clean. # # We also accept stuff like "// end of namespace ." with the # period at the end. # # Besides these, we don't accept anything else, otherwise we might # get false negatives when existing comment is a substring of the # expected namespace. if self.name: # Named namespace if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace %s"' % self.name) else: # Anonymous namespace if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): # If "// namespace anonymous" or "// anonymous namespace (more text)", # mention "// anonymous namespace" as an acceptable form if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line): error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"' ' or "// anonymous namespace"') else: error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"') class _PreprocessorInfo(object): """Stores checkpoints of nesting stacks when #if/#else is seen.""" def __init__(self, stack_before_if): # The entire nesting stack before #if self.stack_before_if = stack_before_if # The entire nesting stack up to #else self.stack_before_else = [] # Whether we have already seen #else or #elif self.seen_else = False class NestingState(object): """Holds states related to parsing braces.""" def __init__(self): # Stack for tracking all braces. An object is pushed whenever we # see a "{", and popped when we see a "}". Only 3 types of # objects are possible: # - _ClassInfo: a class or struct. # - _NamespaceInfo: a namespace. # - _BlockInfo: some other type of block. self.stack = [] # Top of the previous stack before each Update(). # # Because the nesting_stack is updated at the end of each line, we # had to do some convoluted checks to find out what is the current # scope at the beginning of the line. This check is simplified by # saving the previous top of nesting stack. # # We could save the full stack, but we only need the top. Copying # the full nesting stack would slow down cpplint by ~10%. self.previous_stack_top = [] # Stack of _PreprocessorInfo objects. self.pp_stack = [] def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace def InNamespaceBody(self): """Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _NamespaceInfo) def InExternC(self): """Check if we are currently one level inside an 'extern "C"' block. Returns: True if top of the stack is an extern block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ExternCInfo) def InClassDeclaration(self): """Check if we are currently one level inside a class or struct declaration. Returns: True if top of the stack is a class/struct, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ClassInfo) def InAsmBlock(self): """Check if we are currently one level inside an inline ASM block. 
Returns: True if the top of the stack is a block containing inline ASM. """ return self.stack and self.stack[-1].inline_asm != _NO_ASM def InTemplateArgumentList(self, clean_lines, linenum, pos): """Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. Returns: True if (linenum, pos) is inside template arguments. """ while linenum < clean_lines.NumLines(): # Find the earliest character that might indicate a template argument line = clean_lines.elided[linenum] match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) if not match: linenum += 1 pos = 0 continue token = match.group(1) pos += len(match.group(0)) # These things do not look like template argument list: # class Suspect { # class Suspect x; } if token in ('{', '}', ';'): return False # These things look like template argument list: # template # template # template # template if token in ('>', '=', '[', ']', '.'): return True # Check if token is an unmatched '<'. # If not, move on to the next character. if token != '<': pos += 1 if pos >= len(line): linenum += 1 pos = 0 continue # We can't be sure if we just find a single '<', and need to # find the matching '>'. (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) if end_pos < 0: # Not sure if template argument list or syntax error in file return False linenum = end_line pos = end_pos return False def UpdatePreprocessor(self, line): """Update preprocessor stack. We need to handle preprocessors due to classes like this: #ifdef SWIG struct ResultDetailsPageElementExtensionPoint { #else struct ResultDetailsPageElementExtensionPoint : public Extension { #endif We make the following assumptions (good enough for most files): - Preprocessor condition evaluates to true from #if up to first #else/#elif/#endif. - Preprocessor condition evaluates to false from #else/#elif up to #endif. We still perform lint checks on these lines, but these do not affect nesting stack. Args: line: current line to check. """ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): # Beginning of #if block, save the nesting stack here. The saved # stack will allow us to restore the parsing state in the #else case. self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) elif Match(r'^\s*#\s*(else|elif)\b', line): # Beginning of #else block if self.pp_stack: if not self.pp_stack[-1].seen_else: # This is the first #else or #elif block. Remember the # whole nesting stack up to this point. This is what we # keep after the #endif. self.pp_stack[-1].seen_else = True self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) # Restore the stack to how it was before the #if self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) else: # TODO(unknown): unexpected #else, issue warning? pass elif Match(r'^\s*#\s*endif\b', line): # End of #if or #else blocks. if self.pp_stack: # If we saw an #else, we will need to restore the nesting # stack to its former state before the #else, otherwise we # will just continue from where we left off. if self.pp_stack[-1].seen_else: # Here we can just use a shallow copy since we are the last # reference to it. self.stack = self.pp_stack[-1].stack_before_else # Drop the corresponding #if self.pp_stack.pop() else: # TODO(unknown): unexpected #endif, issue warning? pass # TODO(unknown): Update() is too long, but we will refactor later. 
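  # Illustrative sketch (not part of upstream cpplint): as Update() below
  # processes
  #   #ifdef SWIG
  #   struct Point {
  #   #else
  #   struct Point : public Base {
  #   #endif
  # UpdatePreprocessor snapshots the nesting stack at "#ifdef", saves the
  # state built from the first branch and restores the pre-#if stack when it
  # reaches "#else", and reinstates that saved first-branch state at "#endif",
  # so only the "#ifdef" variant of the struct remains on the nesting stack.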
def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remember top of the previous nesting stack. # # The stack is always pushed/popped and not modified in place, so # we can just do a shallow copy instead of copy.deepcopy. Using # deepcopy would slow down cpplint by ~28%. if self.stack: self.previous_stack_top = self.stack[-1] else: self.previous_stack_top = None # Update pp_stack self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; class_decl_match = Match( r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' r'(.*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): # We do not want to accept classes that are actually template arguments: # template , # template class Ignore3> # void Function() {}; # # To avoid template argument cases, we scan forward and look for # an unmatched '>'. If we see one, assume we are inside a # template argument list. end_declaration = len(class_decl_match.group(1)) if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): self.stack.append(_ClassInfo( class_decl_match.group(3), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(4) # If we have not yet seen the opening brace for the innermost block, # run checks here. 
if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespaces. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If namespace or class hasn't seen a opening brace yet, mark # namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True elif Match(r'^extern\s*"[^"]*"\s*\{', line): self.stack.append(_ExternCInfo()) else: self.stack.append(_BlockInfo(True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop these stack for these. if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2) def InnermostClass(self): """Get class info on the top of the stack. Returns: A _ClassInfo object if we are inside a class, or None otherwise. """ for i in range(len(self.stack), 0, -1): classinfo = self.stack[i - 1] if isinstance(classinfo, _ClassInfo): return classinfo return None def CheckCompletedBlocks(self, filename, error): """Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found. """ # Note: This test can result in false positives if #ifdef constructs # get in the way of brace matching. See the testBuildClass test in # cpplint_unittest.py for an example of this. for obj in self.stack: if isinstance(obj, _ClassInfo): error(filename, obj.starting_linenum, 'build/class', 5, 'Failed to find complete declaration of class %s' % obj.name) elif isinstance(obj, _NamespaceInfo): error(filename, obj.starting_linenum, 'build/namespaces', 5, 'Failed to find complete declaration of namespace %s' % obj.name) def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. Complain about several constructs which gcc-2 accepts, but which are not standard C++. 
Warning about these in lint is one way to ease the transition to new compilers. - put storage class first (e.g. "static const" instead of "const static"). - "%lld" instead of %qd" in printf-type functions. - "%1$d" is non-standard in printf-type functions. - "\%" is an undefined character escape sequence. - text after #endif is not allowed. - invalid inner-style forward declaration. - >? and ?= and )\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and ))?' # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.') # Everything else in this function operates on class declarations. # Return early if the top of the nesting stack is not a class, or if # the class head is not completed yet. classinfo = nesting_state.InnermostClass() if not classinfo or not classinfo.seen_open_brace: return # The class may have been declared with namespace or classname qualifiers. # The constructor and destructor will not have those qualifiers. base_classname = classinfo.name.split('::')[-1] # Look for single-argument constructors that aren't marked explicit. # Technically a valid construct, but against style. Also look for # non-single-argument constructors which are also technically valid, but # strongly suggest something is wrong. explicit_constructor_match = Match( r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line) if explicit_constructor_match: is_marked_explicit = explicit_constructor_match.group(1) if not explicit_constructor_match.group(2): constructor_args = [] else: constructor_args = explicit_constructor_match.group(2).split(',') # collapse arguments so that commas in template parameter lists and function # argument parameter lists don't split arguments in two i = 0 while i < len(constructor_args): constructor_arg = constructor_args[i] while (constructor_arg.count('<') > constructor_arg.count('>') or constructor_arg.count('(') > constructor_arg.count(')')): constructor_arg += ',' + constructor_args[i + 1] del constructor_args[i + 1] constructor_args[i] = constructor_arg i += 1 defaulted_args = [arg for arg in constructor_args if '=' in arg] noarg_constructor = (not constructor_args or # empty arg list # 'void' arg specifier (len(constructor_args) == 1 and constructor_args[0].strip() == 'void')) onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg not noarg_constructor) or # all but at most one arg defaulted (len(constructor_args) >= 1 and not noarg_constructor and len(defaulted_args) >= len(constructor_args) - 1)) initializer_list_constructor = bool( onearg_constructor and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) copy_constructor = bool( onearg_constructor and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), constructor_args[0].strip())) if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor): if defaulted_args: error(filename, linenum, 'runtime/explicit', 5, 'Constructors callable with one argument ' 'should be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 5, 'Single-parameter constructors should be marked explicit.') elif is_marked_explicit and not onearg_constructor: if noarg_constructor: error(filename, linenum, 'runtime/explicit', 5, 'Zero-parameter 
constructors should not be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 0, 'Constructors that require multiple arguments ' 'should not be marked explicit.') def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): """Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Since function calls often occur inside if/for/while/switch # expressions - which have their own, more liberal conventions - we # first see if we should be looking inside such an expression for a # function call, to which we can apply more strict standards. fncall = line # if there's no control flow construct, look at whole line for pattern in (r'\bif\s*\((.*)\)\s*{', r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break # Except in if/for/while/switch, there should never be space # immediately inside parens (eg "f( 3, 4 )"). We make an exception # for nested parens ( (a+b) + c ). Likewise, there should never be # a space before a ( when it's a function argument. I assume it's a # function argument when the char before the whitespace is legal in # a function name (alnum + _) and we're not starting a macro. Also ignore # pointers and references to arrays and functions coz they're too tricky: # we use a very simple way to recognize these: # " (something)(maybe-something)" or # " (something)(maybe-something," or # " (something)[something]" # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if (Search(r'\w\s+\(', fncall) and not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and not Search(r'\bcase\s+\(', fncall)): # TODO(unknown): Space after an operator function seem to be a common # error, silence those for now by restricting them to highest verbosity. if Search(r'\boperator_*\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain if Search(r'[^)]\s+\)\s*[^{\s]', fncall): # If the closing parenthesis is preceded by only whitespaces, # try to give a more descriptive error message. if Search(r'^\s+\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )') def IsBlankLine(line): """Returns true if the given line is blank. 
We consider a line to be blank if the line is empty or consists of only white spaces. Args: line: A line of a string. Returns: True, if the given line is blank. """ return not line or line.isspace() def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error): is_namespace_indent_item = ( len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and nesting_state.previous_stack_top == nesting_state.stack[-2]) if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, clean_lines.elided, line): CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error) def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): """Reports for long function bodies. For an overview why this is done, see: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ lines = clean_lines.lines line = lines[linenum] joined_line = '' starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... match_result = Match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( not Match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False for start_linenum in xrange(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() if Search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore elif Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count() # Count non-blank/non-comment lines. _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') def CheckComment(line, filename, linenum, next_line_start, error): """Checks for common mistakes in comments. Args: line: The line in question. filename: The name of the current file. linenum: The number of the line to check. next_line_start: The first non-whitespace column of the next line. 
error: The function to call with any errors found. """ commentpos = line.find('//') if commentpos != -1: # Check if the // may be in quotes. If so, ignore it # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if (line.count('"', 0, commentpos) - line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes # Allow one space for new scopes, two spaces otherwise: if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and ((commentpos >= 1 and line[commentpos-1] not in string.whitespace) or (commentpos >= 2 and line[commentpos-2] not in string.whitespace))): error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments') # Checks for common mistakes in TODO comments. comment = line[commentpos:] match = _RE_PATTERN_TODO.match(comment) if match: # One whitespace is correct; zero whitespace is handled elsewhere. leading_whitespace = match.group(1) if len(leading_whitespace) > 1: error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if not username: error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like ' '"// TODO(my_username): Stuff."') middle_whitespace = match.group(3) # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison if middle_whitespace != ' ' and middle_whitespace != '': error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') # If the comment contains an alphanumeric character, there # should be a space somewhere between it and the // unless # it's a /// or //! Doxygen comment. if (Match(r'//[^ ]*\w', comment) and not Match(r'(///|//\!)(\s+|$)', comment)): error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment') def CheckAccess(filename, clean_lines, linenum, nesting_state, error): """Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: # Found DISALLOW* macro outside a class declaration, or perhaps it # was used inside a function when it should have been part of the # class declaration. We could issue a warning here, but it # probably resulted in a compiler error already. pass def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for the correctness of various spacing issues in the code. Things we check for: spaces around operators, spaces after if/for/while/switch, no spaces around parens in function calls, two spaces between code and comment, don't start a block with a blank line, don't end a function with a blank line, don't add a blank line after public/protected/private, don't have too many blank lines in a row. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. 
linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw = clean_lines.lines_without_raw_strings line = raw[linenum] # Before nixing comments, check if the line is blank for no good # reason. This includes the first line after a block is opened, and # blank lines at the end of a function (ie, right before a line like '}' # # Skip all the blank line checks if we are immediately inside a # namespace body. In other words, don't issue blank line warnings # for this block: # namespace { # # } # # A warning about missing end of namespace comments will be issued instead. # # Also skip blank line checks for 'extern "C"' blocks, which are formatted # like namespaces. if (IsBlankLine(line) and not nesting_state.InNamespaceBody() and not nesting_state.InExternC()): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') # TODO(unknown): Don't complain if line before blank line, and line after, # both start with alnums and are indented the same amount. # This ignores whitespace at the start of a namespace block # because those are not usually indented. if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: # OK, we have a blank line at the start of a code block. Before we # complain, we check if it is an exception to the rule: The previous # non-empty line has the parameters of a function header that are indented # 4 spaces (because they did not fit in a 80 column line when placed on # the same line as the function name). We also check for the case where # the previous line is indented 6 spaces, which may happen when the # initializers of a constructor do not fit into a 80 column line. exception = False if Match(r' {6}\w', prev_line): # Initializer list? # We are looking for the opening column of initializer list, which # should be indented 4 spaces to cause 6 space indentation afterwards. search_position = linenum-2 while (search_position >= 0 and Match(r' {6}\w', elided[search_position])): search_position -= 1 exception = (search_position >= 0 and elided[search_position][:5] == ' :') else: # Search for the function arguments or an initializer list. We use a # simple heuristic here: If the line is indented 4 spaces; and we have a # closing paren, without the opening paren, followed by an opening brace # or colon (for initializer lists) we assume that it is the last line of # a function header. If we have a colon indented 4 spaces, it is an # initializer list. 
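        # A hedged illustration (identifiers invented): the blank line below is
        # tolerated because the previous non-blank line is a parameter list
        # indented 4 spaces that closes the function header, whereas a blank
        # line directly after "void Foo() {" is still flagged as
        # whitespace/blank_line.
        #
        #   void LongFunctionName(
        #       int first_argument, int second_argument) {
        #
        #     DoWork();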
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', prev_line) or Match(r' {4}:', prev_line)) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block ' 'should be deleted.') # Ignore blank lines at the end of a block in a long if-else # chain, like this: # if (condition1) { # // Something followed by a blank line # # } else if (condition2) { # // Something else # } if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if (next_line and Match(r'\s*}', next_line) and next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block ' 'should be deleted.') matched = Match(r'\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) # Next, check comments next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] next_line_start = len(next_line) - len(next_line.lstrip()) CheckComment(line, filename, linenum, next_line_start, error) # get rid of comments and strings line = clean_lines.elided[linenum] # You shouldn't have spaces before your brackets, except maybe after # 'delete []' or 'return []() {};' if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') # In range-based for, we wanted spaces before and after the colon, but # not around "::" tokens that might appear. if (Search(r'for *\(.*[^:]:[^: ]', line) or Search(r'for *\(.*[^: ]:[^:]', line)): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') def CheckOperatorSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line)) and not Search(r'\b(if|while|for) ', line) # Operators taken from [lex.operators] in C++11 standard. and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) and not Search(r'operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. 
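  # A hedged example of what the pattern below flags (identifiers invented):
  #   if (count<=limit)    -> whitespace/operators: Missing spaces around <=
  #   if (count <= limit)  -> OK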
# # If the operator is followed by a comma, assume it's be used in a # macro context and don't do any checks. This avoids false # positives. # # Note that && is not included here. Those are checked separately # in CheckRValueReference match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though # technically should should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Match(r'^(.*[^\s<])<[^\s=<,]', line) if match: (_, _, end_pos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') # Look for > that is not surrounded by spaces. Similar to the # above, we only trigger if both sides are missing spaces to avoid # false positives with shifts. match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression( clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') # We allow no-spaces around << when used like this: 10<<20, but # not otherwise (particularly, not when used as streams) # # We also allow operators following an opening parenthesis, since # those tend to be macros that deal with operators. match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line) if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') # We allow no-spaces around >> for almost anything. This is because # C++11 allows ">>" to close nested templates, which accounts for # most cases when ">>" is not followed by a space. # # We still warn on ">>" followed by alpha character, because that is # likely due to ">>" being used for right shifts, e.g.: # value >> alpha # # When ">>" is used to close templates, the alphanumeric letter that # follows would be part of an identifier, and there should still be # a space separating the template type and the identifier. # type> alpha match = Search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1)) def CheckParenthesisSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around parentheses. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # No spaces after an if, while, switch, or for match = Search(r' (if\(|for\(|while\(|switch\()', line) if match: error(filename, linenum, 'whitespace/parens', 5, 'Missing space before ( in %s' % match.group(1)) # For if/for/while/switch, the left and right parens should be # consistent about how many spaces are inside the parens, and # there should either be zero or one spaces inside the parens. 
# We don't want: "if ( foo)" or "if ( foo )". # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. match = Search(r'\b(if|for|while|switch)\s*' r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): error(filename, linenum, 'whitespace/parens', 5, 'Mismatching spaces inside () in %s' % match.group(1)) if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, 'Should have zero or one spaces inside ( and ) in %s' % match.group(1)) def CheckCommaSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ raw = clean_lines.lines_without_raw_strings line = clean_lines.elided[linenum] # You should always have a space after a comma (either as fn arg or operator) # # This does not apply when the non-space character following the # comma is another comma, since the only time when that happens is # for empty macro arguments. # # We run this check in two passes: first pass on elided lines to # verify that lines contain missing whitespaces, second pass on raw # lines to confirm that those missing whitespaces are not due to # elided comments. if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and Search(r',[^,\s]', raw[linenum])): error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,') # You should always have a space after a semicolon # except for few corner cases # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more # space after ; if Search(r';[^\s};\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') def CheckBracesSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing near commas. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Except after an opening paren, or after another opening brace (in case of # an initializer list, for instance), you should have spaces before your # braces. And since you should never have braces at the beginning of a line, # this is an easy test. match = Match(r'^(.*[^ ({>]){', line) if match: # Try a bit harder to check for brace initialization. This # happens in one of the following forms: # Constructor() : initializer_list_{} { ... } # Constructor{}.MemberFunction() # Type variable{}; # FunctionCall(type{}, ...); # LastArgument(..., type{}); # LOG(INFO) << type{} << " ..."; # map_of_type[{...}] = ...; # ternary = expr ? new type{} : nullptr; # OuterTemplate{}> # # We check for the character following the closing brace, and # silence the warning if it's one of those listed above, i.e. # "{.;,)<>]:". # # To account for nested initializer list, we allow any number of # closing braces up to "{;,)<". We can't simply silence the # warning on first sight of closing brace, because that would # cause false negatives for things that are not initializer lists. # Silence this: But not this: # Outer{ if (...) 
{ # Inner{...} if (...){ // Missing space before { # }; } # # There is a false negative with this approach if people inserted # spurious semicolons, e.g. "if (cond){};", but we will catch the # spurious semicolon with a separate check. (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) trailing_text = '' if endpos > -1: trailing_text = endline[endpos:] for offset in xrange(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)): trailing_text += clean_lines.elided[offset] if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {') # Make sure '} else {' has spaces. if Search(r'}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. if Search(r':\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search(r'^\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.') elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.') def IsDecltype(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise. """ (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if start_col < 0: return False if Search(r'\bdecltype\s*$', text[0:start_col]): return True return False def IsTemplateParameterList(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is the end of template<>. Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is end of a template parameter list, False otherwise. """ (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, column) if (startpos > -1 and Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): return True return False def IsRValueType(typenames, clean_lines, nesting_state, linenum, column): """Check if the token ending on (linenum, column) is a type. Assumes that text to the right of the column is "&&" or a function name. Args: typenames: set of type names from template-argument-list. clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is a type, False if we are not sure. """ prefix = clean_lines.elided[linenum][0:column] # Get one word to the left. If we failed to do so, this is most # likely not a type, since it's unlikely that the type name and "&&" # would be split across multiple lines. match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix) if not match: return False # Check text following the token. 
If it's "&&>" or "&&," or "&&...", it's # most likely a rvalue reference used inside a template. suffix = clean_lines.elided[linenum][column:] if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix): return True # Check for known types and end of templates: # int&& variable # vector&& variable # # Because this function is called recursively, we also need to # recognize pointer and reference types: # int* Function() # int& Function() if (match.group(2) in typenames or match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool', 'short', 'int', 'long', 'signed', 'unsigned', 'float', 'double', 'void', 'auto', '>', '*', '&']): return True # If we see a close parenthesis, look for decltype on the other side. # decltype would unambiguously identify a type, anything else is # probably a parenthesized expression and not a type. if match.group(2) == ')': return IsDecltype( clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1) # Check for casts and cv-qualifiers. # match.group(1) remainder # -------------- --------- # const_cast< type&& # const type&& # type const&& if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|' r'reinterpret_cast\s*<|\w+\s)\s*$', match.group(1)): return True # Look for a preceding symbol that might help differentiate the context. # These are the cases that would be ambiguous: # match.group(1) remainder # -------------- --------- # Call ( expression && # Declaration ( type&& # sizeof ( type&& # if ( expression && # while ( expression && # for ( type&& # for( ; expression && # statement ; type&& # block { type&& # constructor { expression && start = linenum line = match.group(1) match_symbol = None while start >= 0: # We want to skip over identifiers and commas to get to a symbol. # Commas are skipped so that we can find the opening parenthesis # for function parameter lists. match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line) if match_symbol: break start -= 1 line = clean_lines.elided[start] if not match_symbol: # Probably the first statement in the file is an rvalue reference return True if match_symbol.group(2) == '}': # Found closing brace, probably an indicate of this: # block{} type&& return True if match_symbol.group(2) == ';': # Found semicolon, probably one of these: # for(; expression && # statement; type&& # Look for the previous 'for(' in the previous lines. before_text = match_symbol.group(1) for i in xrange(start - 1, max(start - 6, 0), -1): before_text = clean_lines.elided[i] + before_text if Search(r'for\s*\([^{};]*$', before_text): # This is the condition inside a for-loop return False # Did not find a for-init-statement before this semicolon, so this # is probably a new statement and not a condition. return True if match_symbol.group(2) == '{': # Found opening brace, probably one of these: # block{ type&& = ... ; } # constructor{ expression && expression } # Look for a closing brace or a semicolon. If we see a semicolon # first, this is probably a rvalue reference. line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1] end = start depth = 1 while True: for ch in line: if ch == ';': return True elif ch == '{': depth += 1 elif ch == '}': depth -= 1 if depth == 0: return False end += 1 if end >= clean_lines.NumLines(): break line = clean_lines.elided[end] # Incomplete program? return False if match_symbol.group(2) == '(': # Opening parenthesis. Need to check what's to the left of the # parenthesis. Look back one extra line for additional context. 
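    # A brief, hedged illustration of the ambiguity being resolved here
    # (identifiers invented); it mirrors the pattern lists just below:
    #   for (auto&& item : container)   -> "auto&&" is a type (rvalue reference)
    #   if (found && IsValid(item))     -> "&&" is the logical operator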
before_text = match_symbol.group(1) if linenum > 1: before_text = clean_lines.elided[linenum - 1] + before_text before_text = match_symbol.group(1) # Patterns that are likely to be types: # [](type&& # for (type&& # sizeof(type&& # operator=(type&& # if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text): return True # Patterns that are likely to be expressions: # if (expression && # while (expression && # : initializer(expression && # , initializer(expression && # ( FunctionCall(expression && # + FunctionCall(expression && # + (expression && # # The last '+' represents operators such as '+' and '-'. if Search(r'(?:\bif|\bwhile|[-+=%^(]*>)?\s*$', match_symbol.group(1)) if match_func: # Check for constructors, which don't have return types. if Search(r'\b(?:explicit|inline)$', match_func.group(1)): return True implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix) if (implicit_constructor and implicit_constructor.group(1) == implicit_constructor.group(2)): return True return IsRValueType(typenames, clean_lines, nesting_state, linenum, len(match_func.group(1))) # Nothing before the function name. If this is inside a block scope, # this is probably a function call. return not (nesting_state.previous_stack_top and nesting_state.previous_stack_top.IsBlockInfo()) if match_symbol.group(2) == '>': # Possibly a closing bracket, check that what's on the other side # looks like the start of a template. return IsTemplateParameterList( clean_lines, start, len(match_symbol.group(1))) # Some other symbol, usually something like "a=b&&c". This is most # likely not a type. return False def IsDeletedOrDefault(clean_lines, linenum): """Check if current constructor or operator is deleted or default. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if this is a deleted or default constructor. """ open_paren = clean_lines.elided[linenum].find('(') if open_paren < 0: return False (close_line, _, close_paren) = CloseExpression( clean_lines, linenum, open_paren) if close_paren < 0: return False return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:]) def IsRValueAllowed(clean_lines, linenum, typenames): """Check if RValue reference is allowed on a particular line. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. typenames: set of type names from template-argument-list. Returns: True if line is within the region where RValue references are allowed. 
""" # Allow region marked by PUSH/POP macros for i in xrange(linenum, 0, -1): line = clean_lines.elided[i] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): if not line.endswith('PUSH'): return False for j in xrange(linenum, clean_lines.NumLines(), 1): line = clean_lines.elided[j] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): return line.endswith('POP') # Allow operator= line = clean_lines.elided[linenum] if Search(r'\boperator\s*=\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) # Allow constructors match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) if match and match.group(1) == match.group(2): return IsDeletedOrDefault(clean_lines, linenum) if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) if Match(r'\s*[\w<>]+\s*\(', line): previous_line = 'ReturnType' if linenum > 0: previous_line = clean_lines.elided[linenum - 1] if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): return IsDeletedOrDefault(clean_lines, linenum) # Reject types not mentioned in template-argument-list while line: match = Match(r'^.*?(\w+)\s*&&(.*)$', line) if not match: break if match.group(1) not in typenames: return False line = match.group(2) # All RValue types that were in template-argument-list should have # been removed by now. Those were allowed, assuming that they will # be forwarded. # # If there are no remaining RValue types left (i.e. types that were # not found in template-argument-list), flag those as not allowed. return line.find('&&') < 0 def GetTemplateArgs(clean_lines, linenum): """Find list of template arguments associated with this function declaration. Args: clean_lines: A CleansedLines instance containing the file. linenum: Line number containing the start of the function declaration, usually one line after the end of the template-argument-list. Returns: Set of type names, or empty set if this does not appear to have any template parameters. 
""" # Find start of function func_line = linenum while func_line > 0: line = clean_lines.elided[func_line] if Match(r'^\s*$', line): return set() if line.find('(') >= 0: break func_line -= 1 if func_line == 0: return set() # Collapse template-argument-list into a single string argument_list = '' match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) if match: # template-argument-list on the same line as function name start_col = len(match.group(1)) _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) if end_col > -1 and end_line == func_line: start_col += 1 # Skip the opening bracket argument_list = clean_lines.elided[func_line][start_col:end_col] elif func_line > 1: # template-argument-list one line before function name match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) if match: end_col = len(match.group(1)) _, start_line, start_col = ReverseCloseExpression( clean_lines, func_line - 1, end_col) if start_col > -1: start_col += 1 # Skip the opening bracket while start_line < func_line - 1: argument_list += clean_lines.elided[start_line][start_col:] start_col = 0 start_line += 1 argument_list += clean_lines.elided[func_line - 1][start_col:end_col] if not argument_list: return set() # Extract type names typenames = set() while True: match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', argument_list) if not match: break typenames.add(match.group(1)) argument_list = match.group(2) return typenames def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): """Check for rvalue references. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Find lines missing spaces around &&. # TODO(unknown): currently we don't check for rvalue references # with spaces surrounding the && to avoid false positives with # boolean expressions. line = clean_lines.elided[linenum] match = Match(r'^(.*\S)&&', line) if not match: match = Match(r'(.*)&&\S', line) if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): return # Either poorly formed && or an rvalue reference, check the context # to get a more accurate error message. Mostly we want to determine # if what's to the left of "&&" is a type or not. typenames = GetTemplateArgs(clean_lines, linenum) and_pos = len(match.group(1)) if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum, typenames): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&') def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): """Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found. """ # Skip checks if the class is small, where small means 25 lines or less. 
# 25 lines seems like a good cutoff since that's the usual height of # terminals, and any class that can't fit in one screen can't really # be considered "small". # # Also skip checks if we are on the first line. This accounts for # classes that look like # class Foo { public: ... }; # # If we didn't find the end of the class, last_line would be zero, # and the check will be skipped by the first condition. if (class_info.last_line - class_info.starting_linenum <= 24 or linenum <= class_info.starting_linenum): return matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: # Issue warning if the line before public/protected/private was # not a blank line, but don't do this if the previous line contains # "class" or "struct". This can happen two ways: # - We are at the beginning of the class. # - We are forward-declaring an inner class that is semantically # private, but needed to be public for implementation reasons. # Also ignores cases where the previous line ends with a backslash as can be # common when defining classes in C macros. prev_line = clean_lines.lines[linenum - 1] if (not IsBlankLine(prev_line) and not Search(r'\b(class|struct)\b', prev_line) and not Search(r'\\$', prev_line)): # Try a bit harder to find the beginning of the class. This is to # account for multi-line base-specifier lists, e.g.: # class Derived # : public Base { end_class_head = class_info.starting_linenum for i in range(class_info.starting_linenum, linenum): if Search(r'\{\s*$', clean_lines.lines[i]): end_class_head = i break if end_class_head < linenum - 1: error(filename, linenum, 'whitespace/blank_line', 3, '"%s:" should be preceded by a blank line' % matched.group(1)) def GetPreviousNonBlankLine(clean_lines, linenum): """Return the most recent non-blank line and its line number. Args: clean_lines: A CleansedLines instance containing the file contents. linenum: The number of the line to check. Returns: A tuple with two elements. The first element is the contents of the last non-blank line before the current line, or the empty string if this is the first non-blank line. The second is the line number of that line, or -1 if this is the first non-blank line. """ prevlinenum = linenum - 1 while prevlinenum >= 0: prevline = clean_lines.elided[prevlinenum] if not IsBlankLine(prevline): # if not a blank line... return (prevline, prevlinenum) prevlinenum -= 1 return ('', -1) def CheckBraces(filename, clean_lines, linenum, error): """Looks for misplaced braces (e.g. at the end of line). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings if Match(r'\s*{\s*$', line): # We allow an open brace to start a line in the case where someone is using # braces in a block to explicitly create a new scope, which is commonly used # to control the lifetime of stack-allocated variables. Braces are also # used for brace initializers inside function calls. We don't detect this # perfectly: we just don't complain if the last non-whitespace character on # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the # previous line starts a preprocessor block. 
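    # A hedged illustration (identifiers invented):
    #
    #   DoSetup();
    #   {                <- not flagged: previous non-blank line ends with ';'
    #     ScopedCleanup cleanup;
    #   }
    #
    #   if (condition)
    #   {                <- flagged: { should almost always be at the end of
    #                       the previous line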
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if (not Search(r'[,;:}{(]\s*$', prevline) and not Match(r'\s*#', prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') # An else clause should be on the same line as the preceding closing brace. if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match(r'\s*}\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') # If braces come on one side of an else, they should be on both. # However, we have to worry about "else if" that spans multiple lines! if Search(r'else if\s*\(', line): # could be multi-line if brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') # Likewise, an else should never have the else clause on the same line if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if Match(r'\s*do [^\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Check single-line if/else bodies. The style guide says 'curly braces are not # required for single-line statements'. We additionally allow multi-line, # single statements, but we reject anything with more than one semicolon in # it. This means that the first semicolon after the if should be at the end of # its line, and the line after that should have an indent level equal to or # lower than the if. We also check for ambiguous if/else nesting without # braces. if_else_match = Search(r'\b(if\s*\(|else\b)', line) if if_else_match and not Match(r'\s*#', line): if_indent = GetIndentLevel(line) endline, endlinenum, endpos = line, linenum, if_else_match.end() if_match = Search(r'\bif\s*\(', line) if if_match: # This could be a multiline if condition, so find the end first. pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) # Check for an opening brace, either directly after the if or on the next # line. If found, this isn't a single-statement conditional. if (not Match(r'\s*{', endline[endpos:]) and not (Match(r'\s*$', endline[endpos:]) and endlinenum < (len(clean_lines.elided) - 1) and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): while (endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]): endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] # We allow a mix of whitespace and closing braces (e.g. for one-liner # methods) and a single \ after the semicolon (for macros) endpos = endline.find(';') if not Match(r';[\s}]*(\\?)$', endline[endpos:]): # Semicolon isn't the last character, there's something trailing. 
# Output a warning if the semicolon is not contained inside # a lambda expression. if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: # Make sure the next line is dedented next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) # With ambiguous nested if statements, this will error out on the # if that *doesn't* match the else, regardless of whether it's the # inner one or outer one. if (if_match and Match(r'\s*else\b', next_line) and next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. ' 'Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. 
# # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas # - alignas specifier with anonymous structs: closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") def CheckEmptyBlockBody(filename, clean_lines, linenum, error): """Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Search for loop keywords at the beginning of the line. Because only # whitespaces are allowed before the keywords, this will also ignore most # do-while-loops, since those lines should start with closing brace. # # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: # Find the end of the conditional expression (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. 
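    # A hedged illustration (condition names invented):
    #   while (WaitForEvent());    -> whitespace/empty_loop_body
    #   if (result == NULL);       -> whitespace/empty_conditional_body
    #   while (WaitForEvent()) {}  -> not flagged by this check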
if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') def FindCheckMacro(line): """Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found. """ for macro in _CHECK_MACROS: i = line.find(macro) if i >= 0: # Find opening parenthesis. Do a regular expression match here # to make sure that we are matching the expected CHECK macro, as # opposed to some other macro that happens to contain the CHECK # substring. matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) if not matched: continue return (macro, len(matched.group(1))) return (None, -1) def CheckCheck(filename, clean_lines, linenum, error): """Checks the use of CHECK and EXPECT macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Decide the set of replacement macros that should be suggested lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return # Find end of the boolean expression by matching parentheses (last_line, end_line, end_pos) = CloseExpression( clean_lines, linenum, start_pos) if end_pos < 0: return # If the check macro is followed by something other than a # semicolon, assume users will log their own custom error messages # and don't suggest any replacements. if not Match(r'\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] # Parse expression so that we can take parentheses into account. # This avoids false positives for inputs like "CHECK((a < 4) == b)", # which is not replaceable by CHECK_LE. lhs = '' rhs = '' operator = None while expression: matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': # Parenthesized operand expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return # Unmatched parenthesis lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): # Logical and/or operators. This means the expression # contains more than one term, for example: # CHECK(42 < a && a < b); # # These are not replaceable with CHECK_LE, so bail out early. return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): # Non-relational operator lhs += token expression = matched.group(2) else: # Relational operator operator = token rhs = matched.group(2) break else: # Unparenthesized operand. Instead of appending to lhs one character # at a time, we do another regular expression match to consume several # characters at once if possible. Trivial benchmark shows that this # is more efficient when the operands are longer than a single # character, which is generally the case. 
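      # A hedged walk-through (identifiers invented): for the expression
      # "some_count == 42", the pattern below consumes "some_count " into lhs,
      # the next iteration takes the relational branch with operator '==' and
      # rhs ' 42', and the constant literal on the right ultimately yields:
      #   Consider using CHECK_EQ instead of CHECK(a == b)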
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) # Only apply checks if we got all parts of the boolean expression if not (lhs and operator and rhs): return # Check that rhs do not contain logical operators. We already know # that lhs is fine since the loop above parses out && and ||. if rhs.find('&&') > -1 or rhs.find('||') > -1: return # At least one of the operands must be a constant literal. This is # to avoid suggesting replacements for unprintable things like # CHECK(variable != iterator) # # The following pattern matches decimal, hex integers, strings, and # characters (in that order). lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) # Instead of: # Consider using CHECK_EQ instead of CHECK(a == b) # # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) def CheckAltTokens(filename, clean_lines, linenum, error): """Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) def GetLineWidth(line): """Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line) def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. 
nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. # NOTE: here are the conditions rob pike used for his tests. Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. elif ((initial_spaces == 1 or initial_spaces == 3) and not Match(scope_or_label_pattern, cleansed_line) and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') # Check if the line is a header guard. is_header_guard = False if file_extension == 'h': cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. # # The "$Id:...$" comment may also get very long without it being the # developers fault. if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): line_width = GetLineWidth(line) extended_length = int((_line_length * 1.25)) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # for loops are allowed two ;'s (and may run over two lines). 
cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) CheckBracesSpacing(filename, clean_lines, linenum, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') # Matches the first component of a filename delimited by -s and _s. That is: # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0] def _IsTestFilename(filename): """Determines if the given filename has a suffix that identifies it as a test. Args: filename: The input filename. Returns: True if 'filename' looks like a test, False otherwise. """ if (filename.endswith('_test.cc') or filename.endswith('_unittest.cc') or filename.endswith('_regtest.cc')): return True else: return False def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 
'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. is_cpp_h = include in _CPP_HEADERS if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) if target_base == include_base and ( include_dir == target_dir or include_dir == os.path.normpath(target_dir + '/../public')): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): """Check rules that are applicable to #include lines. Strings on #include lines are NOT removed from elided line, to make certain tasks easier. However, to prevent false positives, checks applicable to #include lines in CheckLanguage must be put here. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. include_state: An _IncludeState instance in which the headers are inserted. error: The function to call with any errors found. """ fileinfo = FileInfo(filename) line = clean_lines.lines[linenum] # "include" should use the new style "foo/bar.h" instead of just "bar.h" # Only do this check if the included header follows google naming # conventions. If not, assume that it's a 3rd party API that # requires special include conventions. # # We also make an exception for Lua headers, which follow google # naming convention but not the include convention. match = Match(r'#include\s*"([^/]+\.h)"', line) if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): error(filename, linenum, 'build/include', 4, 'Include the directory when naming .h files') # we shouldn't include a file more than once. actually, there are a # handful of instances where doing so is okay, but in general it's # not. 
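  # Illustrative note (the header names below are examples, not input to
  # this file): _RE_PATTERN_INCLUDE captures the opening delimiter and the
  # header path, so for '#include <vector>' group(1) is '<' (a system
  # include) and group(2) is 'vector', while for '#include "foo/bar.h"'
  # group(1) is '"' and group(2) is 'foo/bar.h'.  The duplicate check below
  # relies on include_state.FindHeader() returning the line of an earlier
  # occurrence, or a negative value when the header has not been seen yet.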
match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) is_system = (match.group(1) == '<') duplicate_line = include_state.FindHeader(include) if duplicate_line >= 0: error(filename, linenum, 'build/include', 4, '"%s" already included at %s:%s' % (include, filename, duplicate_line)) elif (include.endswith('.cc') and os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)): error(filename, linenum, 'build/include', 4, 'Do not include .cc files from other packages') elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): include_state.include_list[-1].append((include, linenum)) # We want to ensure that headers appear in the right order: # 1) for foo.cc, foo.h (preferred location) # 2) c system files # 3) cpp system files # 4) for foo.cc, foo.h (deprecated location) # 5) other google headers # # We classify each include statement as one of those 5 types # using a number of techniques. The include_state object keeps # track of the highest type seen, and complains if we see a # lower type after that. error_message = include_state.CheckNextIncludeOrder( _ClassifyInclude(fileinfo, include, is_system)) if error_message: error(filename, linenum, 'build/include_order', 4, '%s. Should be: %s.h, c system, c++ system, other.' % (error_message, fileinfo.BaseName())) canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) if not include_state.IsInAlphabeticalOrder( clean_lines, linenum, canonical_include): error(filename, linenum, 'build/include_alpha', 4, 'Include "%s" not in alphabetical order' % include) include_state.SetLastHeader(canonical_include) def _GetTextInside(text, start_pattern): r"""Retrieves all the text between matching open and close parentheses. Given a string of lines and a regular expression string, retrieve all the text following the expression and between opening punctuation symbols like (, [, or {, and the matching close-punctuation symbol. This properly nested occurrences of the punctuations, so for the text like printf(a(), b(c())); a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. start_pattern must match string having an open punctuation symbol at the end. Args: text: The lines to extract text. Its comments and strings must be elided. It can be single line and can span multiple lines. start_pattern: The regexp string indicating where to start extracting the text. Returns: The extracted text. None if either the opening string or ending punctuation could not be found. """ # TODO(unknown): Audit cpplint.py to see what places could be profitably # rewritten to use _GetTextInside (and use inferior regexp matching today). # Give opening punctuations to get the matching close-punctuations. matching_punctuation = {'(': ')', '{': '}', '[': ']'} closing_punctuation = set(matching_punctuation.itervalues()) # Find the position to start extracting text. match = re.search(start_pattern, text, re.M) if not match: # start_pattern not found in text. return None start_position = match.end(0) assert start_position > 0, ( 'start_pattern must ends with an opening punctuation.') assert text[start_position - 1] in matching_punctuation, ( 'start_pattern must ends with an opening punctuation.') # Stack of closing punctuations we expect to have in text after position. 
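  # Illustrative trace, using the example from the docstring above: for the
  # text 'printf(a(), b(c()));' and start_pattern r'printf\(', the stack
  # starts as [')'], grows as the nested parentheses of a() and b(c())
  # open, and becomes empty at the ')' that closes printf(, so the slice
  # returned below is 'a(), b(c())'.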
punctuation_stack = [matching_punctuation[text[start_position - 1]]] position = start_position while punctuation_stack and position < len(text): if text[position] == punctuation_stack[-1]: punctuation_stack.pop() elif text[position] in closing_punctuation: # A closing punctuation without matching opening punctuations. return None elif text[position] in matching_punctuation: punctuation_stack.append(matching_punctuation[text[position]]) position += 1 if punctuation_stack: # Opening punctuations left without matching close-punctuations. return None # punctuations match. return text[start_position:position - 1] # Patterns for matching call-by-reference parameters. # # Supports nested templates up to 2 levels deep using this messy pattern: # < (?: < (?: < [^<>]* # > # | [^<>] )* # > # | [^<>] )* # > _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* _RE_PATTERN_TYPE = ( r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' r'(?:\w|' r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' r'::)+') # A call-by-reference parameter ends with '& identifier'. _RE_PATTERN_REF_PARAM = re.compile( r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') # A call-by-const-reference parameter either ends with 'const& identifier' # or looks like 'const type& identifier' when 'type' is atomic. _RE_PATTERN_CONST_REF_PARAM = ( r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) if match: include_state.ResetSection(match.group(1)) # Make Windows paths like Unix. fullname = os.path.abspath(filename).replace('\\', '/') # Perform other checks now that we are sure that this is not an include line CheckCasts(filename, clean_lines, linenum, error) CheckGlobalStatic(filename, clean_lines, linenum, error) CheckPrintf(filename, clean_lines, linenum, error) if file_extension == 'h': # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes declare or disable copy/assign # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. 
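  # Illustrative examples (hypothetical declarations): 'short port = 80;'
  # triggers the "unsigned short" message below, 'long long counter;' gets
  # the 'use int16/int64/etc' suggestion, and 'long double x;' is left
  # alone because of the (?! +double) lookahead in the pattern.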
if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) # Check if some verboten operator overloading is going on # TODO(unknown): catch out-of-line unary operator&: # class X {}; # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& if Search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { if Search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') # Check for potential format string bugs like printf(foo). # We constrain the pattern not to pick things like DocidForPrintf(foo). # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) # TODO(unknown): Catch the following case. Need to change the calling # convention of the whole function to process multiple line to handle it. # printf( # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') if printf_args: match = Match(r'([\w.\->()]+)$', printf_args) if match and match.group(1) != '__VA_ARGS__': function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1) error(filename, linenum, 'runtime/printf', 4, 'Potential format string bug. Do %s("%%s", %s) instead.' % (function_name, match.group(1))) # Check for potential memset bugs like memset(buf, sizeof(buf), 0). match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): error(filename, linenum, 'runtime/memset', 4, 'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2))) if Search(r'\busing namespace\b', line): error(filename, linenum, 'build/namespaces', 5, 'Do not use namespace using-directives. ' 'Use using-declarations instead.') # Detect variable-length arrays. match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) if (match and match.group(2) != 'return' and match.group(2) != 'delete' and match.group(3).find(']') == -1): # Split the size using space and arithmetic operators as delimiters. # If any of the resulting tokens are not compile time constants then # report the error. tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) is_const = True skip_next = False for tok in tokens: if skip_next: skip_next = False continue if Search(r'sizeof\(.+\)', tok): continue if Search(r'arraysize\(\w+\)', tok): continue tok = tok.lstrip('(') tok = tok.rstrip(')') if not tok: continue if Match(r'\d+', tok): continue if Match(r'0[xX][0-9a-fA-F]+', tok): continue if Match(r'k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue # A catch all for tricky sizeof cases, including 'sizeof expression', # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' # requires skipping the next token because we split on ' ' and '*'. 
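  # Illustrative example (hypothetical declaration): for
  # 'int buf[sizeof(*header)];' the size expression splits into 'sizeof('
  # and 'header)'; the first token sets skip_next below so the dangling
  # 'header)' piece is not mistaken for a runtime value, and the array is
  # still treated as having a compile-time size.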
if tok.startswith('sizeof'): skip_next = True continue is_const = False break if not is_const: error(filename, linenum, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. if (file_extension == 'h' and Search(r'\bnamespace\s*{', line) and line[-1] != '\\'): error(filename, linenum, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.') def CheckGlobalStatic(filename, clean_lines, linenum, error): """Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function(... # string Class::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') def CheckPrintf(filename, clean_lines, linenum, error): """Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\s*\(', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. 
Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\s*\(', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1)) def IsDerivedFunction(clean_lines, linenum): """Check if current line contains an inherited function. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains a function with "override" virt-specifier. """ # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) if match: # Look for "override" after the matching closing parenthesis line, _, closing_paren = CloseExpression( clean_lines, i, len(match.group(1))) return (closing_paren >= 0 and Search(r'\boverride\b', line[closing_paren:])) return False def IsOutOfLineMethodDefinition(clean_lines, linenum): """Check if current line contains an out-of-line method definition. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains an out-of-line method definition. """ # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]): return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None return False def IsInitializerList(clean_lines, linenum): """Check if current line is inside constructor initializer list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line appears to be inside constructor initializer list, False otherwise. """ for i in xrange(linenum, 1, -1): line = clean_lines.elided[i] if i == linenum: remove_function_body = Match(r'^(.*)\{\s*$', line) if remove_function_body: line = remove_function_body.group(1) if Search(r'\s:\s*\w+[({]', line): # A lone colon tend to indicate the start of a constructor # initializer list. It could also be a ternary operator, which # also tend to appear in constructor initializer lists as # opposed to parameter lists. return True if Search(r'\}\s*,\s*$', line): # A closing brace followed by a comma is probably the end of a # brace-initialized member in constructor initializer list. return True if Search(r'[{};]\s*$', line): # Found one of the following: # - A closing brace or semicolon, probably the end of the previous # function. # - An opening brace, probably the start of current class or namespace. # # Current line is probably not inside an initializer list since # we saw one of those things without seeing the starting colon. return False # Got to the beginning of the file without seeing the start of # constructor initializer list. return False def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error): """Check for non-const references. Separate from CheckLanguage since it scans backwards from current line, instead of scanning forward. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Do nothing if there is no '&' on current line. 
line = clean_lines.elided[linenum] if '&' not in line: return # If a function is inherited, current function doesn't have much of # a choice, so any non-const references should not be blamed on # derived function. if IsDerivedFunction(clean_lines, linenum): return # Don't warn on out-of-line method definitions, as we would warn on the # in-line declaration, if it isn't marked with 'override'. if IsOutOfLineMethodDefinition(clean_lines, linenum): return # Long type names may be broken across multiple lines, usually in one # of these forms: # LongType # ::LongTypeContinued &identifier # LongType:: # LongTypeContinued &identifier # LongType< # ...>::LongTypeContinued &identifier # # If we detected a type split across two lines, join the previous # line to current line so that we can match const references # accordingly. # # Note that this only scans back one line, since scanning back # arbitrary number of lines would be expensive. If you have a type # that spans more than 2 lines, please use a typedef. if linenum > 1: previous = None if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): # previous_line\n + ::current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', clean_lines.elided[linenum - 1]) elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): # previous_line::\n + current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', clean_lines.elided[linenum - 1]) if previous: line = previous.group(1) + line.lstrip() else: # Check for templated parameter that is split across multiple lines endpos = line.rfind('>') if endpos > -1: (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, endpos) if startpos > -1 and startline < linenum: # Found the matching < on an earlier line, collect all # pieces up to current line. line = '' for i in xrange(startline, linenum + 1): line += clean_lines.elided[i].strip() # Check for non-const references in function parameters. A single '&' may # found in the following places: # inside expression: binary & for bitwise AND # inside expression: unary & for taking the address of something # inside declarators: reference parameter # We will exclude the first two cases by checking that we are not inside a # function body, including one that was just introduced by a trailing '{'. # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. if (nesting_state.previous_stack_top and not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): # Not at toplevel, not within a class, and not within a namespace return # Avoid initializer lists. We only need to scan back from the # current line for something that starts with ':'. # # We don't need to check the current line, since the '&' would # appear inside the second set of parentheses on the current line as # opposed to the first set. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 10), -1): previous_line = clean_lines.elided[i] if not Search(r'[),]\s*$', previous_line): break if Match(r'^\s*:\s+\S', previous_line): return # Avoid preprocessors if Search(r'\\\s*$', line): return # Avoid constructor initializer lists if IsInitializerList(clean_lines, linenum): return # We allow non-const references in a few standard places, like functions # called "swap()" or iostream operators like "<<" or ">>". Do not check # those function parameters. # # We also accept & in static_assert, which looks like a function but # it's actually a declaration expression. 
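  # Illustrative example (hypothetical names): a parameter list such as
  # 'void Update(Record& record)' is reported with the suggestion to take
  # 'const Record&' or a pointer instead, while 'void swap(Foo& a, Foo& b)'
  # matches the whitelist defined just below and is never checked.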
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' r'operator\s*[<>][<>]|' r'static_assert|COMPILE_ASSERT' r')\s*\(') if Search(whitelisted_functions, line): return elif not Search(r'\S+\([^)]*$', line): # Don't see a whitelisted function on this line. Actually we # didn't see any function name on this line, so this is likely a # multi-line parameter list. Try a bit harder to catch this case. for i in xrange(2): if (linenum > i and Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): return decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): error(filename, linenum, 'runtime/references', 2, 'Is this a non-const reference? ' 'If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter)) def CheckCasts(filename, clean_lines, linenum, error): """Various cast related checks. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Check to see if they're using an conversion function cast. # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. match = Search( r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b' r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) expecting_function = ExpectingFunctionArgs(clean_lines, linenum) if match and not expecting_function: matched_type = match.group(2) # matched_new_or_template is used to silence two false positives: # - New operators # - Template arguments with function types # # For template arguments, we match on types immediately following # an opening bracket without any spaces. This is a fast way to # silence the common case where the function type is the first # template argument. False negative with less-than comparison is # avoided because those operators are usually followed by a space. # # function // bracket + no space = false positive # value < double(42) // bracket + space = true positive matched_new_or_template = match.group(1) # Avoid arrays by looking for brackets that come after the closing # parenthesis. if Match(r'\([^()]+\)\s*\[', match.group(3)): return # Other things to ignore: # - Function pointers # - Casts to pointer types # - Placement new # - Alias declarations matched_funcptr = match.group(3) if (matched_new_or_template is None and not (matched_funcptr and (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr) or matched_funcptr.startswith('(*)'))) and not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and not Search(r'new\(\S+\)\s*' + matched_type, line)): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) if not expecting_function: CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). 
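  # Illustrative examples (hypothetical casts): '(char*) "hello"' is caught
  # by the first call below and gets a const_cast suggestion, while a
  # pointer cast such as '(Foo*) ptr' falls through to the second call and
  # gets a reinterpret_cast suggestion instead.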
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. # # Some non-identifier character is required before the '&' for the # expression to be recognized as a cast. These are casts: # expression = &static_cast(temporary()); # function(&(int*)(temporary())); # # This is not a cast: # reference_type&(int* function_param); match = Search( r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) if match: # Try a better error message when the & is bound to something # dereferenced by the casted pointer, as opposed to the casted # pointer itself. parenthesis_error = False match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) if match: _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) if x1 >= 0 and clean_lines.elided[y1][x1] == '(': _, y2, x2 = CloseExpression(clean_lines, y1, x1) if x2 >= 0: extended_line = clean_lines.elided[y2][x2:] if y2 < clean_lines.NumLines() - 1: extended_line += clean_lines.elided[y2 + 1] if Match(r'\s*(?:->|\[)', extended_line): parenthesis_error = True if parenthesis_error: error(filename, linenum, 'readability/casting', 4, ('Are you taking an address of something dereferenced ' 'from a cast? Wrapping the dereferenced expression in ' 'parentheses will make the binding more obvious')) else: error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): """Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise. """ line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) if context.endswith(' operator++') or context.endswith(' operator--'): return False # A single unnamed argument for a function tends to look like old # style cast. If we see those, don't issue warnings for deprecated # casts, instead issue warnings for unnamed arguments where # appropriate. 
# # These are things that we want warnings for, since the style guide # explicitly require all parameters to be named: # Function(int); # Function(int) { # ConstMember(int) const; # ConstMember(int) const { # ExceptionMember(int) throw (...); # ExceptionMember(int) throw (...) { # PureVirtual(int) = 0; # [](int) -> bool { # # These are functions of some sort, where the compiler would be fine # if they had named parameters, but people often omit those # identifiers to reduce clutter: # (FunctionPointer)(int); # (FunctionPointer)(int) = value; # Function((function_pointer_arg)(int)) # Function((function_pointer_arg)(int), int param) # ; # <(FunctionPointerTemplateArgument)(int)>; remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): # Looks like an unnamed parameter. # Don't warn on any kind of template arguments. if Match(r'^\s*>', remainder): return False # Don't warn on assignments to function pointers, but keep warnings for # unnamed parameters to pure virtual functions. Note that this pattern # will also pass on assignments of "0" to function pointers, but the # preferred values for those would be "nullptr" or "NULL". matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) if matched_zero and matched_zero.group(1) != '0': return False # Don't warn on function pointer declarations. For this we need # to check what came before the "(type)" string. if Match(r'.*\)\s*$', line[0:match.start(0)]): return False # Don't warn if the parameter is named with block comments, e.g.: # Function(int /*unused_param*/); raw_line = clean_lines.raw_lines[linenum] if '/*' in raw_line: return False # Passed all filters, issue warning here. error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function') return True # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True def ExpectingFunctionArgs(clean_lines, linenum): """Checks whether where function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types. 
""" line = clean_lines.elided[linenum] return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or (linenum >= 2 and (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]) or Search(r'\bstd::m?function\s*\<\s*$', clean_lines.elided[linenum - 1])))) _HEADERS_CONTAINING_TEMPLATES = ( ('', ('deque',)), ('', ('unary_function', 'binary_function', 'plus', 'minus', 'multiplies', 'divides', 'modulus', 'negate', 'equal_to', 'not_equal_to', 'greater', 'less', 'greater_equal', 'less_equal', 'logical_and', 'logical_or', 'logical_not', 'unary_negate', 'not1', 'binary_negate', 'not2', 'bind1st', 'bind2nd', 'pointer_to_unary_function', 'pointer_to_binary_function', 'ptr_fun', 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', 'mem_fun_ref_t', 'const_mem_fun_t', 'const_mem_fun1_t', 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', 'mem_fun_ref', )), ('', ('numeric_limits',)), ('', ('list',)), ('', ('map', 'multimap',)), ('', ('allocator',)), ('', ('queue', 'priority_queue',)), ('', ('set', 'multiset',)), ('', ('stack',)), ('', ('char_traits', 'basic_string',)), ('', ('tuple',)), ('', ('pair',)), ('', ('vector',)), # gcc extensions. # Note: std::hash is their hash, ::hash is our hash ('', ('hash_map', 'hash_multimap',)), ('', ('hash_set', 'hash_multiset',)), ('', ('slist',)), ) _RE_PATTERN_STRING = re.compile(r'\bstring\b') _re_pattern_algorithm_header = [] for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap', 'transform'): # Match max(..., ...), max(..., ...), but not foo->max, foo.max or # type::max(). _re_pattern_algorithm_header.append( (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), _template, '')) _re_pattern_templates = [] for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: for _template in _templates: _re_pattern_templates.append( (re.compile(r'(\<|\b)' + _template + r'\s*\<'), _template + '<>', _header)) def FilesBelongToSameModule(filename_cc, filename_h): """Check if these two filenames belong to the same module. The concept of a 'module' here is a as follows: foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the same 'module' if they are in the same directory. some/path/public/xyzzy and some/path/internal/xyzzy are also considered to belong to the same module here. If the filename_cc contains a longer path than the filename_h, for example, '/absolute/path/to/base/sysinfo.cc', and this file would include 'base/sysinfo.h', this function also produces the prefix needed to open the header. This is used by the caller of this function to more robustly open the header file. We don't have access to the real include paths in this context, so we need this guesswork here. Known bugs: tools/base/bar.cc and base/bar.h belong to the same module according to this implementation. Because of this, this function gives some false positives. This should be sufficiently rare in practice. Args: filename_cc: is the path for the .cc file filename_h: is the path for the header path Returns: Tuple with a bool and a string: bool: True if filename_cc and filename_h belong to the same module. string: the additional prefix needed to open the header file. 
""" if not filename_cc.endswith('.cc'): return (False, '') filename_cc = filename_cc[:-len('.cc')] if filename_cc.endswith('_unittest'): filename_cc = filename_cc[:-len('_unittest')] elif filename_cc.endswith('_test'): filename_cc = filename_cc[:-len('_test')] filename_cc = filename_cc.replace('/public/', '/') filename_cc = filename_cc.replace('/internal/', '/') if not filename_h.endswith('.h'): return (False, '') filename_h = filename_h[:-len('.h')] if filename_h.endswith('-inl'): filename_h = filename_h[:-len('-inl')] filename_h = filename_h.replace('/public/', '/') filename_h = filename_h.replace('/internal/', '/') files_belong_to_same_module = filename_cc.endswith(filename_h) common_path = '' if files_belong_to_same_module: common_path = filename_cc[:-len(filename_h)] return files_belong_to_same_module, common_path def UpdateIncludeState(filename, include_dict, io=codecs): """Fill up the include_dict with new includes found from the file. Args: filename: the name of the header to read. include_dict: a dictionary in which the headers are inserted. io: The io factory to use to read the file. Provided for testability. Returns: True if a header was successfully added. False otherwise. """ headerfile = None try: headerfile = io.open(filename, 'r', 'utf8', 'replace') except IOError: return False linenum = 0 for line in headerfile: linenum += 1 clean_line = CleanseComments(line) match = _RE_PATTERN_INCLUDE.search(clean_line) if match: include = match.group(2) include_dict.setdefault(include, linenum) return True def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs): """Reports for missing stl includes. This function will output warnings to make sure you are including the headers necessary for the stl containers and functions that you use. We only give one reason to include a header. For example, if you use both equal_to<> and less<> in a .h file, only one (the latter in the file) of these will be reported as a reason to include the . Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. include_state: An _IncludeState instance. error: The function to call with any errors found. io: The IO factory to use to read the header file. Provided for unittest injection. """ required = {} # A map of header name to linenumber and the template entity. # Example of required: { '': (1219, 'less<>') } for linenum in xrange(clean_lines.NumLines()): line = clean_lines.elided[linenum] if not line or line[0] == '#': continue # String is special -- it is a non-templatized type in STL. matched = _RE_PATTERN_STRING.search(line) if matched: # Don't warn about strings in non-STL namespaces: # (We check only the first match per line; good enough.) prefix = line[:matched.start()] if prefix.endswith('std::') or not prefix.endswith('::'): required[''] = (linenum, 'string') for pattern, template, header in _re_pattern_algorithm_header: if pattern.search(line): required[header] = (linenum, template) # The following function is just a speed up, no semantics are changed. if not '<' in line: # Reduces the cpu time usage by skipping lines. continue for pattern, template, header in _re_pattern_templates: if pattern.search(line): required[header] = (linenum, template) # The policy is that if you #include something in foo.h you don't need to # include it again in foo.cc. Here, we will look at possible includes. # Let's flatten the include_state include_list and copy it into a dictionary. 
include_dict = dict([item for sublist in include_state.include_list for item in sublist]) # Did we find the header for this file (if any) and successfully load it? header_found = False # Use the absolute path so that matching works properly. abs_filename = FileInfo(filename).FullName() # For Emacs's flymake. # If cpplint is invoked from Emacs's flymake, a temporary file is generated # by flymake and that file name might end with '_flymake.cc'. In that case, # restore original file name here so that the corresponding header file can be # found. # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' # instead of 'foo_flymake.h' abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) # include_dict is modified during iteration, so we iterate over a copy of # the keys. header_keys = include_dict.keys() for header in header_keys: (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) fullpath = common_path + header if same_module and UpdateIncludeState(fullpath, include_dict, io): header_found = True # If we can't find the header file for a .cc, assume it's because we don't # know where to look. In that case we'll give up as we're not sure they # didn't include it in the .h file. # TODO(unknown): Do a better job of finding .h files so we are confident that # not having the .h file means there isn't one. if filename.endswith('.cc') and not header_found: return # All the lines have been processed, report the errors found. for required_header_unstripped in required: template = required[required_header_unstripped][1] if required_header_unstripped.strip('<>"') not in include_dict: error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4, 'Add #include ' + required_header_unstripped + ' for ' + template) _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): """Check that make_pair's template arguments are deduced. G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are specified explicitly, and such use isn't intended in any case. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) if match: error(filename, linenum, 'build/explicit_make_pair', 4, # 4 = high confidence 'For C++11-compatibility, omit template arguments from make_pair' ' OR use pair directly OR if appropriate, construct a pair directly') def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): """Check that default lambda captures are not used. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # A lambda introducer specifies a default capture if it starts with "[=" # or if it starts with "[&" _not_ followed by an identifier. match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line) if match: # Found a potential error, check what comes after the lambda-introducer. # If it's not open parenthesis (for lambda-declarator) or open brace # (for compound-statement), it's not a lambda. 
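  # Illustrative examples (hypothetical lambdas): '[=](int x) { return x; }'
  # and '[&] { ++count; }' are both flagged below, while '[&count](int x)'
  # is not, because '&' followed immediately by an identifier names an
  # explicit capture rather than a default one.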
line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) if pos >= 0 and Match(r'^\s*[{(]', line[pos:]): error(filename, linenum, 'build/c++11', 4, # 4 = high confidence 'Default lambda captures are an unapproved C++ feature.') def CheckRedundantVirtual(filename, clean_lines, linenum, error): """Check if line contains a redundant "virtual" function-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Look for "virtual" on current line. line = clean_lines.elided[linenum] virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line) if not virtual: return # Ignore "virtual" keywords that are near access-specifiers. These # are only used in class base-specifier and do not apply to member # functions. if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or Match(r'^\s+(public|protected|private)\b', virtual.group(3))): return # Ignore the "virtual" keyword from virtual base classes. Usually # there is a column on the same line in these cases (virtual base # classes are rare in google3 because multiple inheritance is rare). if Match(r'^.*[^:]:[^:].*$', line): return # Look for the next opening parenthesis. This is the start of the # parameter list (possibly on the next line shortly after virtual). # TODO(unknown): doesn't work if there are virtual functions with # decltype() or other things that use parentheses, but csearch suggests # that this is rare. end_col = -1 end_line = -1 start_col = len(virtual.group(2)) for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): line = clean_lines.elided[start_line][start_col:] parameter_list = Match(r'^([^(]*)\(', line) if parameter_list: # Match parentheses to find the end of the parameter list (_, end_line, end_col) = CloseExpression( clean_lines, start_line, start_col + len(parameter_list.group(1))) break start_col = 0 if end_col < 0: return # Couldn't find end of parameter list, give up # Look for "override" or "final" after the parameter list # (possibly on the next few lines). for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): line = clean_lines.elided[i][end_col:] match = Search(r'\b(override|final)\b', line) if match: error(filename, linenum, 'readability/inheritance', 4, ('"virtual" is redundant since function is ' 'already declared as "%s"' % match.group(1))) # Set end_col to check whole lines after we are done with the # first line. end_col = 0 if Search(r'[^\w]\s*$', line): break def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): """Check if line contains a redundant "override" or "final" virt-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Look for closing parenthesis nearby. We need one to confirm where # the declarator ends and where the virt-specifier starts to avoid # false positives. 
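  # Illustrative example (hypothetical declaration): 'void Draw() final
  # override;' is reported because "override" adds nothing once the
  # function is declared "final", while a plain 'void Draw() override;' is
  # left alone.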
line = clean_lines.elided[linenum] declarator_end = line.rfind(')') if declarator_end >= 0: fragment = line[declarator_end:] else: if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0: fragment = line else: return # Check that at most one of "override" or "final" is present, not both if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): error(filename, linenum, 'readability/inheritance', 4, ('"override" is redundant since function is ' 'already declared as "final"')) # Returns true if we are at a new block, and it is directly # inside of a namespace. def IsBlockInNameSpace(nesting_state, is_forward_declaration): """Checks that the new block is directly in a namespace. Args: nesting_state: The _NestingState object that contains info about our state. is_forward_declaration: If the class is a forward declared class. Returns: Whether or not the new block is directly in a namespace. """ if is_forward_declaration: if len(nesting_state.stack) >= 1 and ( isinstance(nesting_state.stack[-1], _NamespaceInfo)): return True else: return False return (len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.stack[-2], _NamespaceInfo)) def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum): """This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace. """ is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum) if not (is_namespace_indent_item or is_forward_declaration): return False # If we are in a macro, we do not want to check the namespace indentation. if IsMacroDefinition(raw_lines_no_comments, linenum): return False return IsBlockInNameSpace(nesting_state, is_forward_declaration) # Call this method if the line is directly inside of a namespace. # If the line above is blank (excluding comments) or the start of # an inner namespace, it cannot be indented. def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, error): line = raw_lines_no_comments[linenum] if Match(r'^\s+', line): error(filename, linenum, 'runtime/indentation_namespace', 4, 'Do not indent within a namespace') def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): """Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. 
error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) if nesting_state.InAsmBlock(): return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) CheckDefaultLambdaCaptures(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error) def FlagCxx11Features(filename, clean_lines, linenum, error): """Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Flag unapproved C++11 headers. include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name) def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=[]): """Performs lint checks and reports any errors to the given error function. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. 
error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ lines = (['// marker so line numbers and indices both start at 1'] + lines + ['// marker so line numbers end in a known way']) include_state = _IncludeState() function_state = _FunctionState() nesting_state = NestingState() ResetNolintSuppressions() CheckForCopyright(filename, lines, error) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) if file_extension == 'h': CheckForHeaderGuard(filename, clean_lines, error) for line in xrange(clean_lines.NumLines()): ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions) FlagCxx11Features(filename, clean_lines, line, error) nesting_state.CheckCompletedBlocks(filename, error) CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) # Check that the .cc file has included its header if it exists. if file_extension == 'cc': CheckHeaderFileIncluded(filename, include_state, error) # We check here rather than inside ProcessLine so that we see raw # lines rather than "cleaned" lines. CheckForBadCharacters(filename, lines, error) CheckForNewlineAtEOF(filename, lines, error) def ProcessConfigOverrides(filename): """ Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further. """ abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write('Ignoring "%s": file excluded by "%s". ' 'File path component "%s" matches ' 'pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: sys.stderr.write( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). 
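  # Purely illustrative (not shipped with this repository): a CPPLINT.cfg picked up by
  # the parsing above and applied by the loop below might contain lines such as
  #   set noparent
  #   filter=-whitespace/newline,+build/c++11
  #   exclude_files=vendor
  #   linelength=120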
for filter in reversed(cfg_filters): _AddFilters(filter) return True def ProcessFile(filename, vlevel, extra_check_functions=[]): """Does google-lint on a single file. Args: filename: The name of the file to parse. vlevel: The level of errors to report. Every error of confidence >= verbose_level will be reported. 0 is a good default. extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ _SetVerboseLevel(vlevel) _BackupFilters() if not ProcessConfigOverrides(filename): _RestoreFilters() return lf_lines = [] crlf_lines = [] try: # Support the UNIX convention of using "-" for stdin. Note that # we are not opening the file with universal newline support # (which codecs doesn't support anyway), so the resulting lines do # contain trailing '\r' characters if we are reading a file that # has CRLF endings. # If after the split a trailing '\r' is present, it is removed # below. if filename == '-': lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n') else: lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') # Remove trailing '\r'. # The -1 accounts for the extra trailing blank line we get from split() for linenum in range(len(lines) - 1): if lines[linenum].endswith('\r'): lines[linenum] = lines[linenum].rstrip('\r') crlf_lines.append(linenum + 1) else: lf_lines.append(linenum + 1) except IOError: sys.stderr.write( "Skipping input '%s': Can't open for reading\n" % filename) _RestoreFilters() return # Note, if no dot is found, this will give the entire filename as the ext. file_extension = filename[filename.rfind('.') + 1:] # When reading from stdin, the extension is unknown, so no cpplint tests # should rely on the extension. if filename != '-' and file_extension not in _valid_extensions: sys.stderr.write('Ignoring %s; not a valid file name ' '(%s)\n' % (filename, ', '.join(_valid_extensions))) else: ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) # If end-of-line sequences are a mix of LF and CR-LF, issue # warnings on the lines with CR. # # Don't issue any warnings if all lines are uniformly LF or CR-LF, # since critique can handle these just fine, and the style guide # doesn't dictate a particular end of line sequence. # # We can't depend on os.linesep to determine what the desired # end-of-line sequence should be, since that will return the # server-side end-of-line sequence. if lf_lines and crlf_lines: # Warn on every line with CR. An alternative approach might be to # check whether the file is mostly CRLF or just LF, and warn on the # minority, we bias toward LF here since most tools prefer LF. for linenum in crlf_lines: Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\r (^M) found; better to use only \\n') sys.stderr.write('Done processing %s\n' % filename) _RestoreFilters() def PrintUsage(message): """Prints a brief usage string and exits, optionally with an error message. Args: message: The optional error message. """ sys.stderr.write(_USAGE) if message: sys.exit('\nFATAL ERROR: ' + message) else: sys.exit(1) def PrintCategories(): """Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter. """ sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) sys.exit(0) def ParseArguments(args): """Parses the command line arguments. 
This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint. """ try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'linelength=', 'extensions=']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' counting_style = '' for (opt, val) in opts: if opt == '--help': PrintUsage(None) elif opt == '--output': if val not in ('emacs', 'vs7', 'eclipse'): PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') output_format = val elif opt == '--verbose': verbosity = int(val) elif opt == '--filter': filters = val if not filters: PrintCategories() elif opt == '--counting': if val not in ('total', 'toplevel', 'detailed'): PrintUsage('Valid counting options are total, toplevel, and detailed') counting_style = val elif opt == '--root': global _root _root = val elif opt == '--linelength': global _line_length try: _line_length = int(val) except ValueError: PrintUsage('Line length must be digits.') elif opt == '--extensions': global _valid_extensions try: _valid_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma seperated list.') if not filenames: PrintUsage('No files were specified.') _SetOutputFormat(output_format) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) return filenames def main(): filenames = ParseArguments(sys.argv[1:]) # Change stderr to write with replacement characters so we don't die # if we try to print something containing non-ASCII characters. sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace') _cpplint_state.ResetErrorCounts() for filename in filenames: ProcessFile(filename, _cpplint_state.verbose_level) _cpplint_state.PrintErrorCounts() sys.exit(_cpplint_state.error_count > 0) if __name__ == '__main__': main() leatherman-1.4.2+dfsg/tests/000075500000000000000000000000001332360634000157155ustar00rootroot00000000000000leatherman-1.4.2+dfsg/tests/CMakeLists.txt000064400000000000000000000017671332360634000204700ustar00rootroot00000000000000include_directories(BEFORE ${LEATHERMAN_CATCH_INCLUDE} ${LEATHERMAN_INCLUDE_DIRS}) add_executable(leatherman_test main.cc ${LEATHERMAN_TEST_SRCS}) if (LEATHERMAN_SHARED) # Include deps first, as they may be static. If they are, linking on Windows can # fail due to multiple definitions for the same symbol. 
set(LEATHERMAN_TEST_LIBS ${LEATHERMAN_DEPS} ${LEATHERMAN_LIBS}) else() set(LEATHERMAN_TEST_LIBS ${LEATHERMAN_LIBS} ${LEATHERMAN_DEPS}) endif() if (LEATHERMAN_USE_CURL AND LEATHERMAN_INT_CURL_LIBS AND LEATHERMAN_TEST_CURL_LIB) list(REMOVE_ITEM LEATHERMAN_TEST_LIBS ${LEATHERMAN_INT_CURL_LIBS}) list(APPEND LEATHERMAN_TEST_LIBS ${LEATHERMAN_TEST_CURL_LIB}) endif() # We link libmock_curl instead of real libcurl in tests target_link_libraries(leatherman_test ${LEATHERMAN_TEST_LIBS}) leatherman_logging_namespace("leatherman.test") set_target_properties(leatherman_test PROPERTIES COMPILE_FLAGS "${LEATHERMAN_CXX_FLAGS}") add_test(NAME "leatherman\\ tests" COMMAND leatherman_test) leatherman-1.4.2+dfsg/tests/main.cc000064400000000000000000000005531332360634000171530ustar00rootroot00000000000000#define CATCH_CONFIG_RUNNER #include // To enable log messages: // #define ENABLE_LOGGING #ifdef ENABLE_LOGGING #include #endif int main(int argc, char **argv) { #ifdef ENABLE_LOGGING leatherman::logging::set_level(leatherman::logging::log_level::debug); #endif return Catch::Session().run( argc, argv ); } leatherman-1.4.2+dfsg/util/000075500000000000000000000000001332360634000155305ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/CMakeLists.txt000064400000000000000000000016231332360634000202720ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED date_time chrono system) add_leatherman_deps(${Boost_LIBRARIES}) add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(nowide) if(WIN32) set(PLATFORM_SRCS "src/windows/time.cc" "src/windows/environment.cc" "src/windows/scoped_handle.cc") set(PLATFORM_TESTS "tests/windows/environment.cc") else() set(PLATFORM_SRCS "src/posix/time.cc" "src/posix/environment.cc" "src/posix/scoped_descriptor.cc") set(PLATFORM_TESTS "tests/posix/environment.cc") endif() add_leatherman_headers(inc/leatherman) add_leatherman_library( src/strings.cc src/time.cc src/environment.cc src/scope_exit.cc src/scoped_env.cc src/uri.cc ${PLATFORM_SRCS} ) add_leatherman_test( tests/scoped_env.cc tests/strings_test.cc tests/option_set.cc tests/environment.cc tests/timer.cc tests/uri.cc ${PLATFORM_TESTS} ) leatherman-1.4.2+dfsg/util/inc/000075500000000000000000000000001332360634000163015ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/inc/leatherman/000075500000000000000000000000001332360634000204215ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/inc/leatherman/util/000075500000000000000000000000001332360634000213765ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/inc/leatherman/util/environment.hpp000064400000000000000000000045621332360634000244620ustar00rootroot00000000000000/** * @file * Declares utility functions for environment variables. */ #pragma once #include #include #include namespace leatherman { namespace util { /** * Represents a platform-agnostic way for manipulating environment variables. */ struct environment { /** * Gets an environment variable. * @param name The name of the environment variable to get. * @param value Returns the value of the environment variable. * @return Returns true if the environment variable is present or false if it is not. */ static bool get(std::string const& name, std::string& value); /** * Sets an environment variable. * Note that on Windows, setting an environment variable to an empty string is * equivalent to clearing it. * @param name The name of the environment variable to set. * @param value The value of the environment variable to set. 
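     * A minimal usage sketch (illustrative; the variable name is arbitrary):
     * @code
     *     std::string value;
     *     leatherman::util::environment::set("MY_TEST_VAR", "1");
     *     leatherman::util::environment::get("MY_TEST_VAR", value);   // value == "1"
     *     leatherman::util::environment::clear("MY_TEST_VAR");
     * @endcode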
* @return Returns true if the environment variable could be changed. * If false, it sets the system error state. */ static bool set(std::string const& name, std::string const& value); /** * Unsets an environment variable. * @param name The name of the environment variable to unset. * @return Returns true if the environment variable could be unset. * If false, it sets the system error state. */ static bool clear(std::string const& name); /** * Gets the platform-specific path separator. * @return Returns the platform-specific path separator. */ static char get_path_separator(); /** * Gets the platform-specific search program paths. * @return Returns the platform-specific program search paths. */ static std::vector const& search_paths(); /** * Force search program paths to be reloaded. */ static void reload_search_paths(); /** * Enumerates the environment variables for the current process. * @param callback The callback to call for each environment variable (passes the variable name and value). */ static void each(std::function callback); }; }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/option_set.hpp000064400000000000000000000144461332360634000243030ustar00rootroot00000000000000/** * @file * Declares utility type for passing a set of options. */ #pragma once #include #include #include namespace leatherman { namespace util { /** * Represents a set of options (flags). * Adapted from http://stackoverflow.com/a/4226975/530189 * @tparam T The enum class type that makes up the available options. */ template struct option_set { /** * The underlying enum type for the option set. */ typedef T enum_type; /** * The value type for the enum type. */ typedef typename std::underlying_type::type value_type; /** * Constructs an empty option_set. */ option_set() : option_set(value_type(0)) { } /** * Constructs an option_set with the given list of options. * @param values The option values to store in the list. */ option_set(std::initializer_list const& values) { // Simply bitwise OR all the values together. _value = std::accumulate( std::begin(values), std::end(values), value_type(0), [](value_type acc, enum_type value) { return acc | static_cast(value); }); } /** * Constructs an option_set with an existing bitfield value. * @param value */ explicit option_set(value_type value) : _value(value) { } /** * Gets the underlying value of the set. * @return Returns the underlying value of the set. */ value_type value() const { return _value; } /** * Used to test if an option is present in the set. * @param option The option to check for. * @return Returns true if the option is in the set or false if it is not. */ bool operator [](enum_type option) const { return test(option); } /** * Sets all options to true. * @return Returns this option_set. */ option_set& set_all() { _value = ~value_type(0); return *this; } /** * Sets the given option to true. * @param option The option to set to true. * @return Returns this option_set. */ option_set& set(enum_type option) { _value |= static_cast(option); return *this; } /** * Clears the given option from the set. * @param option The option to clear. * @return Returns this option_set. */ option_set& clear(enum_type option) { _value &= ~static_cast(option); return *this; } /** * Resets the option_set by clearing all options. * @return Returns this option_set. */ option_set& reset() { _value = value_type(0); return *this; } /** * Toggles all options in the set. * @return Returns this option_set. 
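     * A sketch of the effect (illustrative; "options" is an enum class like the one used in the unit tests):
     * @code
     *     option_set<options> set = { options::foo };
     *     set.toggle();   // every bit of the underlying value is flipped, so foo is now the only cleared option
     * @endcode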
*/ option_set& toggle() { _value = ~_value; return *this; } /** * Toggles a specific option in the set. * @param option The option to toggle. * @return Returns this option_set. */ option_set& toggle(enum_type option) { _value ^= static_cast(option); return *this; } /** * Gets the count of options in the set. * @return Returns the count of options in the set. */ size_t count() const { // Do a simple bit count value_type bits = _value; size_t total = 0; for (; bits != 0; ++total) { bits &= bits - 1; // clear the least significant bit set } return total; } /** * Gets the bit size of the option_set. * The size is based on how many bits are present in the underlying value_type. * @return Returns the bit size of the option_set. */ constexpr size_t size() const { return sizeof(value_type)*8; } /** * Tests if the given option is in the set. * @param option The option to test for. * @return Returns true if the option is in the set or false if it is not. */ bool test(enum_type option) const { return _value & static_cast(option); } /** * Checks to see if the option_set is empty. * @return Returns true if the set is empty (no options) or false if there are options in the set. */ bool empty() const { return _value == 0; } private: value_type _value; }; /** * Bitwise AND operator for option_set. * @param lhs The lefthand option_set. * @param rhs The righthand option_set. * @return Returns an option_set that is the bitwise AND of the two given option_sets. */ template option_set operator &(option_set const& lhs, option_set const& rhs) { return option_set(lhs.value() & rhs.value()); } /** * Bitwise OR operator for option_set. * @param lhs The lefthand option_set. * @param rhs The righthand option_set. * @return Returns an option_set that is the bitwise OR of the two given option_sets. */ template option_set operator |(option_set const& lhs, option_set const& rhs) { return option_set(lhs.value() | rhs.value()); } /** * Bitwise XOR operator for option_set. * @param lhs The lefthand option_set. * @param rhs The righthand option_set. * @return Returns an option_set that is the bitwise XOR of the two given option_sets. */ template option_set operator ^(option_set const& lhs, option_set const& rhs) { return option_set(lhs.value() ^ rhs.value()); } }} // namespace leatherman::execution leatherman-1.4.2+dfsg/util/inc/leatherman/util/posix/000075500000000000000000000000001332360634000225405ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/inc/leatherman/util/posix/scoped_descriptor.hpp000064400000000000000000000015251332360634000267670ustar00rootroot00000000000000/** * @file * Declares the scoped descriptor resource for managing file/socket descriptors. */ #pragma once #include #include namespace leatherman { namespace util { namespace posix { /** * Represents a scoped file descriptor for POSIX systems. * Automatically closes the file descriptor when it goes out of scope. */ struct scoped_descriptor : scoped_resource { /** * Constructs a scoped_descriptor. * @param descriptor The file descriptor to close when destroyed. */ explicit scoped_descriptor(int descriptor); /** * Constructs a closed scoped_descriptor. */ scoped_descriptor(); private: static void close(int descriptor); }; }}} // namespace leatherman::util::posix leatherman-1.4.2+dfsg/util/inc/leatherman/util/regex.hpp000064400000000000000000000057331332360634000232310ustar00rootroot00000000000000/** * @file * Defines an abstraction for using regular expression calls. 
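 *
 * A minimal sketch of re_search (illustrative; the text and pattern are arbitrary):
 * @code
 *     int major = 0, minor = 0;
 *     std::string text = "version 3.14";
 *     if (leatherman::util::re_search(text, boost::regex("(\\d+)\\.(\\d+)"), &major, &minor)) {
 *         // major == 3, minor == 14
 *     }
 * @endcode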
*/ #pragma once #include #include namespace leatherman { namespace util { /** * Helper function for resolving variadic arguments to re_search. * @tparam Text The type of the text to search. * @param txt The text to search. * @param what The pattern to search the text with. * @param depth The current argument depth. * @return Returns true if the match group was found or false if it was not. */ template inline bool re_search_helper(Text &txt, const boost::smatch &what, size_t depth) { return true; } /** * Helper function for resolving variadic arguments to re_search. * @tparam Text The type of the text to search. * @tparam Arg The type of the current match group argument. * @tparam Args The variadic types of the remaining match group arguments. * @param txt The text to search. * @param what The pattern to search the text with. * @param depth The current argument depth. * @param arg The current match group argument. * @param args The remaining match group arguments. * @return Returns true if the match group was found or false if it was not. */ template inline bool re_search_helper(Text const& txt, const boost::smatch &what, size_t depth, Arg arg, Args&&... args) { if (depth >= what.size()) { return false; } // If the match was optional and unmatched, skip it and leave the variable uninitialized. if (what[depth].matched) { try { using ArgType = typename std::pointer_traits::element_type; auto val = boost::lexical_cast(what[depth]); *arg = val; } catch (const boost::bad_lexical_cast &e) { return false; } } return re_search_helper(txt, what, depth+1, std::forward(args)...); } /** * Searches the given text for the given pattern. Optional variadic arguments return matched * subgroups. If a subgroup is optional and unmatched, leaves the argument uninitialized. * @tparam Text The type of the text. * @tparam Args The variadic type of the match group arguments. * @param txt The text to search. * @param pattern The pattern to search the text with. * @param args The returned match groups. * @return Returns true if the text matches the given pattern or false if it does not. */ template inline bool re_search(Text const& txt, boost::regex const& pattern, Args&&... args) { boost::smatch what; if (!boost::regex_search(txt, what, pattern)) { return false; } return re_search_helper(txt, what, 1, std::forward(args)...); } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/scope_exit.hpp000064400000000000000000000031131332360634000242470ustar00rootroot00000000000000/** * @file * Declares the base class for scope exit. */ #pragma once #include namespace leatherman { namespace util { /** * Used to call a function when scope is exited. */ struct scope_exit { /** * Constructs a scope_exit. */ scope_exit(); /** * Constructs a scope_exit. * @param callback The function to call when scope is exited. */ explicit scope_exit(std::function callback); /** * Moves the given scope_exit into this scope_exit. * @param other The scope_exit to move into this scope_exit. */ scope_exit(scope_exit&& other); /** * Moves the given scoped_resource into this scoped_resource. * @param other The scoped_resource to move into this scoped_resource. * @return Returns this scope_exit. */ scope_exit& operator=(scope_exit&& other); /** * Destructs a scope_exit. */ ~scope_exit(); /** * Invokes the callback. * If called, the callback will not be called upon destruction. 
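     * A sketch of typical use (illustrative; the callback is arbitrary):
     * @code
     *     leatherman::util::scope_exit guard([]() { boost::nowide::unsetenv("MY_TEST_VAR"); });
     *     // ... work that might throw or return early ...
     *     guard.invoke();   // run the cleanup now; the destructor then does nothing
     * @endcode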
*/ void invoke(); private: explicit scope_exit(scope_exit const&) = delete; scope_exit& operator=(scope_exit const&) = delete; void* operator new(size_t) = delete; void operator delete(void*) = delete; void* operator new[](size_t) = delete; void operator delete[](void* ptr) = delete; std::function _callback; }; }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/scoped_env.hpp000064400000000000000000000021221332360634000242310ustar00rootroot00000000000000/** * @file * Declares the scoped resource for temporarily changing an environment variable. */ #pragma once #include #include #include namespace leatherman { namespace util { /** * This is an RAII wrapper for temporarily changing an environment variable. * It sets the environment on construction and restores it on destruction. */ struct scoped_env : scoped_resource>> { /** * Temporarily overrides the value of an environment variable. * @param var The environment variable to update. * @param newval The value to set it to during existence of this object. */ explicit scoped_env(std::string var, std::string const& newval); /** * Temporarily unsets an environment variable. */ explicit scoped_env(std::string var); private: static void restore(std::tuple> &); }; }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/scoped_resource.hpp000064400000000000000000000071061332360634000252770ustar00rootroot00000000000000/** * @file * Declares the base class for scoped resources. */ #pragma once #include namespace leatherman { namespace util { /** * Simple class that is used for the RAII pattern. * Used to scope a resource. When it goes out of scope, a deleter * function is called to delete the resource. * This type can be moved but cannot be copied. * @tparam T The type of resource being scoped. */ template struct scoped_resource { /** * Constructs an uninitialized scoped_resource. * Can be initialized via move assignment. */ scoped_resource() : _resource(), _deleter(nullptr) { } /** * Constructs a scoped_resource. * Takes ownership of the given resource. * @param resource The resource to scope. * @param deleter The function to call when the resource goes out of scope. */ scoped_resource(T resource, std::function deleter) : _resource(std::move(resource)), _deleter(deleter) { } /** * Prevents the scoped_resource from being copied. */ explicit scoped_resource(scoped_resource const&) = delete; /** * Prevents the scoped_resource from being copied. * @returns Returns this scoped_resource. */ scoped_resource& operator=(scoped_resource const&) = delete; /** * Moves the given scoped_resource into this scoped_resource. * @param other The scoped_resource to move into this scoped_resource. */ scoped_resource(scoped_resource&& other) { *this = std::move(other); } /** * Moves the given scoped_resource into this scoped_resource. * @param other The scoped_resource to move into this scoped_resource. * @return Returns this scoped_resource. */ scoped_resource& operator=(scoped_resource&& other) { release(); _resource = std::move(other._resource); _deleter = std::move(other._deleter); // Ensure the deleter is in a known "empty" state; we can't rely on default move semantics for that other._deleter = nullptr; return *this; } /** * Destructs a scoped_resource. */ ~scoped_resource() { release(); } /** * Implicitly casts to T&. * @return Returns reference-to-T. */ operator T&() { return _resource; } /** * Implicitly casts to T const&. * @return Returns const-reference-to-T. 
*/ operator T const&() const { return _resource; } /** * Releases the resource before destruction. */ void release() { if (_deleter) { _deleter(_resource); _deleter = std::function(); } } protected: /** * Stores the resource being scoped. */ T _resource; /** * Stores the function to call when the resource goes out of scope. */ std::function _deleter; private: void* operator new(size_t) = delete; void operator delete(void*) = delete; void* operator new[](size_t) = delete; void operator delete[](void* ptr) = delete; }; }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/strings.hpp000064400000000000000000000034531332360634000236050ustar00rootroot00000000000000/** * @file * Declares utility functions for dealing with strings. */ #pragma once #include #include #include #include #include namespace leatherman { namespace util { /** * Case-insensitive string comparison. */ struct ciless : std::binary_function { /** * Compares two strings for a "less than" relationship using a case-insensitive comparison. * @param s1 The first string to compare. * @param s2 The second string to compare. * @return Returns true if s1 is less than s2 or false if s1 is equal to or greater than s2. */ bool operator() (const std::string &s1, const std::string &s2) const { return boost::lexicographical_compare(s1, s2, boost::is_iless()); } }; /** * @return Returns the "s" string in case of more than one thing, * an empty string otherwise. */ std::string plural(int num_of_things); /** * @return Returns the "s" string if vector contains more than one item, * an empty string otherwise. */ template std::string plural(std::vector const& things); /** @return Returns universally unique identifier string. */ std::string get_UUID(); /** * Reads each line from the given string. * @param s The string to read. * @param callback The callback function that is passed each line in the string. */ void each_line(std::string const& s, std::function callback); }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/time.hpp000064400000000000000000000021671332360634000230530ustar00rootroot00000000000000/** * @file * Declares utility functions for dealing with time. */ #pragma once #include #include namespace leatherman { namespace util { /** * Adds the specified expiry_minutes to the current time and returns * the related date time string in UTC format. * @return Returns an empty string in case it fails to allocate the buffer. */ std::string get_expiry_datetime(int expiry_minutes = 1); /** * Gets the current time in ISO8601 format * @param modifier_in_secords Offset from the current time in seconds * @return Returns the current time plus the modifier */ std::string get_ISO8601_time(unsigned int modifier_in_seconds = 0); /** @return Returns the current datetime string in the %Y%m%d_%H%M%S format */ std::string get_date_time(); /** * Turns a stored time into a local time with correction for timezones. * @param stored_time The time to be converted. * @param result The struct in which to store the local time. */ void get_local_time(std::time_t* stored_time, std::tm* result); }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/timer.hpp000064400000000000000000000021461332360634000232320ustar00rootroot00000000000000/** * @file * Declares a simple timer class. */ #pragma once #include namespace leatherman { namespace util { /** * A simple stopwatch/timer we can use for user feedback. 
We use the * std::chrono::steady_clock as we don't want to be affected if the system * clock changed around us (think ntp skew/leapseconds). */ class Timer { public: Timer() { reset(); } /** @return Returns the time that has passed since last reset in seconds. */ double elapsed_seconds() { auto now = std::chrono::steady_clock::now(); return std::chrono::duration(now - start_).count(); } /** @return Returns the time that has passed since last reset in milliseconds. */ int elapsed_milliseconds() { auto now = std::chrono::steady_clock::now(); return std::chrono::duration_cast(now - start_).count(); } /** Resets the clock. */ void reset() { start_ = std::chrono::steady_clock::now(); } private: std::chrono::time_point start_; }; }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/uri.hpp000064400000000000000000000006521332360634000227110ustar00rootroot00000000000000/** * @file * Declares tools for deconstructing URIs */ #pragma once #include namespace leatherman { namespace util { /** * A class that parses a URI into its components. * Does not support user_info, and doesn't break fragment out from query. */ struct uri { std::string protocol, host, port, path, query; uri(std::string const&); std::string str() const; }; }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/inc/leatherman/util/windows/000075500000000000000000000000001332360634000230705ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/inc/leatherman/util/windows/scoped_handle.hpp000064400000000000000000000014271332360634000263750ustar00rootroot00000000000000/** * @file * Declares the scoped HANDLE resource for managing Windows HANDLEs. */ #pragma once #include typedef void *HANDLE; namespace leatherman { namespace util { namespace windows { /** * Represents a scoped HANDLE for Windows. * Automatically closes the HANDLE when it goes out of scope. */ struct scoped_handle : scoped_resource { /** * Constructs a scoped_handle. * @param handle The HANDLE to close when destroyed. */ explicit scoped_handle(HANDLE handle); /** * Constructs a closed scoped_handle. 
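     * A usage sketch for the owning constructor above (illustrative; any call returning a HANDLE works):
     * @code
     *     scoped_handle event(CreateEventW(nullptr, FALSE, FALSE, nullptr));
     *     // CloseHandle is invoked automatically when "event" goes out of scope
     * @endcode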
*/ scoped_handle(); private: static void close(HANDLE handle); }; }}} // namespace leatherman::util::windows leatherman-1.4.2+dfsg/util/src/000075500000000000000000000000001332360634000163175ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/src/environment.cc000064400000000000000000000012651332360634000211760ustar00rootroot00000000000000#include #include using namespace std; namespace leatherman { namespace util { bool environment::get(string const& name, string& value) { auto variable = boost::nowide::getenv(name.c_str()); if (!variable) { return false; } value = variable; return true; } bool environment::set(string const& name, string const& value) { return boost::nowide::setenv(name.c_str(), value.c_str(), 1) == 0; } bool environment::clear(string const& name) { return boost::nowide::unsetenv(name.c_str()) == 0; } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/posix/000075500000000000000000000000001332360634000174615ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/src/posix/environment.cc000064400000000000000000000036531332360634000223430ustar00rootroot00000000000000#include #include #include #include using namespace std; // Some platforms need environ explicitly declared extern char** environ; namespace leatherman { namespace util { struct search_path_helper { search_path_helper() { string paths; if (environment::get("PATH", paths)) { auto is_sep = bind(equal_to(), placeholders::_1, environment::get_path_separator()); boost::trim_if(paths, is_sep); boost::split(_paths, paths, is_sep, boost::token_compress_on); } // Ruby Facter expects /sbin and /usr/sbin to be searched for programs _paths.push_back("/sbin"); _paths.push_back("/usr/sbin"); } vector const& search_paths() const { return _paths; } private: vector _paths; }; char environment::get_path_separator() { return ':'; } static search_path_helper helper; vector const& environment::search_paths() { return helper.search_paths(); } void environment::reload_search_paths() { helper = search_path_helper(); } void environment::each(function callback) { // Enumerate all environment variables for (char const* const* variable = environ; *variable; ++variable) { string pair = *variable; string name; string value; auto pos = pair.find('='); if (pos == string::npos) { name = move(pair); } else { name = pair.substr(0, pos); value = pair.substr(pos + 1); } if (!callback(name, value)) { break; } } } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/posix/scoped_descriptor.cc000064400000000000000000000010141332360634000234770ustar00rootroot00000000000000#include using namespace std; namespace leatherman { namespace util { namespace posix { scoped_descriptor::scoped_descriptor(int descriptor) : scoped_resource(move(descriptor), close) { } scoped_descriptor::scoped_descriptor() : scoped_resource(-1, nullptr) { } void scoped_descriptor::close(int descriptor) { if (descriptor >= 0) { ::close(descriptor); } } }}} // namespace leatherman::util::posix leatherman-1.4.2+dfsg/util/src/posix/time.cc000064400000000000000000000003451332360634000207300ustar00rootroot00000000000000#include namespace leatherman { namespace util { void get_local_time(std::time_t* stored_time, std::tm* result){ localtime_r(stored_time, result); } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/scope_exit.cc000064400000000000000000000015301332360634000207670ustar00rootroot00000000000000#include using namespace std; namespace leatherman { namespace util { scope_exit::scope_exit() { } scope_exit::scope_exit(function callback) : 
_callback(callback) { } scope_exit::scope_exit(scope_exit&& other) { *this = std::move(other); } scope_exit& scope_exit::operator=(scope_exit&& other) { _callback = std::move(other._callback); // Ensure the callback is in a known "empty" state; we can't rely on default move semantics for that other._callback = nullptr; return *this; } scope_exit::~scope_exit() { invoke(); } void scope_exit::invoke() { if (_callback) { _callback(); _callback = nullptr; } } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/scoped_env.cc000064400000000000000000000021421332360634000207520ustar00rootroot00000000000000#include #include using namespace std; namespace leatherman { namespace util { scoped_env::scoped_env(string var, string const& val) : scoped_resource() { string oldval; bool was_set = environment::get(var, oldval); environment::set(var, val); _resource = make_tuple(move(var), was_set ? boost::optional(move(oldval)) : boost::none); _deleter = scoped_env::restore; } scoped_env::scoped_env(string var) : scoped_resource() { string oldval; bool was_set = environment::get(var, oldval); environment::clear(var); _resource = make_tuple(move(var), was_set ? boost::optional(move(oldval)) : boost::none); _deleter = scoped_env::restore; } void scoped_env::restore(tuple> & old) { if (get<1>(old)) { environment::set(get<0>(old), *get<1>(old)); } else { environment::clear(get<0>(old)); } } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/strings.cc000064400000000000000000000020131332360634000203130ustar00rootroot00000000000000#include #include #include #include #include using namespace std; namespace leatherman { namespace util { string plural(int num_of_things) { return num_of_things == 1 ? "" : "s"; } template<> string plural(vector const& things) { return plural(things.size()); } string get_UUID() { static boost::uuids::random_generator gen; boost::uuids::uuid uuid = gen(); return boost::uuids::to_string(uuid); } void each_line(string const& s, function callback) { string line; istringstream in(s); while (getline(in, line)) { // Handle Windows CR in the string. 
if (line.size() && line.back() == '\r') { line.pop_back(); } if (!callback(line)) { break; } } } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/time.cc000064400000000000000000000031371332360634000175700ustar00rootroot00000000000000#include #include namespace leatherman { namespace util { std::string get_expiry_datetime(int expiry_minutes) { struct std::tm expiry_time_info; std::string expiry_time_buffer(80, '\0'); // Get current time and add the specified minutes std::time_t expiry_time { time(nullptr) }; expiry_time += 60 * expiry_minutes; // Get local time structure get_local_time(&expiry_time, &expiry_time_info); // Return the formatted string if (strftime(&expiry_time_buffer[0], 80, "%FT%TZ", &expiry_time_info) == 0) { // invalid buffer return ""; } expiry_time_buffer.resize(strlen(&expiry_time_buffer[0])); return expiry_time_buffer; } std::string get_ISO8601_time(unsigned int modifier_in_seconds) { boost::posix_time::ptime t = boost::posix_time::microsec_clock::universal_time() + boost::posix_time::seconds(modifier_in_seconds); return boost::posix_time::to_iso_extended_string(t) + "Z"; } std::string get_date_time() { struct std::tm now_info; std::string now_buffer(80, '\0'); // Get current time std::time_t now { time(nullptr) }; // Get local time structure get_local_time(&now, &now_info); // Return the formatted string strftime(&now_buffer[0], 80, "%Y%m%d_%H%M%S", &now_info); now_buffer.resize(strlen(&now_buffer[0])); return now_buffer; } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/uri.cc000064400000000000000000000041141332360634000174250ustar00rootroot00000000000000#include #include #include namespace leatherman { namespace util { uri::uri(std::string const& _uri) { if (_uri.length() == 0) { return; } auto uri_end = _uri.end(); // get query start auto query_start = std::find(_uri.begin(), uri_end, '?'); // protocol auto protocol_start = _uri.begin(); auto protocol_end = std::find(protocol_start, uri_end, ':'); if (protocol_end != uri_end) { std::string after_prot{protocol_end, uri_end}; if ((after_prot.length() > 3) && (after_prot.substr(0, 3) == "://")) { protocol = std::string(protocol_start, protocol_end); protocol_end += 3; } else { protocol_end = _uri.begin(); // no protocol } } else { protocol_end = _uri.begin(); // no protocol } // host auto host_start = protocol_end; auto path_start = std::find(host_start, uri_end, '/'); auto host_end = std::find(protocol_end, (path_start != uri_end) ? path_start : query_start, ':'); // check for port host = std::string(host_start, host_end); // port if ((host_end != uri_end) && (*host_end == ':')) { auto port_end = (path_start != uri_end) ? 
path_start : query_start; port = std::string(host_end+1, port_end); } // path if (path_start != uri_end) { path = std::string(path_start, query_start); } // query if (query_start != uri_end) { query = std::string(query_start, uri_end); } } std::string uri::str() const { std::stringstream ss; if (!protocol.empty()) { ss << protocol << "://"; } ss << host; if (!port.empty()) { ss << ":" << port; } ss << path << query; return ss.str(); } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/windows/000075500000000000000000000000001332360634000200115ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/src/windows/environment.cc000064400000000000000000000036011332360634000226640ustar00rootroot00000000000000#include #include #include #include #include using namespace std; namespace leatherman { namespace util { struct search_path_helper { search_path_helper() { string paths; if (environment::get("PATH", paths)) { auto is_sep = bind(equal_to(), placeholders::_1, environment::get_path_separator()); boost::trim_if(paths, is_sep); boost::split(_paths, paths, is_sep, boost::token_compress_on); } } vector const& search_paths() const { return _paths; } private: vector _paths; }; char environment::get_path_separator() { return ';'; } static search_path_helper helper; vector const& environment::search_paths() { return helper.search_paths(); } void environment::reload_search_paths() { helper = search_path_helper(); } void environment::each(function callback) { // Enumerate all environment variables auto ptr = GetEnvironmentStringsW(); for (auto variables = ptr; variables && *variables; variables += wcslen(variables) + 1) { string pair = boost::nowide::narrow(variables); string name; string value; auto pos = pair.find('='); if (pos == string::npos) { name = move(pair); } else { name = pair.substr(0, pos); value = pair.substr(pos + 1); } if (!callback(name, value)) { break; } } if (ptr) { FreeEnvironmentStringsW(ptr); } } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/src/windows/scoped_handle.cc000064400000000000000000000010321332360634000231040ustar00rootroot00000000000000#include #include using namespace std; namespace leatherman { namespace util { namespace windows { scoped_handle::scoped_handle(HANDLE h) : scoped_resource(h, close) { } scoped_handle::scoped_handle() : scoped_resource(INVALID_HANDLE_VALUE, nullptr) { } void scoped_handle::close(HANDLE handle) { if (handle != INVALID_HANDLE_VALUE) { CloseHandle(handle); } } }}} // namespace leatherman::util::windows leatherman-1.4.2+dfsg/util/src/windows/time.cc000064400000000000000000000003711332360634000212570ustar00rootroot00000000000000#include #include namespace leatherman { namespace util { void get_local_time(std::time_t* stored_time, std::tm* result){ localtime_s(result, stored_time); } }} // namespace leatherman::util leatherman-1.4.2+dfsg/util/tests/000075500000000000000000000000001332360634000166725ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/tests/environment.cc000064400000000000000000000056261332360634000215560ustar00rootroot00000000000000#include #include #include using namespace std; using namespace leatherman::util; SCENARIO("getting an environment variable") { string value; REQUIRE_FALSE(environment::get("ENVTEST", value)); REQUIRE(value.empty()); boost::nowide::setenv("ENVTEST", "FOO", 1); REQUIRE(environment::get("ENVTEST", value)); REQUIRE(value == "FOO"); boost::nowide::unsetenv("ENVTEST"); value = ""; REQUIRE_FALSE(environment::get("ENVTEST", value)); REQUIRE(value.empty()); } SCENARIO("setting 
an environment variable") { REQUIRE_FALSE(boost::nowide::getenv("")); GIVEN("a non-empty value") { REQUIRE(environment::set("ENVTEST", "FOO")); THEN("the value is set to the same value") { REQUIRE(boost::nowide::getenv("ENVTEST") == string("FOO")); } boost::nowide::unsetenv("ENVTEST"); } GIVEN("an empty value") { REQUIRE(environment::set("ENVTEST", "")); THEN("the value is set to empty or not present") { string value; environment::get("ENVTEST", value); REQUIRE(value == ""); } boost::nowide::unsetenv("ENVTEST"); } } SCENARIO("clearing an environment variable") { boost::nowide::setenv("ENVTEST", "FOO", 1); REQUIRE(environment::clear("ENVTEST")); REQUIRE_FALSE(boost::nowide::getenv("ENVTEST")); } SCENARIO("enumerating environment variables") { boost::nowide::setenv("ENVTEST1", "FOO", 1); boost::nowide::setenv("ENVTEST2", "BAR", 1); boost::nowide::setenv("ENVTEST3", "BAZ", 1); WHEN("true is returned from the callback") { THEN("all values are returned") { string value1; string value2; string value3; environment::each([&](string& name, string& value) { if (name == "ENVTEST1") { value1 = move(value); } else if (name == "ENVTEST2") { value2 = move(value); } else if (name == "ENVTEST3") { value3 = move(value); } return true; }); REQUIRE(value1 == "FOO"); REQUIRE(value2 == "BAR"); REQUIRE(value3 == "BAZ"); } } WHEN("false is returned from the callback") { THEN("enumeration stops") { int count_at_stop = 0; int count = 0; environment::each([&](string& name, string& value) { if (name == "ENVTEST1") { count_at_stop = count; return false; } ++count; return true; }); REQUIRE(count != 0); REQUIRE(count == count_at_stop); } } boost::nowide::unsetenv("ENVTEST1"); boost::nowide::unsetenv("ENVTEST2"); boost::nowide::unsetenv("ENVTEST3"); } leatherman-1.4.2+dfsg/util/tests/option_set.cc000064400000000000000000000176501332360634000213730ustar00rootroot00000000000000#include #include using namespace std; using namespace leatherman::util; enum class options { foo = (1 << 0), bar = (1 << 1), baz = (1 << 2) }; SCENARIO("using an option set") { GIVEN("a default constructed option set") { option_set set; THEN("no options are set") { REQUIRE(set.count() == 0u); REQUIRE_FALSE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } GIVEN("option foo is set") { option_set set = { options::foo }; THEN("only foo is set") { REQUIRE(set.count() == 1u); REQUIRE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } GIVEN("options foo and bar are set") { option_set set = { options::bar, options::foo }; THEN("only foo and bar are set") { REQUIRE(set.count() == 2u); REQUIRE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } GIVEN("all options are specified") { option_set set = { options::baz, options::foo, options::bar }; THEN("all options are set") { REQUIRE(set.count() == 3u); REQUIRE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE(set[options::baz]); } } GIVEN("all options are set by numeric value") { option_set set(1 | 2 | 4); THEN("all options are set") { REQUIRE(set.count() == 3u); REQUIRE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE(set[options::baz]); } } GIVEN("all options are set with set_all") { option_set set; set.set_all(); THEN("all options are set") { REQUIRE(set.count() == set.size()); REQUIRE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE(set[options::baz]); } } GIVEN("an empty set") { option_set set; REQUIRE(set.empty()); WHEN("set is called with foo") { set.set(options::foo); 
THEN("foo is set") { REQUIRE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } WHEN("set is called with bar") { set.set(options::bar); THEN("bar is set") { REQUIRE_FALSE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } WHEN("set is called with baz") { set.set(options::baz); THEN("baz is set") { REQUIRE_FALSE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE(set[options::baz]); } } WHEN("reset is called") { set.reset(); THEN("the set is still empty") { REQUIRE(set.count() == 0u); REQUIRE_FALSE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } WHEN("toggle is called") { set.toggle(); THEN("all options are set") { REQUIRE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE(set[options::baz]); } } WHEN("toggle is called on a particular option") { set.toggle(options::foo); THEN("the option should be set") { REQUIRE(set[options::foo]); } THEN("the count is one") { REQUIRE(set.count() == 1u); } } WHEN("toggle is called twice on a particular option") { set.toggle(options::foo); set.toggle(options::foo); THEN("the option should not be set") { REQUIRE_FALSE(set[options::foo]); } THEN("the count is zero") { REQUIRE(set.count() == 0u); } } THEN("the count is zero") { REQUIRE(set.count() == 0u); } THEN("the set reports as empty") { REQUIRE(set.empty()); } THEN("the size is the number of bits in an integer") { REQUIRE(set.size() == sizeof(int) * 8); } THEN("no option is set") { REQUIRE_FALSE(set.test(options::foo)); REQUIRE_FALSE(set.test(options::bar)); REQUIRE_FALSE(set.test(options::baz)); } } GIVEN("an option set with all values set") { option_set set = { options::foo, options::bar, options::baz }; WHEN("clear is called with foo") { set.clear(options::foo); THEN("foo is not set") { REQUIRE_FALSE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE(set[options::baz]); } } WHEN("clear is called with bar") { set.clear(options::bar); THEN("bar is not set") { REQUIRE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE(set[options::baz]); } } WHEN("clear is called with baz") { set.clear(options::baz); THEN("baz is not set") { REQUIRE(set[options::foo]); REQUIRE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } WHEN("reset is called") { set.reset(); THEN("the set is empty") { REQUIRE(set.count() == 0u); REQUIRE_FALSE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } WHEN("toggle is called") { set.toggle(); THEN("no options are set") { REQUIRE_FALSE(set[options::foo]); REQUIRE_FALSE(set[options::bar]); REQUIRE_FALSE(set[options::baz]); } } THEN("the count is three") { REQUIRE(set.count() == 3u); } THEN("the set is not empty") { REQUIRE_FALSE(set.empty()); } THEN("all options are set") { REQUIRE(set.test(options::foo)); REQUIRE(set.test(options::bar)); REQUIRE(set.test(options::baz)); } } GIVEN("two option sets") { option_set set1 = { options::foo, options::bar }; option_set set2 = { options::bar, options::baz }; WHEN("a third set is the result of bitwise AND") { option_set set3 = set1 & set2; THEN("only the intersecting options are set") { REQUIRE(set3.count() == 1u); REQUIRE_FALSE(set3[options::foo]); REQUIRE(set3[options::bar]); REQUIRE_FALSE(set3[options::baz]); } } WHEN("a third set is the result of bitwise OR") { option_set set3 = set1 | set2; THEN("the union of the options are set") { REQUIRE(set3.count() == 3u); REQUIRE(set3[options::foo]); REQUIRE(set3[options::bar]); 
REQUIRE(set3[options::baz]); } } WHEN("a third set is the result of bitwise XOR") { option_set set3 = set1 ^ set2; THEN("options that are in one set but not the other are set") { REQUIRE(set3.count() == 2u); REQUIRE(set3[options::foo]); REQUIRE_FALSE(set3[options::bar]); REQUIRE(set3[options::baz]); } } } } leatherman-1.4.2+dfsg/util/tests/posix/000075500000000000000000000000001332360634000200345ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/tests/posix/environment.cc000064400000000000000000000022421332360634000227070ustar00rootroot00000000000000#include #include #include #include using namespace std; using namespace leatherman::util; SCENARIO("path separator on POSIX") { REQUIRE(environment::get_path_separator() == ':'); } SCENARIO("environment search paths") { GIVEN("paths from the environment") { auto paths = environment::search_paths(); REQUIRE(paths.size() > 2); THEN("the second to last path should be /sbin") { REQUIRE(*(paths.rbegin() + 1) == "/sbin"); } THEN("the last path should be /usr/sbin") { REQUIRE(paths.back() == "/usr/sbin"); } } GIVEN("empty paths from the environment") { string value; REQUIRE(environment::get("PATH", value)); REQUIRE(environment::set("PATH", value+":")); environment::reload_search_paths(); auto paths = environment::search_paths(); THEN("an empty path should not be searched") { REQUIRE(count(paths.begin(), paths.end(), "") == 0); } REQUIRE(environment::set("PATH", value)); environment::reload_search_paths(); } } leatherman-1.4.2+dfsg/util/tests/scoped_env.cc000064400000000000000000000032541332360634000213320ustar00rootroot00000000000000#include #include #include using namespace std; using namespace leatherman::util; SCENARIO("scoping an environment variable") { string value; REQUIRE_FALSE(environment::get("LEATH_ENV_TEST", value)); REQUIRE(value.empty()); WHEN("the variable does not exist") { AND_WHEN("the variable is scoped") { scoped_env foo("LEATH_ENV_TEST", "FOO"); THEN("the new value is set") { REQUIRE(environment::get("LEATH_ENV_TEST", value)); REQUIRE(value == "FOO"); } } AND_WHEN("the variable is scoped as unset") { scoped_env foo("LEATH_ENV_TEST"); THEN("the variable is not set") { REQUIRE_FALSE(environment::get("LEATH_ENV_TEST", value)); } } } WHEN("the variable exists") { environment::set("LEATH_ENV_TEST", "bar"); AND_WHEN("the variable is scoped") { scoped_env foo("LEATH_ENV_TEST", "FOO"); THEN("the new value is set") { REQUIRE(environment::get("LEATH_ENV_TEST", value)); REQUIRE(value == "FOO"); } } AND_WHEN("the variable is scoped as unset") { scoped_env foo("LEATH_ENV_TEST"); THEN("the variable is not set") { REQUIRE_FALSE(environment::get("LEATH_ENV_TEST", value)); } } THEN("the variable should be restored") { REQUIRE(environment::get("LEATH_ENV_TEST", value)); REQUIRE(value == "bar"); } environment::clear("LEATH_ENV_TEST"); } } leatherman-1.4.2+dfsg/util/tests/strings_test.cc000064400000000000000000000036061332360634000217360ustar00rootroot00000000000000#include #include using namespace std; using namespace leatherman::util; TEST_CASE("strings::plural", "[strings]") { SECTION("pluralize string based on a number") { REQUIRE(plural(1) == ""); REQUIRE(plural(2) == "s"); REQUIRE(plural(0) == "s"); } SECTION("pluralize string based on a list") { vector things { "thing1" }; REQUIRE(plural(things) == ""); things.push_back("thing2"); REQUIRE(plural(things) == "s"); } } TEST_CASE("strings::get_UUID", "[strings]") { SECTION("returns a unique value each time") { set ids; ids.insert(get_UUID()); for(int i = 0; i < 100; i++) { string new_id = 
get_UUID(); REQUIRE(ids.find(new_id) == ids.end()); ids.insert(new_id); } } } TEST_CASE("each_line", "[strings]") { SECTION("empty string never calls callback") { each_line("", [](string &line) { FAIL("should not be called"); return true; }); } SECTION("an action is performed on each line") { string s = "test1\ntest2\ntest3\n"; int i = 0; each_line(s, [&i](string const &line) { i++; return line == ("test" + std::to_string(i)); }); REQUIRE(i == 3); } SECTION("a callback that returns false stops at the first line") { string s = "test1\ntest2\ntest3\n"; vector lines; each_line(s, [&](string& line) { lines.emplace_back(move(line)); return false; }); REQUIRE(lines.size() == 1u); REQUIRE(lines[0] == "test1"); } SECTION("strips '\r' from line endings") { string s = "test\r\n"; each_line(s, [](string& line) { REQUIRE(line == "test"); return true; }); } } leatherman-1.4.2+dfsg/util/tests/timer.cc000064400000000000000000000015601332360634000203230ustar00rootroot00000000000000#include #include using namespace leatherman::util; SCENARIO("Using Timer", "[util]") { SECTION("can instantiate") { REQUIRE_NOTHROW(Timer()); } SECTION("can reset") { Timer t {}; REQUIRE_NOTHROW(t.reset()); } SECTION("can retrieve durations [s]") { Timer t {}; auto d1_s = t.elapsed_seconds(); auto d2_s = t.elapsed_seconds(); REQUIRE(d1_s <= d2_s); } SECTION("can retrieve durations [ms]") { Timer t {}; auto d1_ms = t.elapsed_milliseconds(); auto d2_ms = t.elapsed_milliseconds(); REQUIRE(d1_ms <= d2_ms); } SECTION("can retrieve durations after resetting") { Timer t {}; t.reset(); REQUIRE_NOTHROW(t.elapsed_seconds()); REQUIRE_NOTHROW(t.elapsed_milliseconds()); } } leatherman-1.4.2+dfsg/util/tests/uri.cc000064400000000000000000000062641332360634000200100ustar00rootroot00000000000000#include #include namespace lth_util = leatherman::util; TEST_CASE("parses a uri") { SECTION("full uri") { auto uri = lth_util::uri("https://foo:1234/bar?some=1&other=2"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == "1234"); REQUIRE(uri.path == "/bar"); REQUIRE(uri.query == "?some=1&other=2"); } SECTION("without protocol") { auto uri = lth_util::uri("foo:1234/bar?some=1&other=2"); REQUIRE(uri.protocol == ""); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == "1234"); REQUIRE(uri.path == "/bar"); REQUIRE(uri.query == "?some=1&other=2"); } SECTION("without host") { auto uri = lth_util::uri("https://:1234/bar?some=1&other=2"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == ""); REQUIRE(uri.port == "1234"); REQUIRE(uri.path == "/bar"); REQUIRE(uri.query == "?some=1&other=2"); } SECTION("without port") { auto uri = lth_util::uri("https://foo/bar?some=1&other=2"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == ""); REQUIRE(uri.path == "/bar"); REQUIRE(uri.query == "?some=1&other=2"); } SECTION("with missing port") { auto uri = lth_util::uri("https://foo:/bar?some=1&other=2"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == ""); REQUIRE(uri.path == "/bar"); REQUIRE(uri.query == "?some=1&other=2"); } SECTION("without path") { auto uri = lth_util::uri("https://foo:1234?some=1&other=2"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == "1234"); REQUIRE(uri.path == ""); REQUIRE(uri.query == "?some=1&other=2"); } SECTION("without query") { auto uri = lth_util::uri("https://foo:1234/bar"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == "1234"); REQUIRE(uri.path == "/bar"); REQUIRE(uri.query 
== ""); } SECTION("only host") { auto uri = lth_util::uri("foo"); REQUIRE(uri.protocol == ""); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == ""); REQUIRE(uri.path == ""); REQUIRE(uri.query == ""); } SECTION("protocol, host, and port") { auto uri = lth_util::uri("https://foo:1234"); REQUIRE(uri.protocol == "https"); REQUIRE(uri.host == "foo"); REQUIRE(uri.port == "1234"); REQUIRE(uri.path == ""); REQUIRE(uri.query == ""); } } TEST_CASE("prints a uri") { SECTION("full uri") { auto uri = lth_util::uri("https://foo:1234/bar?some=1&other=2"); REQUIRE(uri.str() == "https://foo:1234/bar?some=1&other=2"); } SECTION("host and port") { auto uri = lth_util::uri("foo:1234"); REQUIRE(uri.str() == "foo:1234"); } SECTION("protocol, host, and port") { auto uri = lth_util::uri("https://foo:1234"); REQUIRE(uri.str() == "https://foo:1234"); } } leatherman-1.4.2+dfsg/util/tests/windows/000075500000000000000000000000001332360634000203645ustar00rootroot00000000000000leatherman-1.4.2+dfsg/util/tests/windows/environment.cc000064400000000000000000000016721332360634000232450ustar00rootroot00000000000000#include #include #include #include using namespace std; using namespace leatherman::util; SCENARIO("path separator on Windows") { REQUIRE(environment::get_path_separator() == ';'); } SCENARIO("environment search paths") { GIVEN("paths from the environment") { auto paths = environment::search_paths(); REQUIRE(paths.size() > 0u); } GIVEN("empty paths from the environment") { string value; REQUIRE(environment::get("PATH", value)); REQUIRE(environment::set("PATH", value+";")); environment::reload_search_paths(); auto paths = environment::search_paths(); THEN("an empty path should not be searched") { REQUIRE(count(paths.begin(), paths.end(), "") == 0); } REQUIRE(environment::set("PATH", value)); environment::reload_search_paths(); } } leatherman-1.4.2+dfsg/vendor/000075500000000000000000000000001332360634000160505ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/000075500000000000000000000000001332360634000173355ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/build/000075500000000000000000000000001332360634000204345ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/build/Jamfile.v2000064400000000000000000000013401332360634000222520ustar00rootroot00000000000000# Boost Nowide Library Build Jamfile # (C) Copyright Beman Dawes 2002, 2006, Artyom Beilis 2012 # # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or www.boost.org/LICENSE_1_0.txt) # See library home page at http://www.boost.org/libs/nowide project boost/nowide : source-location ../src : usage-requirements # pass these requirement to dependents (i.e. users) shared:BOOST_NOWIDE_DYN_LINK=1 static:BOOST_NOWIDE_STATIC_LINK=1 ; SOURCES = iostream ; lib boost_nowide : $(SOURCES).cpp : shared:BOOST_NOWIDE_DYN_LINK=1 static:BOOST_NOWIDE_STATIC_LINK=1 ; boost-install boost_nowide ; leatherman-1.4.2+dfsg/vendor/nowide/doc/000075500000000000000000000000001332360634000201025ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/doc/Doxyfile000064400000000000000000002047711332360634000216230ustar00rootroot00000000000000# Doxyfile 1.7.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] 
# Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = Boost.Nowide # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = . # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = ../include # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = ../include # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. 
SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. 
If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. 
# If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. 
SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. 
SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. The create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../include/boost/nowide ../include/boost/nowide/integration \ . 
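# (Illustrative sketch, not part of the upstream Boost.Nowide Doxyfile.) The INPUT
# tag above, together with the FILE_PATTERNS and RECURSIVE tags described below,
# determines which sources doxygen scans. A minimal equivalent configuration for
# this library, assuming only the public headers are of interest, might look like:
#
#   INPUT         = ../include/boost/nowide
#   FILE_PATTERNS = *.hpp
#   RECURSIVE     = NO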
# This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.hpp *.txt # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = ../examples # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. 
INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the stylesheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. 
HTML_TIMESTAMP = NO # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. 
BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. 
GENERATE_TREEVIEW = NO # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvances is that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. 
This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. 
RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. 
PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = ../include # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = BOOST_NOWIDE_DECL= \ BOOST_NOWIDE_DOXYGEN # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. 
DOT_NUM_THREADS = 0 # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. 
DOT_CLEANUP = YES leatherman-1.4.2+dfsg/vendor/nowide/doc/LICENSE_1_0.txt000064400000000000000000000024721332360634000223710ustar00rootroot00000000000000Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. leatherman-1.4.2+dfsg/vendor/nowide/doc/gendoc.sh000075500000000000000000000003731332360634000217030ustar00rootroot00000000000000#!/bin/bash # # Copyright (c) 2009-2011 Artyom Beilis (Tonkikh) # # Distributed under the Boost Software License, Version 1.0. (See # accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # rm -f html/* && doxygen leatherman-1.4.2+dfsg/vendor/nowide/doc/main.txt000064400000000000000000000263471332360634000216030ustar00rootroot00000000000000// // Copyright (c) 2009-2011 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // /*! \mainpage Boost.Nowide Table of Contents: - \ref main - \ref main_rationale - \ref main_the_problem - \ref main_the_solution - \ref main_wide - \ref main_reading - \ref using - \ref using_standard - \ref using_custom - \ref using_integration - \ref technical - \ref technical_imple - \ref technical_cio - \ref qna - \ref standalone_version - \ref sources \section main What is Boost.Nowide Boost.Nowide is a library implemented by Artyom Beilis that makes cross platform Unicode aware programming easier. The library provides an implementation of standard C and C++ library functions, such that their inputs are UTF-8 aware on Windows without requiring to use Wide API. \section main_rationale Rationale \subsection main_the_problem The Problem Consider a simple application that splits a big file into chunks, such that they can be sent by e-mail. It requires doing a few very simple tasks: - Access command line arguments: int main(int argc,char **argv) - Open an input file, open several output files: std::fstream::open(char const *,std::ios::openmode m) - Remove the files in case of fault: std::remove(char const *file) - Print a progress report onto the console: std::cout << file_name Unfortunately it is impossible to implement this simple task in plain C++ if the file names contain non-ASCII characters. 
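To make the problem concrete, here is a purely illustrative sketch of such a program written against the plain standard library (the chunking and e-mail logic is omitted; the file name simply comes from \c argv):

\code
#include <fstream>
#include <iostream>

int main(int argc,char **argv)
{
    if(argc!=2) {
        std::cerr << "Usage: file_name" << std::endl;
        return 1;
    }
    // argv[1] arrives in whatever narrow encoding the OS provides; on Windows
    // that is the ANSI code page, so a name like "Война и мир.zip" is already lost
    std::ifstream in(argv[1],std::ios::binary);
    if(!in) {
        std::cerr << "Can't open " << argv[1] << std::endl;
        return 1;
    }
    // ... read the file and write the chunks here ...
    return 0;
}
\endcode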
The simple program that uses the API would work on the systems that use UTF-8 internally -- the vast majority of Unix-Line operating systems: Linux, Mac OS X, Solaris, BSD. But it would fail on files like War and Peace - Война и мир - מלחמה ושלום.zip under Microsoft Windows because the native Windows Unicode aware API is Wide-API -- UTF-16. This incredibly trivial task is very hard to implement in a cross platform manner. \subsection main_the_solution The Solution Boost.Nowide provides a set of standard library functions that are UTF-8 aware and makes Unicode aware programming easier. The library provides: - Easy to use functions for converting UTF-8 to/from UTF-16 - A class to make the \c argc, \c argc and \c env parameters of \c main use UTF-8 - UTF-8 aware functions - \c stdio.h functions: - \c fopen - \c freopen - \c remove - \c rename - \c stdlib.h functions: - \c system - \c getenv - \c setenv - \c unsetenv - \c putenv - \c fstream - \c filebuf - \c fstream/ofstream/ifstream - \c iostream - \c cout - \c cerr - \c clog - \c cin \subsection main_wide Why Not Narrow and Wide? Why not provide both Wide and Narrow implementations so the developer can choose to use Wide characters on Unix-like platforms? Several reasons: - \c wchar_t is not really portable, it can be 2 bytes, 4 bytes or even 1 byte making Unicode aware programming harder - The C and C++ standard libraries use narrow strings for OS interactions. This library follows the same general rule. There is no such thing as fopen(wchar_t const *,wchar_t const *) in the standard library, so it is better to stick to the standards rather than re-implement Wide API in "Microsoft Windows Style" \subsection main_reading Further Reading - www.utf8everywhere.org - Windows console i/o approaches \section using Using The Library \subsection using_standard Standard Features The library is mostly header only, only console I/O requires separate compilation under Windows. As a developer you are expected to use \c boost::nowide functions instead of the functions available in the \c std namespace. For example, here is a Unicode unaware implementation of a line counter: \code #include #include int main(int argc,char **argv) { if(argc!=2) { std::cerr << "Usage: file_name" << std::endl; return 1; } std::ifstream f(argv[1]); if(!f) { std::cerr << "Can't open " << argv[1] << std::endl; return 1; } int total_lines = 0; while(f) { if(f.get() == '\n') total_lines++; } f.close(); std::cout << "File " << argv[1] << " has " << total_lines << " lines" << std::endl; return 0; } \endcode To make this program handle Unicode properly, we do the following changes: \code #include #include #include int main(int argc,char **argv) { boost::nowide::args a(argc,argv); // Fix arguments - make them UTF-8 if(argc!=2) { boost::nowide::cerr << "Usage: file_name" << std::endl; // Unicode aware console return 1; } boost::nowide::ifstream f(argv[1]); // argv[1] - is UTF-8 if(!f) { // the console can display UTF-8 boost::nowide::cerr << "Can't open " << argv[1] << std::endl; return 1; } int total_lines = 0; while(f) { if(f.get() == '\n') total_lines++; } f.close(); // the console can display UTF-8 boost::nowide::cout << "File " << argv[1] << " has " << total_lines << " lines" << std::endl; return 0; } \endcode This very simple and straightforward approach helps writing Unicode aware programs. \subsection using_custom Custom API Of course, this simple set of functions does not cover all needs. 
If you need to access Wide API from a Windows application that uses UTF-8 internally you can use functions like \c boost::nowide::widen and \c boost::nowide::narrow. For example: \code CopyFileW( boost::nowide::widen(existing_file).c_str(), boost::nowide::widen(new_file).c_str(), TRUE); \endcode The conversion is done at the last stage, and you continue using UTF-8 strings everywhere else. You only switch to the Wide API at glue points. \c boost::nowide::widen returns \c std::string. Sometimes it is useful to prevent allocation and use on-stack buffers instead. Boost.Nowide provides the \c boost::nowide::basic_stackstring class for this purpose. The example above could be rewritten as: \code boost::nowide::basic_stackstring wexisting_file,wnew_file; if(!wexisting_file.convert(existing_file) || !wnew_file.convert(new_file)) { // invalid UTF-8 return -1; } CopyFileW(wexisting_file.c_str(),wnew_file.c_str(),TRUE); \endcode \note There are a few convenience typedefs: \c stackstring and \c wstackstring using 256-character buffers, and \c short_stackstring and \c wshort_stackstring using 16-character buffers. If the string is longer, they fall back to memory allocation. \subsection using_windows_h The windows.h header The library does not include the \c windows.h in order to prevent namespace pollution with numerous defines and types. Instead, the library defines the prototypes of the Win32 API functions. However, you may request to use the \c windows.h header by defining \c BOOST_NOWIDE_USE_WINDOWS_H before including any of the Boost.Nowide headers \subsection using_integration Integration with Boost.Filesystem Boost.Filesystem supports selection of narrow encoding. Unfortunatelly the default narrow encoding on Windows isn't UTF-8, you can enable UTF-8 as default encoding on Boost.Filesystem by calling `boost::nowide::nowide_filesystem()` in the beginning of your program \section technical Technical Details \subsection technical_imple Windows vs POSIX For Microsoft Windows, the library provides UTF-8 aware variants of some \c std:: functions in the \c boost::nowide namespace. For example, \c std::fopen becomes \c boost::nowide::fopen. Under POSIX platforms, the functions in boost::nowide are aliases of their standard library counterparts: \code namespace boost { namespace nowide { #ifdef BOOST_WINDOWS inline FILE *fopen(char const *name,char const *mode) { ... } #else using std::fopen #endif } // nowide } // boost \endcode \subsection technical_cio Console I/O Console I/O is implemented as a wrapper around ReadConsoleW/WriteConsoleW (used when the stream goes to the "real" console) and ReadFile/WriteFile (used when the stream was piped/redirected). This approach eliminates a need of manual code page handling. If TrueType fonts are used the Unicode aware input and output works as intended. \section qna Q & A Q: Why doesn't the library convert the string to/from the locale's encoding (instead of UTF-8) on POSIX systems? A: It is inherently incorrect to convert strings to/from locale encodings on POSIX platforms. You can create a file named "\xFF\xFF.txt" (invalid UTF-8), remove it, pass its name as a parameter to a program and it would work whether the current locale is UTF-8 or not. Also, changing the locale from let's say \c en_US.UTF-8 to \c en_US.ISO-8859-1 would not magically change all files in the OS or the strings a user may pass to the program (which is different on Windows) POSIX OSs treat strings as \c NULL terminated cookies. 
So altering their content according to the locale would actually lead to incorrect behavior. For example, this is a naive implementation of a standard program "rm" \code #include int main(int argc,char **argv) { for(int i=1;i #include #include int main(int argc,char **argv) { nowide::args a(argc,argv); // Fix arguments - make them UTF-8 if(argc!=2) { nowide::cerr << "Usage: file_name" << std::endl; // Unicode aware console return 1; } nowide::ifstream f(argv[1]); // argv[1] - is UTF-8 if(!f) { // the console can display UTF-8 nowide::cerr << "Can't open a file " << argv[1] << std::endl; return 1; } int total_lines = 0; while(f) { if(f.get() == '\n') total_lines++; } f.close(); // the console can display UTF-8 nowide::cout << "File " << argv[1] << " has " << total_lines << " lines" << std::endl; return 0; } \endcode \endcode \subsection sources Sources and Downloads The upstream sources can be found at GitHub: https://github.com/artyom-beilis/nowide You can download the latest sources there: - Standard Version: nowide-master.zip - Standalone Boost independent version nowide_standalone.zip */ // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 filetype=cpp.doxygen leatherman-1.4.2+dfsg/vendor/nowide/include/000075500000000000000000000000001332360634000207605ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/include/boost/000075500000000000000000000000001332360634000221065ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/000075500000000000000000000000001332360634000233735ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/args.hpp000064400000000000000000000111571332360634000250450ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_ARGS_HPP_INCLUDED #define BOOST_NOWIDE_ARGS_HPP_INCLUDED #include #include #include #ifdef BOOST_WINDOWS #include #endif namespace boost { namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_DOXYGEN) class args { public: args(int &,char **&) {} args(int &,char **&,char **&){} ~args() {} }; #else /// /// \brief args is a class that fixes standard main() function arguments and changes them to UTF-8 under /// Microsoft Windows. /// /// The class uses \c GetCommandLineW(), \c CommandLineToArgvW() and \c GetEnvironmentStringsW() /// in order to obtain the information. It does not relates to actual values of argc,argv and env /// under Windows. 
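///
/// A minimal usage sketch (illustrative only, not part of the upstream documentation):
/// \code
/// #include <boost/nowide/args.hpp>
/// #include <boost/nowide/iostream.hpp>
///
/// int main(int argc,char **argv)
/// {
///     boost::nowide::args a(argc,argv); // arguments are UTF-8 from here on
///     for(int i=1;i<argc;i++)
///         boost::nowide::cout << argv[i] << "\n";
///     return 0;
/// }
/// \endcode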
/// /// It restores the original values in its destructor /// /// \note the class owns the memory of the newly allocated strings /// class args { public: /// /// Fix command line agruments /// args(int &argc,char **&argv) : old_argc_(argc), old_argv_(argv), old_env_(0), old_argc_ptr_(&argc), old_argv_ptr_(&argv), old_env_ptr_(0) { fix_args(argc,argv); } /// /// Fix command line agruments and environment /// args(int &argc,char **&argv,char **&en) : old_argc_(argc), old_argv_(argv), old_env_(en), old_argc_ptr_(&argc), old_argv_ptr_(&argv), old_env_ptr_(&en) { fix_args(argc,argv); fix_env(en); } /// /// Restore original argc,argv,env values, if changed /// ~args() { if(old_argc_ptr_) *old_argc_ptr_ = old_argc_; if(old_argv_ptr_) *old_argv_ptr_ = old_argv_; if(old_env_ptr_) *old_env_ptr_ = old_env_; } private: void fix_args(int &argc,char **&argv) { int wargc; wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(),&wargc); if(!wargv) { argc = 0; static char *dummy = 0; argv = &dummy; return; } try{ args_.resize(wargc+1,0); arg_values_.resize(wargc); for(int i=0;i args_; std::vector arg_values_; stackstring env_; std::vector envp_; int old_argc_; char **old_argv_; char **old_env_; int *old_argc_ptr_; char ***old_argv_ptr_; char ***old_env_ptr_; }; #endif } // nowide } // namespace boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/cenv.hpp000064400000000000000000000070541332360634000250450ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_CENV_H_INCLUDED #define BOOST_NOWIDE_CENV_H_INCLUDED #include #include #include #include #include #include #ifdef BOOST_WINDOWS #include #endif namespace boost { namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_DOXYGEN) using ::getenv; using ::setenv; using ::unsetenv; using ::putenv; #else /// /// \brief UTF-8 aware getenv. Returns 0 if the variable is not set. 
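///
/// Illustrative sketch of the intended usage (an assumed example, not taken from the upstream docs):
/// \code
/// #include <boost/nowide/cstdlib.hpp>
/// #include <cstdio>
///
/// int main()
/// {
///     char const *path = boost::nowide::getenv("PATH"); // value is returned as UTF-8
///     if(path)
///         std::puts(path);
///     return 0;
/// }
/// \endcode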
/// /// This function is not thread safe or reenterable as defined by the standard library /// inline char *getenv(char const *key) { static stackstring value; wshort_stackstring name; if(!name.convert(key)) return 0; static const size_t buf_size = 64; wchar_t buf[buf_size]; std::vector tmp; wchar_t *ptr = buf; size_t n = GetEnvironmentVariableW(name.c_str(),buf,buf_size); if(n == 0 && GetLastError() == 203) // ERROR_ENVVAR_NOT_FOUND return 0; if(n >= buf_size) { tmp.resize(n+1,L'\0'); n = GetEnvironmentVariableW(name.c_str(),&tmp[0],static_cast(tmp.size() - 1)); // The size may have changed if(n >= tmp.size() - 1) return 0; ptr = &tmp[0]; } if(!value.convert(ptr)) return 0; return value.c_str(); } /// /// \brief UTF-8 aware setenv, \a key - the variable name, \a value is a new UTF-8 value, /// /// if override is not 0, that the old value is always overridded, otherwise, /// if the variable exists it remains unchanged /// inline int setenv(char const *key,char const *value,int override) { wshort_stackstring name; if(!name.convert(key)) return -1; if(!override) { wchar_t unused[2]; if(!(GetEnvironmentVariableW(name.c_str(),unused,2)==0 && GetLastError() == 203)) // ERROR_ENVVAR_NOT_FOUND return 0; } wstackstring wval; if(!wval.convert(value)) return -1; if(SetEnvironmentVariableW(name.c_str(),wval.c_str())) return 0; return -1; } /// /// \brief Remove enviroment variable \a key /// inline int unsetenv(char const *key) { wshort_stackstring name; if(!name.convert(key)) return -1; if(SetEnvironmentVariableW(name.c_str(),0)) return 0; return -1; } /// /// \brief UTF-8 aware putenv implementation, expects string in format KEY=VALUE /// inline int putenv(char *string) { char const *key = string; char const *key_end = string; while(*key_end!='=' && key_end!='\0') key_end++; if(*key_end == '\0') return -1; wshort_stackstring wkey; if(!wkey.convert(key,key_end)) return -1; wstackstring wvalue; if(!wvalue.convert(key_end+1)) return -1; if(SetEnvironmentVariableW(wkey.c_str(),wvalue.c_str())) return 0; return -1; } #endif } // nowide } // namespace boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/config.hpp000064400000000000000000000027451332360634000253610ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_CONFIG_HPP_INCLUDED #define BOOST_NOWIDE_CONFIG_HPP_INCLUDED #include #ifndef BOOST_SYMBOL_VISIBLE # define BOOST_SYMBOL_VISIBLE #endif #ifdef BOOST_HAS_DECLSPEC # if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_NOWIDE_DYN_LINK) # ifdef BOOST_NOWIDE_SOURCE # define BOOST_NOWIDE_DECL BOOST_SYMBOL_EXPORT # else # define BOOST_NOWIDE_DECL BOOST_SYMBOL_IMPORT # endif // BOOST_NOWIDE_SOURCE # endif // DYN_LINK #endif // BOOST_HAS_DECLSPEC #ifndef BOOST_NOWIDE_DECL # define BOOST_NOWIDE_DECL #endif // // Automatically link to the correct build variant where possible. 
// #if !defined(BOOST_ALL_NO_LIB) && !defined(BOOST_NOWIDE_NO_LIB) && !defined(BOOST_NOWIDE_SOURCE) // // Set the name of our library, this will get undef'ed by auto_link.hpp // once it's done with it: // #define BOOST_LIB_NAME boost_nowide // // If we're importing code from a dll, then tell auto_link.hpp about it: // #if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_NOWIDE_DYN_LINK) # define BOOST_DYN_LINK #endif // // And include the header that does the work: // #include #endif // auto-linking disabled #endif // boost/nowide/config.hpp // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/convert.hpp000064400000000000000000000122031332360634000255620ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_CONVERT_H_INCLUDED #define BOOST_NOWIDE_CONVERT_H_INCLUDED #include #include namespace boost { namespace nowide { /// /// \brief Template function that converts a buffer of UTF sequences in range [source_begin,source_end) /// to the output \a buffer of size \a buffer_size. /// /// In case of success a NULL terminated string is returned (buffer), otherwise 0 is returned. /// /// If there is not enough room in the buffer or the source sequence contains invalid UTF, /// 0 is returned, and the contents of the buffer are undefined. /// template CharOut *basic_convert(CharOut *buffer,size_t buffer_size,CharIn const *source_begin,CharIn const *source_end) { CharOut *rv = buffer; if(buffer_size == 0) return 0; buffer_size --; while(source_begin!=source_end) { using namespace boost::locale::utf; code_point c = utf_traits::template decode(source_begin,source_end); if(c==illegal || c==incomplete) { rv = 0; break; } size_t width = utf_traits::width(c); if(buffer_size < width) { rv=0; break; } buffer = utf_traits::template encode(c,buffer); buffer_size -= width; } *buffer++ = 0; return rv; } /// \cond INTERNAL namespace details { // // wcslen defined only in C99... 
So we will not use it // template Char const *basic_strend(Char const *s) { while(*s) s++; return s; } } /// \endcond /// /// Convert NULL terminated UTF source string to NULL terminated \a output string of size at /// most output_size (including NULL) /// /// In case of success output is returned, if the input sequence is illegal, /// or there is not enough room NULL is returned /// inline char *narrow(char *output,size_t output_size,wchar_t const *source) { return basic_convert(output,output_size,source,details::basic_strend(source)); } /// /// Convert UTF text in range [begin,end) to NULL terminated \a output string of size at /// most output_size (including NULL) /// /// In case of success output is returned, if the input sequence is illegal, /// or there is not enough room NULL is returned /// inline char *narrow(char *output,size_t output_size,wchar_t const *begin,wchar_t const *end) { return basic_convert(output,output_size,begin,end); } /// /// Convert NULL terminated UTF source string to NULL terminated \a output string of size at /// most output_size (including NULL) /// /// In case of success output is returned, if the input sequence is illegal, /// or there is not enough room NULL is returned /// inline wchar_t *widen(wchar_t *output,size_t output_size,char const *source) { return basic_convert(output,output_size,source,details::basic_strend(source)); } /// /// Convert UTF text in range [begin,end) to NULL terminated \a output string of size at /// most output_size (including NULL) /// /// In case of success output is returned, if the input sequence is illegal, /// or there is not enough room NULL is returned /// inline wchar_t *widen(wchar_t *output,size_t output_size,char const *begin,char const *end) { return basic_convert(output,output_size,begin,end); } /// /// Convert between Wide - UTF-16/32 string and UTF-8 string. /// /// boost::locale::conv::conversion_error is thrown in a case of a error /// inline std::string narrow(wchar_t const *s) { return boost::locale::conv::utf_to_utf(s); } /// /// Convert between UTF-8 and UTF-16 string, implemented only on Windows platform /// /// boost::locale::conv::conversion_error is thrown in a case of a error /// inline std::wstring widen(char const *s) { return boost::locale::conv::utf_to_utf(s); } /// /// Convert between Wide - UTF-16/32 string and UTF-8 string /// /// boost::locale::conv::conversion_error is thrown in a case of a error /// inline std::string narrow(std::wstring const &s) { return boost::locale::conv::utf_to_utf(s); } /// /// Convert between UTF-8 and UTF-16 string, implemented only on Windows platform /// /// boost::locale::conv::conversion_error is thrown in a case of a error /// inline std::wstring widen(std::string const &s) { return boost::locale::conv::utf_to_utf(s); } } // nowide } // namespace boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/cstdio.hpp000064400000000000000000000046621332360634000254010ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_CSTDIO_H_INCLUDED #define BOOST_NOWIDE_CSTDIO_H_INCLUDED #include #include #include #include #include #include #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4996) #endif namespace boost { namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_DOXYGEN) using std::fopen; using std::freopen; using std::remove; using std::rename; #else /// /// \brief Same as freopen but file_name and mode are UTF-8 strings /// /// If invalid UTF-8 given, NULL is returned and errno is set to EINVAL /// inline FILE *freopen(char const *file_name,char const *mode,FILE *stream) { wstackstring wname; wshort_stackstring wmode; if(!wname.convert(file_name) || !wmode.convert(mode)) { errno = EINVAL; return 0; } return _wfreopen(wname.c_str(),wmode.c_str(),stream); } /// /// \brief Same as fopen but file_name and mode are UTF-8 strings /// /// If invalid UTF-8 given, NULL is returned and errno is set to EINVAL /// inline FILE *fopen(char const *file_name,char const *mode) { wstackstring wname; wshort_stackstring wmode; if(!wname.convert(file_name) || !wmode.convert(mode)) { errno = EINVAL; return 0; } return _wfopen(wname.c_str(),wmode.c_str()); } /// /// \brief Same as rename but old_name and new_name are UTF-8 strings /// /// If invalid UTF-8 given, -1 is returned and errno is set to EINVAL /// inline int rename(char const *old_name,char const *new_name) { wstackstring wold,wnew; if(!wold.convert(old_name) || !wnew.convert(new_name)) { errno = EINVAL; return -1; } return _wrename(wold.c_str(),wnew.c_str()); } /// /// \brief Same as rename but name is UTF-8 string /// /// If invalid UTF-8 given, -1 is returned and errno is set to EINVAL /// inline int remove(char const *name) { wstackstring wname; if(!wname.convert(name)) { errno = EINVAL; return -1; } return _wremove(wname.c_str()); } #endif } // nowide } // namespace boost #ifdef BOOST_MSVC #pragma warning(pop) #endif #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/cstdlib.hpp000064400000000000000000000006631332360634000255350ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_CSTDLIB_HPP_INCLUDED #define BOOST_NOWIDE_CSTDLIB_HPP_INCLUDED #include #include #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/filebuf.hpp000064400000000000000000000274211332360634000255260ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_FILEBUF_HPP #define BOOST_NOWIDE_FILEBUF_HPP #include #include #include #include #include #include #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4996 4244 4800) #endif namespace boost { namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_FSTREAM_TESTS) && !defined(BOOST_NOWIDE_DOXYGEN) using std::basic_filebuf; using std::filebuf; #else // Windows /// /// \brief This forward declaration defined the basic_filebuf type. 
/// /// it is implemented and specialized for CharType = char, it behaves /// implements std::filebuf over standard C I/O /// template > class basic_filebuf; /// /// \brief This is implementation of std::filebuf /// /// it is implemented and specialized for CharType = char, it behaves /// implements std::filebuf over standard C I/O /// template<> class basic_filebuf : public std::basic_streambuf { public: /// /// Creates new filebuf /// basic_filebuf() : buffer_size_(4), buffer_(0), file_(0), own_(true), mode_(std::ios::in | std::ios::out) { setg(0,0,0); setp(0,0); } virtual ~basic_filebuf() { if(file_) { ::fclose(file_); file_ = 0; } if(own_ && buffer_) delete [] buffer_; } /// /// Same as std::filebuf::open but s is UTF-8 string /// basic_filebuf *open(std::string const &s,std::ios_base::openmode mode) { return open(s.c_str(),mode); } /// /// Same as std::filebuf::open but s is UTF-8 string /// basic_filebuf *open(char const *s,std::ios_base::openmode mode) { if(file_) { sync(); ::fclose(file_); file_ = 0; } bool ate = bool(mode & std::ios_base::ate); if(ate) mode = mode ^ std::ios_base::ate; wchar_t const *smode = get_mode(mode); if(!smode) return 0; wstackstring name; if(!name.convert(s)) return 0; #ifdef BOOST_NOWIDE_FSTREAM_TESTS FILE *f = ::fopen(s,boost::nowide::convert(smode).c_str()); #else FILE *f = ::_wfopen(name.c_str(),smode); #endif if(!f) return 0; if(ate && fseek(f,0,SEEK_END)!=0) { fclose(f); return 0; } file_ = f; return this; } /// /// Same as std::filebuf::close() /// basic_filebuf *close() { bool res = sync() == 0; if(file_) { if(::fclose(file_)!=0) res = false; file_ = 0; } return res ? this : 0; } /// /// Same as std::filebuf::is_open() /// bool is_open() const { return file_ != 0; } private: void make_buffer() { if(buffer_) return; if(buffer_size_ > 0) { buffer_ = new char [buffer_size_]; own_ = true; } } protected: virtual std::streambuf *setbuf(char *s,std::streamsize n) { if(!buffer_ && n>=0) { buffer_ = s; buffer_size_ = n; own_ = false; } return this; } #ifdef BOOST_NOWIDE_DEBUG_FILEBUF void print_buf(char *b,char *p,char *e) { std::cerr << "-- Is Null: " << (b==0) << std::endl;; if(b==0) return; if(e != 0) std::cerr << "-- Total: " << e - b <<" offset from start " << p - b << std::endl; else std::cerr << "-- Total: " << p - b << std::endl; std::cerr << "-- ["; for(char *ptr = b;ptrprint_state(); } ~print_guard() { std::cerr << "Out: " << f << std::endl; self->print_state(); } basic_filebuf *self; char const *f; }; #else #endif int overflow(int c) { #ifdef BOOST_NOWIDE_DEBUG_FILEBUF print_guard g(this,__FUNCTION__); #endif if(!file_) return EOF; if(fixg() < 0) return EOF; size_t n = pptr() - pbase(); if(n > 0) { if(::fwrite(pbase(),1,n,file_) < n) return -1; fflush(file_); } if(buffer_size_ > 0) { make_buffer(); setp(buffer_,buffer_+buffer_size_); if(c!=EOF) sputc(c); } else if(c!=EOF) { if(::fputc(c,file_)==EOF) return EOF; fflush(file_); } return 0; } int sync() { return overflow(EOF); } int underflow() { #ifdef BOOST_NOWIDE_DEBUG_FILEBUF print_guard g(this,__FUNCTION__); #endif if(!file_) return EOF; if(fixp() < 0) return EOF; if(buffer_size_ == 0) { int c = ::fgetc(file_); if(c==EOF) { return EOF; } last_char_ = c; setg(&last_char_,&last_char_,&last_char_ + 1); return c; } make_buffer(); size_t n = ::fread(buffer_,1,buffer_size_,file_); setg(buffer_,buffer_,buffer_+n); if(n == 0) return EOF; return std::char_traits::to_int_type(*gptr()); } int pbackfail(int) { return pubseekoff(-1,std::ios::cur); } std::streampos seekoff(std::streamoff off, 
std::ios_base::seekdir seekdir, std::ios_base::openmode /*m*/) { #ifdef BOOST_NOWIDE_DEBUG_FILEBUF print_guard g(this,__FUNCTION__); #endif if(!file_) return EOF; if(fixp() < 0 || fixg() < 0) return EOF; if(seekdir == std::ios_base::cur) { if( ::fseek(file_,off,SEEK_CUR) < 0) return EOF; } else if(seekdir == std::ios_base::beg) { if( ::fseek(file_,off,SEEK_SET) < 0) return EOF; } else if(seekdir == std::ios_base::end) { if( ::fseek(file_,off,SEEK_END) < 0) return EOF; } else return -1; return ftell(file_); } std::streampos seekpos(std::streampos off,std::ios_base::openmode m) { return seekoff(std::streamoff(off),std::ios_base::beg,m); } private: int fixg() { if(gptr()!=egptr()) { std::streamsize off = gptr() - egptr(); setg(0,0,0); if(fseek(file_,off,SEEK_CUR) != 0) return -1; } setg(0,0,0); return 0; } int fixp() { if(pptr()!=0) { int r = sync(); setp(0,0); return r; } return 0; } void reset(FILE *f = 0) { sync(); if(file_) { fclose(file_); file_ = 0; } file_ = f; } static wchar_t const *get_mode(std::ios_base::openmode mode) { // // done according to n2914 table 106 27.9.1.4 // // note can't use switch case as overload operator can't be used // in constant expression if(mode == (std::ios_base::out)) return L"w"; if(mode == (std::ios_base::out | std::ios_base::app)) return L"a"; if(mode == (std::ios_base::app)) return L"a"; if(mode == (std::ios_base::out | std::ios_base::trunc)) return L"w"; if(mode == (std::ios_base::in)) return L"r"; if(mode == (std::ios_base::in | std::ios_base::out)) return L"r+"; if(mode == (std::ios_base::in | std::ios_base::out | std::ios_base::trunc)) return L"w+"; if(mode == (std::ios_base::in | std::ios_base::out | std::ios_base::app)) return L"a+"; if(mode == (std::ios_base::in | std::ios_base::app)) return L"a+"; if(mode == (std::ios_base::binary | std::ios_base::out)) return L"wb"; if(mode == (std::ios_base::binary | std::ios_base::out | std::ios_base::app)) return L"ab"; if(mode == (std::ios_base::binary | std::ios_base::app)) return L"ab"; if(mode == (std::ios_base::binary | std::ios_base::out | std::ios_base::trunc)) return L"wb"; if(mode == (std::ios_base::binary | std::ios_base::in)) return L"rb"; if(mode == (std::ios_base::binary | std::ios_base::in | std::ios_base::out)) return L"r+b"; if(mode == (std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::trunc)) return L"w+b"; if(mode == (std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app)) return L"a+b"; if(mode == (std::ios_base::binary | std::ios_base::in | std::ios_base::app)) return L"a+b"; return 0; } size_t buffer_size_; char *buffer_; FILE *file_; bool own_; char last_char_; std::ios::openmode mode_; }; /// /// \brief Convinience typedef /// typedef basic_filebuf filebuf; #endif // windows } // nowide } // namespace boost #ifdef BOOST_MSVC # pragma warning(pop) #endif #endif // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/fstream.hpp000064400000000000000000000204311332360634000255450ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_FSTREAM_INCLUDED_HPP #define BOOST_NOWIDE_FSTREAM_INCLUDED_HPP #include #include #include #include #include #include #include namespace boost { /// /// \brief This namespace includes implementation of the standard library functios /// such that they accept UTF-8 strings on Windows. On other platforms it is just an alias /// of std namespace (i.e. not on Windows) /// namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_FSTREAM_TESTS) && !defined(BOOST_NOWIDE_DOXYGEN) using std::basic_ifstream; using std::basic_ofstream; using std::basic_fstream; using std::ifstream; using std::ofstream; using std::fstream; #else /// /// \brief Same as std::basic_ifstream but accepts UTF-8 strings under Windows /// template > class basic_ifstream : public std::basic_istream { public: typedef basic_filebuf internal_buffer_type; typedef std::basic_istream internal_stream_type; basic_ifstream() : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); } explicit basic_ifstream(char const *file_name,std::ios_base::openmode mode = std::ios_base::in) : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); open(file_name,mode); } explicit basic_ifstream(std::string const &file_name,std::ios_base::openmode mode = std::ios_base::in) : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); open(file_name,mode); } void open(std::string const &file_name,std::ios_base::openmode mode = std::ios_base::in) { open(file_name.c_str(),mode); } void open(char const *file_name,std::ios_base::openmode mode = std::ios_base::in) { if(!buf_->open(file_name,mode | std::ios_base::in)) { this->setstate(std::ios_base::failbit); } else { this->clear(); } } bool is_open() { return buf_->is_open(); } bool is_open() const { return buf_->is_open(); } void close() { if(!buf_->close()) this->setstate(std::ios_base::failbit); else this->clear(); } internal_buffer_type *rdbuf() const { return buf_.get(); } ~basic_ifstream() { buf_->close(); } private: boost::scoped_ptr buf_; }; /// /// \brief Same as std::basic_ofstream but accepts UTF-8 strings under Windows /// template > class basic_ofstream : public std::basic_ostream { public: typedef basic_filebuf internal_buffer_type; typedef std::basic_ostream internal_stream_type; basic_ofstream() : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); } explicit basic_ofstream(char const *file_name,std::ios_base::openmode mode = std::ios_base::out) : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); open(file_name,mode); } explicit basic_ofstream(std::string const &file_name,std::ios_base::openmode mode = std::ios_base::out) : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); open(file_name,mode); } void open(std::string const &file_name,std::ios_base::openmode mode = std::ios_base::out) { open(file_name.c_str(),mode); } void open(char const *file_name,std::ios_base::openmode mode = std::ios_base::out) { if(!buf_->open(file_name,mode | std::ios_base::out)) { this->setstate(std::ios_base::failbit); } else { this->clear(); } } bool is_open() { return buf_->is_open(); } bool is_open() const { return buf_->is_open(); } void close() { if(!buf_->close()) this->setstate(std::ios_base::failbit); else this->clear(); } 
internal_buffer_type *rdbuf() const { return buf_.get(); } ~basic_ofstream() { buf_->close(); } private: boost::scoped_ptr buf_; }; /// /// \brief Same as std::basic_fstream but accepts UTF-8 strings under Windows /// template > class basic_fstream : public std::basic_iostream { public: typedef basic_filebuf internal_buffer_type; typedef std::basic_iostream internal_stream_type; basic_fstream() : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); } explicit basic_fstream(char const *file_name,std::ios_base::openmode mode = std::ios_base::out | std::ios_base::in) : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); open(file_name,mode); } explicit basic_fstream(std::string const &file_name,std::ios_base::openmode mode = std::ios_base::out | std::ios_base::in) : internal_stream_type(0) { buf_.reset(new internal_buffer_type()); std::ios::rdbuf(buf_.get()); open(file_name,mode); } void open(std::string const &file_name,std::ios_base::openmode mode = std::ios_base::out | std::ios_base::out) { open(file_name.c_str(),mode); } void open(char const *file_name,std::ios_base::openmode mode = std::ios_base::out | std::ios_base::out) { if(!buf_->open(file_name,mode)) { this->setstate(std::ios_base::failbit); } else { this->clear(); } } bool is_open() { return buf_->is_open(); } bool is_open() const { return buf_->is_open(); } void close() { if(!buf_->close()) this->setstate(std::ios_base::failbit); else this->clear(); } internal_buffer_type *rdbuf() const { return buf_.get(); } ~basic_fstream() { buf_->close(); } private: boost::scoped_ptr buf_; }; /// /// \brief Same as std::filebuf but accepts UTF-8 strings under Windows /// typedef basic_filebuf filebuf; /// /// Same as std::ifstream but accepts UTF-8 strings under Windows /// typedef basic_ifstream ifstream; /// /// Same as std::ofstream but accepts UTF-8 strings under Windows /// typedef basic_ofstream ofstream; /// /// Same as std::fstream but accepts UTF-8 strings under Windows /// typedef basic_fstream fstream; #endif } // nowide } // namespace boost #endif // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/integration/000075500000000000000000000000001332360634000257165ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/integration/filesystem.hpp000064400000000000000000000016041332360634000306140ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_INTEGRATION_FILESYSTEM_HPP_INCLUDED #define BOOST_NOWIDE_INTEGRATION_FILESYSTEM_HPP_INCLUDED #include #include namespace boost { namespace nowide { /// /// Instal utf8_codecvt facet into boost::filesystem::path such all char strings are interpreted as utf-8 strings /// inline void nowide_filesystem() { std::locale tmp = std::locale(std::locale(),new boost::nowide::utf8_codecvt()); boost::filesystem::path::imbue(tmp); } } // nowide } // boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/iostream.hpp000064400000000000000000000053161332360634000257340ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_IOSTREAM_HPP_INCLUDED #define BOOST_NOWIDE_IOSTREAM_HPP_INCLUDED #include #include #include #include #include #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4251) #endif namespace boost { namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_DOXYGEN) using std::cout; using std::cerr; using std::cin; using std::clog; #else /// \cond INTERNAL namespace details { class console_output_buffer; class console_input_buffer; class BOOST_NOWIDE_DECL winconsole_ostream : public std::ostream { winconsole_ostream(winconsole_ostream const &); void operator=(winconsole_ostream const &); public: winconsole_ostream(int fd); ~winconsole_ostream(); private: boost::scoped_ptr d; }; class BOOST_NOWIDE_DECL winconsole_istream : public std::istream { winconsole_istream(winconsole_istream const &); void operator=(winconsole_istream const &); public: winconsole_istream(); ~winconsole_istream(); private: struct data; boost::scoped_ptr d; }; } // details /// \endcond /// /// \brief Same as std::cin, but uses UTF-8 /// /// Note, the stream is not synchronized with stdio and not affected by std::ios::sync_with_stdio /// extern BOOST_NOWIDE_DECL details::winconsole_istream cin; /// /// \brief Same as std::cout, but uses UTF-8 /// /// Note, the stream is not synchronized with stdio and not affected by std::ios::sync_with_stdio /// extern BOOST_NOWIDE_DECL details::winconsole_ostream cout; /// /// \brief Same as std::cerr, but uses UTF-8 /// /// Note, the stream is not synchronized with stdio and not affected by std::ios::sync_with_stdio /// extern BOOST_NOWIDE_DECL details::winconsole_ostream cerr; /// /// \brief Same as std::clog, but uses UTF-8 /// /// Note, the stream is not synchronized with stdio and not affected by std::ios::sync_with_stdio /// extern BOOST_NOWIDE_DECL details::winconsole_ostream clog; #endif } // nowide } // namespace boost #ifdef BOOST_MSVC # pragma warning(pop) #endif #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/stackstring.hpp000064400000000000000000000076001332360634000264430ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_DETAILS_WIDESTR_H_INCLUDED #define BOOST_NOWIDE_DETAILS_WIDESTR_H_INCLUDED #include #include #include namespace boost { namespace nowide { /// /// \brief A class that allows to create a temporary wide or narrow UTF strings from /// wide or narrow UTF source. 
/// /// It uses on stack buffer of the string is short enough /// and allocated a buffer on the heap if the size of the buffer is too small /// template class basic_stackstring { public: static const size_t buffer_size = BufferSize; typedef CharOut output_char; typedef CharIn input_char; basic_stackstring(basic_stackstring const &other) : mem_buffer_(0) { clear(); if(other.mem_buffer_) { size_t len = 0; while(other.mem_buffer_[len]) len ++; mem_buffer_ = new output_char[len + 1]; memcpy(mem_buffer_,other.mem_buffer_,sizeof(output_char) * (len+1)); } else { memcpy(buffer_,other.buffer_,buffer_size * sizeof(output_char)); } } void swap(basic_stackstring &other) { std::swap(mem_buffer_,other.mem_buffer_); for(size_t i=0;i wstackstring; /// /// Convinience typedef /// typedef basic_stackstring stackstring; /// /// Convinience typedef /// typedef basic_stackstring wshort_stackstring; /// /// Convinience typedef /// typedef basic_stackstring short_stackstring; } // nowide } // namespace boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/system.hpp000064400000000000000000000016641332360634000254370ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_CSTDLIB_HPP #define BOOST_NOWIDE_CSTDLIB_HPP #include #include #include namespace boost { namespace nowide { #if !defined(BOOST_WINDOWS) && !defined(BOOST_NOWIDE_DOXYGEN) using ::system; #else // Windows /// /// Same as std::system but cmd is UTF-8. /// /// If the input is not valid UTF-8, -1 returned and errno set to EINVAL /// inline int system(char const *cmd) { if(!cmd) return _wsystem(0); wstackstring wcmd; if(!wcmd.convert(cmd)) { errno = EINVAL; return -1; } return _wsystem(wcmd.c_str()); } #endif } // nowide } // namespace boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/include/boost/nowide/utf8_codecvt.hpp000064400000000000000000000423421332360634000265060ustar00rootroot00000000000000// // Copyright (c) 2015 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_UTF8_CODECVT_HPP #define BOOST_NOWIDE_UTF8_CODECVT_HPP #include #include #include #include namespace boost { namespace nowide { // // Make sure that mbstate can keep 16 bit of UTF-16 sequence // BOOST_STATIC_ASSERT(sizeof(std::mbstate_t)>=2); #if defined _MSC_VER && _MSC_VER < 1700 // MSVC do_length is non-standard it counts wide characters instead of narrow and does not change mbstate #define BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST #endif template class utf8_codecvt; template class utf8_codecvt : public std::codecvt { public: utf8_codecvt(size_t refs = 0) : std::codecvt(refs) { } protected: typedef CharType uchar; virtual std::codecvt_base::result do_unshift(std::mbstate_t &s,char *from,char * /*to*/,char *&next) const { boost::uint16_t &state = *reinterpret_cast(&s); #ifdef DEBUG_CODECVT std::cout << "Entering unshift " << std::hex << state << std::dec << std::endl; #endif if(state != 0) return std::codecvt_base::error; next=from; return std::codecvt_base::ok; } virtual int do_encoding() const throw() { return 0; } virtual int do_max_length() const throw() { return 4; } virtual bool do_always_noconv() const throw() { return false; } virtual int do_length( std::mbstate_t #ifdef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST const #endif &std_state, char const *from, char const *from_end, size_t max) const { #ifndef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST char const *save_from = from; boost::uint16_t &state = *reinterpret_cast(&std_state); #else size_t save_max = max; boost::uint16_t state = *reinterpret_cast(&std_state); #endif while(max > 0 && from < from_end){ char const *prev_from = from; boost::uint32_t ch=boost::locale::utf::utf_traits::decode(from,from_end); if(ch==boost::locale::utf::incomplete || ch==boost::locale::utf::illegal) { from = prev_from; break; } max --; if(ch > 0xFFFF) { if(state == 0) { from = prev_from; state = 1; } else { state = 0; } } } #ifndef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST return from - save_from; #else return save_max - max; #endif } virtual std::codecvt_base::result do_in( std::mbstate_t &std_state, char const *from, char const *from_end, char const *&from_next, uchar *to, uchar *to_end, uchar *&to_next) const { std::codecvt_base::result r=std::codecvt_base::ok; // mbstate_t is POD type and should be initialized to 0 (i.a. state = stateT()) // according to standard. We use it to keep a flag 0/1 for surrogate pair writing // // if 0 no code above >0xFFFF observed, of 1 a code above 0xFFFF observerd // and first pair is written, but no input consumed boost::uint16_t &state = *reinterpret_cast(&std_state); while(to < to_end && from < from_end) { #ifdef DEBUG_CODECVT std::cout << "Entering IN--------------" << std::endl; std::cout << "State " << std::hex << state <::decode(from,from_end); if(ch==boost::locale::utf::illegal) { from = from_saved; r=std::codecvt_base::error; break; } if(ch==boost::locale::utf::incomplete) { from = from_saved; r=std::codecvt_base::partial; break; } // Normal codepoints go direcly to stream if(ch <= 0xFFFF) { *to++=ch; } else { // for other codepoints we do following // // 1. We can't consume our input as we may find ourselfs // in state where all input consumed but not all output written,i.e. only // 1st pair is written // 2. 
We only write first pair and mark this in the state, we also revert back // the from pointer in order to make sure this codepoint would be read // once again and then we would consume our input together with writing // second surrogate pair ch-=0x10000; boost::uint16_t vh = ch >> 10; boost::uint16_t vl = ch & 0x3FF; boost::uint16_t w1 = vh + 0xD800; boost::uint16_t w2 = vl + 0xDC00; if(state == 0) { from = from_saved; *to++ = w1; state = 1; } else { *to++ = w2; state = 0; } } } from_next=from; to_next=to; if(r == std::codecvt_base::ok && (from!=from_end || state!=0)) r = std::codecvt_base::partial; #ifdef DEBUG_CODECVT std::cout << "Returning "; switch(r) { case std::codecvt_base::ok: std::cout << "ok" << std::endl; break; case std::codecvt_base::partial: std::cout << "partial" << std::endl; break; case std::codecvt_base::error: std::cout << "error" << std::endl; break; default: std::cout << "other" << std::endl; break; } std::cout << "State " << std::hex << state <=2 in order // to be able to store first observerd surrogate pair // // State: state!=0 - a first surrogate pair was observerd (state = first pair), // we expect the second one to come and then zero the state /// boost::uint16_t &state = *reinterpret_cast(&std_state); while(to < to_end && from < from_end) { #ifdef DEBUG_CODECVT std::cout << "Entering OUT --------------" << std::endl; std::cout << "State " << std::hex << state <::width(ch); if(to_end - to < len) { r=std::codecvt_base::partial; break; } to = boost::locale::utf::utf_traits::encode(ch,to); state = 0; from++; } from_next=from; to_next=to; if(r==std::codecvt_base::ok && from!=from_end) r = std::codecvt_base::partial; #ifdef DEBUG_CODECVT std::cout << "Returning "; switch(r) { case std::codecvt_base::ok: std::cout << "ok" << std::endl; break; case std::codecvt_base::partial: std::cout << "partial" << std::endl; break; case std::codecvt_base::error: std::cout << "error" << std::endl; break; default: std::cout << "other" << std::endl; break; } std::cout << "State " << std::hex << state < class utf8_codecvt : public std::codecvt { public: utf8_codecvt(size_t refs = 0) : std::codecvt(refs) { } protected: typedef CharType uchar; virtual std::codecvt_base::result do_unshift(std::mbstate_t &/*s*/,char *from,char * /*to*/,char *&next) const { next=from; return std::codecvt_base::ok; } virtual int do_encoding() const throw() { return 0; } virtual int do_max_length() const throw() { return 4; } virtual bool do_always_noconv() const throw() { return false; } virtual int do_length( std::mbstate_t #ifdef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST const #endif &/*state*/, char const *from, char const *from_end, size_t max) const { #ifndef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST char const *start_from = from; #else size_t save_max = max; #endif while(max > 0 && from < from_end){ char const *save_from = from; boost::uint32_t ch=boost::locale::utf::utf_traits::decode(from,from_end); if(ch==boost::locale::utf::incomplete || ch==boost::locale::utf::illegal) { from = save_from; break; } max--; } #ifndef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST return from - start_from; #else return save_max - max; #endif } virtual std::codecvt_base::result do_in( std::mbstate_t &/*state*/, char const *from, char const *from_end, char const *&from_next, uchar *to, uchar *to_end, uchar *&to_next) const { std::codecvt_base::result r=std::codecvt_base::ok; // mbstate_t is POD type and should be initialized to 0 (i.a. state = stateT()) // according to standard. 
We use it to keep a flag 0/1 for surrogate pair writing // // if 0 no code above >0xFFFF observed, of 1 a code above 0xFFFF observerd // and first pair is written, but no input consumed while(to < to_end && from < from_end) { #ifdef DEBUG_CODECVT std::cout << "Entering IN--------------" << std::endl; std::cout << "State " << std::hex << state <::decode(from,from_end); if(ch==boost::locale::utf::illegal) { r=std::codecvt_base::error; from = from_saved; break; } if(ch==boost::locale::utf::incomplete) { r=std::codecvt_base::partial; from=from_saved; break; } *to++=ch; } from_next=from; to_next=to; if(r == std::codecvt_base::ok && from!=from_end) r = std::codecvt_base::partial; #ifdef DEBUG_CODECVT std::cout << "Returning "; switch(r) { case std::codecvt_base::ok: std::cout << "ok" << std::endl; break; case std::codecvt_base::partial: std::cout << "partial" << std::endl; break; case std::codecvt_base::error: std::cout << "error" << std::endl; break; default: std::cout << "other" << std::endl; break; } std::cout << "State " << std::hex << state <::width(ch); if(to_end - to < len) { r=std::codecvt_base::partial; break; } to = boost::locale::utf::utf_traits::encode(ch,to); from++; } from_next=from; to_next=to; if(r==std::codecvt_base::ok && from!=from_end) r = std::codecvt_base::partial; #ifdef DEBUG_CODECVT std::cout << "Returning "; switch(r) { case std::codecvt_base::ok: std::cout << "ok" << std::endl; break; case std::codecvt_base::partial: std::cout << "partial" << std::endl; break; case std::codecvt_base::error: std::cout << "error" << std::endl; break; default: std::cout << "other" << std::endl; break; } std::cout << "State " << std::hex << state < #ifdef BOOST_NOWIDE_USE_WINDOWS_H #include #else // // These are function prototypes... Allow to to include windows.h // extern "C" { __declspec(dllimport) wchar_t* __stdcall GetEnvironmentStringsW(void); __declspec(dllimport) int __stdcall FreeEnvironmentStringsW(wchar_t *); __declspec(dllimport) wchar_t* __stdcall GetCommandLineW(void); __declspec(dllimport) wchar_t** __stdcall CommandLineToArgvW(wchar_t const *,int *); __declspec(dllimport) unsigned long __stdcall GetLastError(); __declspec(dllimport) void* __stdcall LocalFree(void *); __declspec(dllimport) int __stdcall SetEnvironmentVariableW(wchar_t const *,wchar_t const *); __declspec(dllimport) unsigned long __stdcall GetEnvironmentVariableW(wchar_t const *,wchar_t *,unsigned long); } #endif #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/index.html000064400000000000000000000011631332360634000213330ustar00rootroot00000000000000 Boost.Locale Documentation Automatic redirection failed, please go to doc/html/index.html leatherman-1.4.2+dfsg/vendor/nowide/src/000075500000000000000000000000001332360634000201245ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/src/iostream.cpp000064400000000000000000000151211332360634000224530ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #define BOOST_NOWIDE_SOURCE #include #include #include #include #ifdef BOOST_WINDOWS #ifndef NOMINMAX # define NOMINMAX #endif #include namespace boost { namespace nowide { namespace details { namespace { bool is_atty_handle(HANDLE h) { if(h) { DWORD dummy; return GetConsoleMode(h,&dummy) == TRUE; } return false; } } class console_output_buffer : public std::streambuf { public: console_output_buffer(HANDLE h) : handle_(h) { } protected: int sync() { return overflow(EOF); } int overflow(int c) { if(!handle_) return -1; int n = pptr() - pbase(); int r = 0; if(n > 0 && (r=write(pbase(),n)) < 0) return -1; if(r < n) { memmove(pbase(),pbase() + r,n-r); } setp(buffer_, buffer_ + buffer_size); pbump(n-r); if(c!=EOF) sputc(c); return 0; } private: int write(char const *p,int n) { namespace uf = boost::locale::utf; char const *b = p; char const *e = p+n; DWORD size=0; if(n > buffer_size) return -1; wchar_t *out = wbuffer_; uf::code_point c; size_t decoded = 0; while(p < e && (c = uf::utf_traits::decode(p,e))!=uf::illegal && c!=uf::incomplete) { out = uf::utf_traits::encode(c,out); decoded = p-b; } if(c==uf::illegal) return -1; if(!WriteConsoleW(handle_,wbuffer_,out - wbuffer_,&size,0)) return -1; return decoded; } static const int buffer_size = 1024; char buffer_[buffer_size]; wchar_t wbuffer_[buffer_size]; // for null HANDLE handle_; }; class console_input_buffer: public std::streambuf { public: console_input_buffer(HANDLE h) : handle_(h), wsize_(0) { } protected: int pbackfail(int c) { if(c==EOF) return EOF; if(gptr()!=eback()) { gbump(-1); *gptr() = c; return 0; } if(pback_buffer_.empty()) { pback_buffer_.resize(4); char *b = &pback_buffer_[0]; char *e = b + pback_buffer_.size(); setg(b,e-1,e); *gptr() = c; } else { size_t n = pback_buffer_.size(); std::vector tmp; tmp.resize(n*2); memcpy(&tmp[n],&pback_buffer_[0],n); tmp.swap(pback_buffer_); char *b = &pback_buffer_[0]; char *e = b + n * 2; char *p = b+n-1; *p = c; setg(b,p,e); } return 0; } int underflow() { if(!handle_) return -1; if(!pback_buffer_.empty()) pback_buffer_.clear(); size_t n = read(); setg(buffer_,buffer_,buffer_+n); if(n == 0) return EOF; return std::char_traits::to_int_type(*gptr()); } private: size_t read() { namespace uf = boost::locale::utf; DWORD read_wchars = 0; size_t n = wbuffer_size - wsize_; if(!ReadConsoleW(handle_,wbuffer_,n,&read_wchars,0)) return 0; wsize_ += read_wchars; char *out = buffer_; wchar_t *b = wbuffer_; wchar_t *e = b + wsize_; wchar_t *p = b; uf::code_point c; wsize_ = e-p; while(p < e && (c = uf::utf_traits::decode(p,e))!=uf::illegal && c!=uf::incomplete) { out = uf::utf_traits::encode(c,out); wsize_ = e-p; } if(c==uf::illegal) return 0; if(c==uf::incomplete) { memmove(b,e-wsize_,sizeof(wchar_t)*wsize_); } return out - buffer_; } static const size_t buffer_size = 1024 * 3; static const size_t wbuffer_size = 1024; char buffer_[buffer_size]; wchar_t wbuffer_[buffer_size]; // for null HANDLE handle_; int wsize_; std::vector pback_buffer_; }; winconsole_ostream::winconsole_ostream(int fd) : std::ostream(0) { HANDLE h = 0; switch(fd) { case 1: h = GetStdHandle(STD_OUTPUT_HANDLE); break; case 2: h = GetStdHandle(STD_ERROR_HANDLE); break; } if(is_atty_handle(h)) { d.reset(new console_output_buffer(h)); std::ostream::rdbuf(d.get()); } else { std::ostream::rdbuf( fd == 1 ? 
std::cout.rdbuf() : std::cerr.rdbuf() ); } } winconsole_ostream::~winconsole_ostream() { try { flush(); } catch(...){} } winconsole_istream::winconsole_istream() : std::istream(0) { HANDLE h = GetStdHandle(STD_INPUT_HANDLE); if(is_atty_handle(h)) { d.reset(new console_input_buffer(h)); std::istream::rdbuf(d.get()); } else { std::istream::rdbuf(std::cin.rdbuf()); } } winconsole_istream::~winconsole_istream() { } } // details BOOST_NOWIDE_DECL details::winconsole_istream cin; BOOST_NOWIDE_DECL details::winconsole_ostream cout(1); BOOST_NOWIDE_DECL details::winconsole_ostream cerr(2); BOOST_NOWIDE_DECL details::winconsole_ostream clog(2); namespace { struct initialize { initialize() { boost::nowide::cin.tie(&boost::nowide::cout); boost::nowide::cerr.tie(&boost::nowide::cout); boost::nowide::clog.tie(&boost::nowide::cout); } } inst; } } // nowide } // namespace boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/standalone/000075500000000000000000000000001332360634000214655ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/standalone/CMakeLists.txt000064400000000000000000000054501332360634000242310ustar00rootroot00000000000000cmake_minimum_required(VERSION 2.6) include_directories(.) enable_testing() option(RUN_WITH_WINE "Use wine to run tests" OFF) if(NOT LIBDIR) set(LIBDIR lib CACHE STRING "Library installation directory" FORCE) endif() if(CMAKE_COMPILER_IS_GNUCXX) set(CXX_FLAGS "-Wall -Wextra") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set(CXX_FLAGS "-Wall -Wextra") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") set(CXX_FLAGS "-Wall") elseif(MSVC) set(CXX_FLAGS "/EHsc /W3") endif() if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") if(MSVC) set(NOWIDE_SUFFIX "-d") endif() endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX_FLAGS}") set(NOWIDE_TESTS test_convert test_stdio test_fstream ) foreach(TEST ${NOWIDE_TESTS}) add_executable(${TEST} test/${TEST}.cpp) if(RUN_WITH_WINE) add_test(NAME ${TEST} WORKING_DIRECTORY ${CMAKE_BUILD_DIR} COMMAND wine ./${TEST}.exe) else() add_test(${TEST} ${TEST}) endif() endforeach() add_library(nowide SHARED src/iostream.cpp) set_target_properties(nowide PROPERTIES VERSION 0.0.0 SOVERSION 0) set_target_properties(nowide PROPERTIES CLEAN_DIRECT_OUTPUT 1 OUTPUT_NAME "nowide${NOWIDE_SUFFIX}" ) add_library(nowide-static STATIC src/iostream.cpp) set_target_properties(nowide-static PROPERTIES CLEAN_DIRECT_OUTPUT 1 OUTPUT_NAME "nowide${NOWIDE_SUFFIX}" ) if(MSVC) set_target_properties(nowide-static PROPERTIES PREFIX "lib") endif() add_executable(test_iostream_shared test/test_iostream.cpp) set_target_properties(nowide PROPERTIES COMPILE_DEFINITIONS DLL_EXPORT) set_target_properties(test_iostream_shared PROPERTIES COMPILE_DEFINITIONS DLL_EXPORT) target_link_libraries(test_iostream_shared nowide) add_executable(test_iostream_static test/test_iostream.cpp) target_link_libraries(test_iostream_static nowide-static) add_executable(test_system test/test_system.cpp) add_executable(test_env_proto test/test_env.cpp) add_executable(test_env_win test/test_env.cpp) set_target_properties(test_env_win PROPERTIES COMPILE_DEFINITIONS NOWIDE_TEST_INCLUDE_WINDOWS) set(OTHER_TESTS test_iostream_shared test_iostream_static test_env_win test_env_proto) if(RUN_WITH_WINE) foreach(T ${OTHER_TESTS}) add_test(NAME ${T} WORKING_DIRECTORY ${CMAKE_BUILD_DIR} COMMAND wine ./${T}.exe) endforeach() add_test(NAME test_system_n WORKING_DIRECTORY ${CMAKE_BUILD_DIR} COMMAND wine ./test_system.exe "-n") add_test(NAME test_system_w 
WORKING_DIRECTORY ${CMAKE_BUILD_DIR} COMMAND wine ./test_system.exe "-w") else() foreach(T ${OTHER_TESTS}) add_test(${T} ${T}) endforeach() add_test(test_system_n test_system "-n") add_test(test_system_w test_system "-w") endif() install(TARGETS nowide nowide-static RUNTIME DESTINATION bin LIBRARY DESTINATION ${LIBDIR} ARCHIVE DESTINATION ${LIBDIR}) install(DIRECTORY nowide DESTINATION include) leatherman-1.4.2+dfsg/vendor/nowide/standalone/MinGW.cmake000064400000000000000000000006241332360634000234520ustar00rootroot00000000000000SET(CMAKE_SYSTEM_NAME Windows) SET(CMAKE_C_COMPILER /usr/bin/i686-w64-mingw32-gcc) SET(CMAKE_CXX_COMPILER /usr/bin/i686-w64-mingw32-g++) SET(CMAKE_RC_COMPILER /usr/bin/i686-w64-mingw32-windres) SET(CMAKE_FIND_ROOT_PATH /usr/i686-w64-mingw32) SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) set(CMAKE_SKIP_RPATH TRUE) leatherman-1.4.2+dfsg/vendor/nowide/standalone/config.hpp000064400000000000000000000015161332360634000234460ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef NOWIDE_CONFIG_H_INCLUDED #define NOWIDE_CONFIG_H_INCLUDED #if (defined(__WIN32) || defined(_WIN32) || defined(WIN32)) && !defined(__CYGWIN__) #define NOWIDE_WINDOWS #endif #ifdef _MSC_VER #define NOWIDE_MSVC #endif #ifdef NOWIDE_WINDOWS # if defined(DLL_EXPORT) || defined(NOWIDE_EXPORT) # ifdef NOWIDE_SOURCE # define NOWIDE_DECL __declspec(dllexport) # else # define NOWIDE_DECL __declspec(dllimport) # endif //NOWIDE_SOURCE # endif // DYN_LINK #endif #ifndef NOWIDE_DECL # define NOWIDE_DECL #endif #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/standalone/convert000075500000000000000000000020261332360634000230730ustar00rootroot00000000000000#!/bin/bash rm -fr nowide nowide_standalone nowide_standalone.zip boost_nowide boost_nowide.zip mkdir -p nowide/nowide mkdir -p nowide/src mkdir -p nowide/test cp ../include/boost/nowide/*.hpp nowide/nowide cp ../src/*.cpp nowide/src cp ../test/*.cpp ../test/*.hpp nowide/test cp ./*.hpp nowide/nowide/ SOURCES="nowide/test/* nowide/src/* nowide/nowide/*" sed 's/BOOST_NOWIDE_/NOWIDE_/g' -i $SOURCES sed 's/BOOST_/NOWIDE_/g' -i $SOURCES sed 's/boost::nowide::/nowide::/g' -i $SOURCES sed 's/boost::nowide/nowide/g' -i $SOURCES sed 's/boost::locale::/nowide::/g' -i $SOURCES sed 's/boost::/nowide::/g' -i $SOURCES sed 's/namespace boost {//' -i $SOURCES sed 's/} *\/\/ *namespace boost//' -i $SOURCES sed 's/ #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4275 4251 4231 4660) #endif #include namespace nowide { namespace conv { /// /// \addtogroup codepage /// /// @{ /// /// \brief The excepton that is thrown in case of conversion error /// class conversion_error : public std::runtime_error { public: conversion_error() : std::runtime_error("Conversion failed") {} }; /// /// enum that defines conversion policy /// typedef enum { skip = 0, ///< Skip illegal/unconvertable characters stop = 1, ///< Stop conversion and throw conversion_error default_method = skip ///< Default method - skip } method_type; /// @} } // conv } // nowide #ifdef BOOST_MSVC #pragma warning(pop) #endif #endif // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 
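A minimal usage sketch of the standalone conversion API declared in conv.hpp and encoding_utf.hpp: it assumes the include/nowide header layout installed by the standalone CMakeLists.txt and the nowide::conv namespace produced by the convert script; with the stop policy, malformed UTF-8 raises conversion_error instead of being silently skipped.

#include <nowide/conv.hpp>          // conversion_error, method_type (assumed standalone install layout)
#include <nowide/encoding_utf.hpp>  // utf_to_utf
#include <iostream>
#include <string>

int main() {
    try {
        // Strict conversion: throw on illegal or incomplete UTF-8 instead of skipping it.
        std::wstring w = nowide::conv::utf_to_utf<wchar_t>("\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d", nowide::conv::stop);
        std::cout << "converted " << w.size() << " wide characters" << std::endl;
    } catch (nowide::conv::conversion_error const &e) {
        std::cerr << e.what() << std::endl;
        return 1;
    }
    return 0;
}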
leatherman-1.4.2+dfsg/vendor/nowide/standalone/encoding_utf.hpp000064400000000000000000000046751332360634000246560ustar00rootroot00000000000000// // Copyright (c) 2009-2011 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef NOWIDE_ENCODING_UTF_HPP_INCLUDED #define NOWIDE_ENCODING_UTF_HPP_INCLUDED #include #include #include #include #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4275 4251 4231 4660) #endif namespace nowide{ namespace conv { /// /// Convert a Unicode text in range [begin,end) to other Unicode encoding /// template std::basic_string utf_to_utf(CharIn const *begin,CharIn const *end,method_type how = default_method) { std::basic_string result; result.reserve(end-begin); typedef std::back_insert_iterator > inserter_type; inserter_type inserter(result); utf::code_point c; while(begin!=end) { c=utf::utf_traits::template decode(begin,end); if(c==utf::illegal || c==utf::incomplete) { if(how==stop) throw conversion_error(); } else { utf::utf_traits::template encode(c,inserter); } } return result; } /// /// Convert a Unicode NULL terminated string \a str other Unicode encoding /// template std::basic_string utf_to_utf(CharIn const *str,method_type how = default_method) { CharIn const *end = str; while(*end) end++; return utf_to_utf(str,end,how); } /// /// Convert a Unicode string \a str other Unicode encoding /// template std::basic_string utf_to_utf(std::basic_string const &str,method_type how = default_method) { return utf_to_utf(str.c_str(),str.c_str()+str.size(),how); } } // conv } // nowide #ifdef BOOST_MSVC #pragma warning(pop) #endif #endif // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/standalone/run_convert_and_build.sh000075500000000000000000000007701332360634000263750ustar00rootroot00000000000000#!/bin/bash export WINEPATH="/usr/lib/gcc/i686-w64-mingw32/5.3-win32/;/usr/lib/gcc/i686-w64-mingw32/5" rm -fr /tmp/nwlin /tmp/nw pushd . ./convert && mkdir nowide/build && cd nowide/build && cmake -DCMAKE_TOOLCHAIN_FILE=../../MinGW.cmake -DCMAKE_INSTALL_PREFIX=/tmp/nw -DRUN_WITH_WINE=ON .. && make && make test && make install popd pushd . ./convert && mkdir nowide/build && cd nowide/build && cmake -DLIBDIR=lin64 -DCMAKE_INSTALL_PREFIX=/tmp/nwlin .. && make && make test && make install popd leatherman-1.4.2+dfsg/vendor/nowide/standalone/scoped_ptr.hpp000064400000000000000000000036131332360634000243430ustar00rootroot00000000000000#ifndef NOWIDE_SCOPED_PTR_HPP #define NOWIDE_SCOPED_PTR_HPP // (C) Copyright Greg Colvin and Beman Dawes 1998, 1999. // Copyright (c) 2001, 2002 Peter Dimov, // Copyright (C) 2012 Artyom Beilis // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // http://www.boost.org/libs/smart_ptr/scoped_ptr.htm // #include namespace nowide { // scoped_ptr mimics a built-in pointer except that it guarantees deletion // of the object pointed to, either on destruction of the scoped_ptr or via // an explicit reset(). scoped_ptr is a simple solution for simple needs; // use shared_ptr or std::auto_ptr if your needs are more complex. 
template class scoped_ptr // noncopyable { private: T * px; scoped_ptr(scoped_ptr const &); scoped_ptr & operator=(scoped_ptr const &); typedef scoped_ptr this_type; void operator==( scoped_ptr const& ) const; void operator!=( scoped_ptr const& ) const; public: typedef T element_type; explicit scoped_ptr( T * p = 0 ): px( p ) // never throws { } ~scoped_ptr() // never throws { delete px; } void reset(T * p = 0) // never throws { assert( p == 0 || p != px ); // catch self-reset errors this_type(p).swap(*this); } T & operator*() const // never throws { assert( px != 0 ); return *px; } T * operator->() const // never throws { assert( px != 0 ); return px; } T * get() const // never throws { return px; } operator bool() const { return px!=0; } void swap(scoped_ptr & b) // never throws { T * tmp = b.px; b.px = px; px = tmp; } }; } // namespace nowide #endif // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/standalone/utf.hpp000064400000000000000000000321251332360634000227770ustar00rootroot00000000000000// // Copyright (c) 2009-2011 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef NOWIDE_UTF_HPP_INCLUDED #define NOWIDE_UTF_HPP_INCLUDED #include #ifndef NOWIDE_MSVC namespace nowide { namespace utf { typedef unsigned uint32_t; typedef unsigned short uint16_t; typedef unsigned char uint8_t; } } #else #include #endif namespace nowide { /// /// \brief Namespace that holds basic operations on UTF encoded sequences /// /// All functions defined in this namespace do not require linking with Boost.Locale library /// namespace utf { /// \cond INTERNAL #ifdef __GNUC__ # define NOWIDE_LIKELY(x) __builtin_expect((x),1) # define NOWIDE_UNLIKELY(x) __builtin_expect((x),0) #else # define NOWIDE_LIKELY(x) (x) # define NOWIDE_UNLIKELY(x) (x) #endif /// \endcond /// /// \brief The integral type type that can hold a Unicode code point /// typedef uint32_t code_point; /// /// \brief Special constant that defines illegal code point /// static const code_point illegal = 0xFFFFFFFFu; /// /// \brief Special constant that defines incomplete code point /// static const code_point incomplete = 0xFFFFFFFEu; /// /// \brief the function checks if \a v is a valid code point /// inline bool is_valid_codepoint(code_point v) { if(v>0x10FFFF) return false; if(0xD800 <=v && v<= 0xDFFF) // surragates return false; return true; } #ifdef NOWIDE_DOXYGEN /// /// \brief UTF Traits class - functions to convert UTF sequences to and from Unicode code points /// template struct utf_traits { /// /// The type of the character /// typedef CharType char_type; /// /// Read one code point from the range [p,e) and return it. /// /// - If the sequence that was read is incomplete sequence returns \ref incomplete, /// - If illegal sequence detected returns \ref illegal /// /// Requirements /// /// - Iterator is valid input iterator /// /// Postconditions /// /// - p points to the last consumed character /// template static code_point decode(Iterator &p,Iterator e); /// /// Maximal width of valid sequence in the code units: /// /// - UTF-8 - 4 /// - UTF-16 - 2 /// - UTF-32 - 1 /// static const int max_width; /// /// The width of specific code point in the code units. 
/// /// Requirement: value is a valid Unicode code point /// Returns value in range [1..max_width] /// static int width(code_point value); /// /// Get the size of the trail part of variable length encoded sequence. /// /// Returns -1 if C is not valid lead character /// static int trail_length(char_type c); /// /// Returns true if c is trail code unit, always false for UTF-32 /// static bool is_trail(char_type c); /// /// Returns true if c is lead code unit, always true of UTF-32 /// static bool is_lead(char_type c); /// /// Convert valid Unicode code point \a value to the UTF sequence. /// /// Requirements: /// /// - \a value is valid code point /// - \a out is an output iterator should be able to accept at least width(value) units /// /// Returns the iterator past the last written code unit. /// template static Iterator encode(code_point value,Iterator out); /// /// Decodes valid UTF sequence that is pointed by p into code point. /// /// If the sequence is invalid or points to end the behavior is undefined /// template static code_point decode_valid(Iterator &p); }; #else template struct utf_traits; template struct utf_traits { typedef CharType char_type; static int trail_length(char_type ci) { unsigned char c = ci; if(c < 128) return 0; if(NOWIDE_UNLIKELY(c < 194)) return -1; if(c < 224) return 1; if(c < 240) return 2; if(NOWIDE_LIKELY(c <=244)) return 3; return -1; } static const int max_width = 4; static int width(code_point value) { if(value <=0x7F) { return 1; } else if(value <=0x7FF) { return 2; } else if(NOWIDE_LIKELY(value <=0xFFFF)) { return 3; } else { return 4; } } static bool is_trail(char_type ci) { unsigned char c=ci; return (c & 0xC0)==0x80; } static bool is_lead(char_type ci) { return !is_trail(ci); } template static code_point decode(Iterator &p,Iterator e) { if(NOWIDE_UNLIKELY(p==e)) return incomplete; unsigned char lead = *p++; // First byte is fully validated here int trail_size = trail_length(lead); if(NOWIDE_UNLIKELY(trail_size < 0)) return illegal; // // Ok as only ASCII may be of size = 0 // also optimize for ASCII text // if(trail_size == 0) return lead; code_point c = lead & ((1<<(6-trail_size))-1); // Read the rest unsigned char tmp; switch(trail_size) { case 3: if(NOWIDE_UNLIKELY(p==e)) return incomplete; tmp = *p++; if (!is_trail(tmp)) return illegal; c = (c << 6) | ( tmp & 0x3F); case 2: if(NOWIDE_UNLIKELY(p==e)) return incomplete; tmp = *p++; if (!is_trail(tmp)) return illegal; c = (c << 6) | ( tmp & 0x3F); case 1: if(NOWIDE_UNLIKELY(p==e)) return incomplete; tmp = *p++; if (!is_trail(tmp)) return illegal; c = (c << 6) | ( tmp & 0x3F); } // Check code point validity: no surrogates and // valid range if(NOWIDE_UNLIKELY(!is_valid_codepoint(c))) return illegal; // make sure it is the most compact representation if(NOWIDE_UNLIKELY(width(c)!=trail_size + 1)) return illegal; return c; } template static code_point decode_valid(Iterator &p) { unsigned char lead = *p++; if(lead < 192) return lead; int trail_size; if(lead < 224) trail_size = 1; else if(NOWIDE_LIKELY(lead < 240)) // non-BMP rare trail_size = 2; else trail_size = 3; code_point c = lead & ((1<<(6-trail_size))-1); switch(trail_size) { case 3: c = (c << 6) | ( static_cast(*p++) & 0x3F); case 2: c = (c << 6) | ( static_cast(*p++) & 0x3F); case 1: c = (c << 6) | ( static_cast(*p++) & 0x3F); } return c; } template static Iterator encode(code_point value,Iterator out) { if(value <= 0x7F) { *out++ = static_cast(value); } else if(value <= 0x7FF) { *out++ = static_cast((value >> 6) | 0xC0); *out++ = 
static_cast((value & 0x3F) | 0x80); } else if(NOWIDE_LIKELY(value <= 0xFFFF)) { *out++ = static_cast((value >> 12) | 0xE0); *out++ = static_cast(((value >> 6) & 0x3F) | 0x80); *out++ = static_cast((value & 0x3F) | 0x80); } else { *out++ = static_cast((value >> 18) | 0xF0); *out++ = static_cast(((value >> 12) & 0x3F) | 0x80); *out++ = static_cast(((value >> 6) & 0x3F) | 0x80); *out++ = static_cast((value & 0x3F) | 0x80); } return out; } }; // utf8 template struct utf_traits { typedef CharType char_type; // See RFC 2781 static bool is_first_surrogate(uint16_t x) { return 0xD800 <=x && x<= 0xDBFF; } static bool is_second_surrogate(uint16_t x) { return 0xDC00 <=x && x<= 0xDFFF; } static code_point combine_surrogate(uint16_t w1,uint16_t w2) { return ((code_point(w1 & 0x3FF) << 10) | (w2 & 0x3FF)) + 0x10000; } static int trail_length(char_type c) { if(is_first_surrogate(c)) return 1; if(is_second_surrogate(c)) return -1; return 0; } /// /// Returns true if c is trail code unit, always false for UTF-32 /// static bool is_trail(char_type c) { return is_second_surrogate(c); } /// /// Returns true if c is lead code unit, always true of UTF-32 /// static bool is_lead(char_type c) { return !is_second_surrogate(c); } template static code_point decode(It ¤t,It last) { if(NOWIDE_UNLIKELY(current == last)) return incomplete; uint16_t w1=*current++; if(NOWIDE_LIKELY(w1 < 0xD800 || 0xDFFF < w1)) { return w1; } if(w1 > 0xDBFF) return illegal; if(current==last) return incomplete; uint16_t w2=*current++; if(w2 < 0xDC00 || 0xDFFF < w2) return illegal; return combine_surrogate(w1,w2); } template static code_point decode_valid(It ¤t) { uint16_t w1=*current++; if(NOWIDE_LIKELY(w1 < 0xD800 || 0xDFFF < w1)) { return w1; } uint16_t w2=*current++; return combine_surrogate(w1,w2); } static const int max_width = 2; static int width(code_point u) { return u>=0x10000 ? 2 : 1; } template static It encode(code_point u,It out) { if(NOWIDE_LIKELY(u<=0xFFFF)) { *out++ = static_cast(u); } else { u -= 0x10000; *out++ = static_cast(0xD800 | (u>>10)); *out++ = static_cast(0xDC00 | (u & 0x3FF)); } return out; } }; // utf16; template struct utf_traits { typedef CharType char_type; static int trail_length(char_type c) { if(is_valid_codepoint(c)) return 0; return -1; } static bool is_trail(char_type /*c*/) { return false; } static bool is_lead(char_type /*c*/) { return true; } template static code_point decode_valid(It ¤t) { return *current++; } template static code_point decode(It ¤t,It last) { if(NOWIDE_UNLIKELY(current == last)) return nowide::utf::incomplete; code_point c=*current++; if(NOWIDE_UNLIKELY(!is_valid_codepoint(c))) return nowide::utf::illegal; return c; } static const int max_width = 1; static int width(code_point /*u*/) { return 1; } template static It encode(code_point u,It out) { *out++ = static_cast(u); return out; } }; // utf32 #endif } // utf } // nowide #endif // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/template.hpp000064400000000000000000000006501332360634000216620ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_???_H_INCLUDED #define BOOST_NOWIDE_???_H_INCLUDED namespace boost { namespace nowide { } // nowide } // boost #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/000075500000000000000000000000001332360634000203145ustar00rootroot00000000000000leatherman-1.4.2+dfsg/vendor/nowide/test/Jamfile.v2000064400000000000000000000031401332360634000221320ustar00rootroot00000000000000# Boost System Library test Jamfile # Copyright Beman Dawes 2003, 2006, Artyom Beilis 2012 # Distributed under the Boost Software License, Version 1.0. # See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt # See library home page at http://www.boost.org/libs/system project #: requirements /boost/nowide//boost_nowide ; test-suite "nowide" : [ run test_convert.cpp ] [ run test_fstream.cpp ] [ run test_stdio.cpp ] [ run test_codecvt.cpp ] [ run test_env.cpp : : : msvc:shell32.lib : test_env_proto ] [ run test_env.cpp : : : msvc:shell32.lib BOOST_NOWIDE_TEST_INCLUDE_WINDOWS=1 : test_env_with_native ] [ run test_system.cpp : "-w" : : msvc:shell32.lib : test_system_w ] [ run test_system.cpp : "-n" : : msvc:shell32.lib : test_system_n ] [ run test_fs.cpp : : : /boost/filesystem//boost_filesystem : test_fs ] [ run test_iostream.cpp : : : /boost/nowide//boost_nowide static : test_iostream_static ] [ run test_iostream.cpp : : : /boost/nowide//boost_nowide shared : test_iostream_shared ] ; leatherman-1.4.2+dfsg/vendor/nowide/test/test.hpp000064400000000000000000000013421332360634000220040ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_NOWIDE_LIB_TEST_H_INCLUDED #define BOOST_NOWIDE_LIB_TEST_H_INCLUDED #include #include #define TEST(x) do { \ if(x) \ break; \ std::ostringstream ss; \ ss<< "Error " #x " in " << __FILE__ \ <<':'<<__LINE__<<" "<< __FUNCTION__; \ throw std::runtime_error(ss.str()); \ }while(0) #endif /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/test_codecvt.cpp000064400000000000000000000173061332360634000235150ustar00rootroot00000000000000// // Copyright (c) 2015 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include #include #include #include #include #include #include "test.hpp" static char const *utf8_name = "\xf0\x9d\x92\x9e-\xD0\xBF\xD1\x80\xD0\xB8\xD0\xB2\xD0\xB5\xD1\x82-\xE3\x82\x84\xE3\x81\x82.txt"; static wchar_t const *wide_name = L"\U0001D49E-\u043F\u0440\u0438\u0432\u0435\u0442-\u3084\u3042.txt"; char const *res(std::codecvt_base::result r) { switch(r){ case std::codecvt_base::ok: return "ok"; case std::codecvt_base::partial: return "partial"; case std::codecvt_base::error: return "error"; case std::codecvt_base::noconv: return "noconv"; default: return "error"; } } typedef std::codecvt cvt_type; void test_codecvt_in_n_m(cvt_type const &cvt,int n,int m) { wchar_t const *wptr = wide_name; int wlen = wcslen(wide_name); int u8len = strlen(utf8_name); char const *from = utf8_name; char const *end = from; char const *real_end = utf8_name + u8len; char const *from_next = from; std::mbstate_t mb=std::mbstate_t(); while(from_next < real_end) { if(from == end) { end = from + n; if(end > real_end) end = real_end; } wchar_t buf[128]; wchar_t *to = buf; wchar_t *to_end = to + m; wchar_t *to_next = to; std::mbstate_t mb2 = mb; std::codecvt_base::result r = cvt.in(mb,from,end,from_next,to,to_end,to_next); //std::cout << "In from_size=" << (end-from) << " from move=" << (from_next - from) << " to move= " << to_next - to << " state = " << res(r) << std::endl; int count = cvt.length(mb2,from,end,to_end - to); #ifndef BOOST_NOWIDE_DO_LENGTH_MBSTATE_CONST TEST(memcmp(&mb,&mb2,sizeof(mb))==0); if(count != from_next - from) { std::cout << count << " " << from_next - from << std::endl; } TEST(count == from_next - from); #else TEST(count == to_next - to); #endif if(r == cvt_type::partial) { end+=n; if(end > real_end) end = real_end; } else TEST(r == cvt_type::ok); while(to!=to_next) { TEST(*wptr == *to); wptr++; to++; } to=to_next; from = from_next; } TEST(wptr == wide_name + wlen); TEST(from == real_end); } void test_codecvt_out_n_m(cvt_type const &cvt,int n,int m) { char const *nptr = utf8_name; int wlen = wcslen(wide_name); int u8len = strlen(utf8_name); std::mbstate_t mb=std::mbstate_t(); wchar_t const *from_next = wide_name; wchar_t const *real_from_end = wide_name + wlen; char buf[256]; char *to = buf; char *to_next = to; char *to_end = to + n; char *real_to_end = buf + sizeof(buf); while(from_next < real_from_end) { wchar_t const *from = from_next; wchar_t const *from_end = from + m; if(from_end > real_from_end) from_end = real_from_end; if(to_end == to) { to_end = to+n; } std::codecvt_base::result r = cvt.out(mb,from,from_end,from_next,to,to_end,to_next); //std::cout << "In from_size=" << (end-from) << " from move=" << (from_next - from) << " to move= " << to_next - to << " state = " << res(r) << std::endl; if(r == cvt_type::partial) { TEST(to_end - to_next < cvt.max_length()); to_end += n; if(to_end > real_to_end) to_end = real_to_end; } else { TEST(r == cvt_type::ok); } while(to!=to_next) { TEST(*nptr == *to); nptr++; to++; } from = from_next; } TEST(nptr == utf8_name + u8len); TEST(from_next == real_from_end); TEST(cvt.unshift(mb,to,to+n,to_next)==cvt_type::ok); TEST(to_next == to); } void test_codecvt_conv() { std::cout << "Conversions " << std::endl; std::locale l(std::locale::classic(),new boost::nowide::utf8_codecvt()); cvt_type const &cvt = std::use_facet(l); for(int i=1;i<=(int)strlen(utf8_name)+1;i++) { for(int j=1;j<=(int)wcslen(wide_name)+1;j++) { try { test_codecvt_in_n_m(cvt,i,j); 
test_codecvt_out_n_m(cvt,i,j); } catch(...) { std::cerr << "Wlen=" <()); cvt_type const &cvt = std::use_facet(l); std::cout << "- UTF-8" << std::endl; { wchar_t buf[2]; wchar_t *to=buf; wchar_t *to_end = buf+2; wchar_t *to_next = to; char const *err_utf="1\xFF\xFF"; { std::mbstate_t mb=std::mbstate_t(); char const *from=err_utf; char const *from_end = from + strlen(from); char const *from_next = from; to_next = to; TEST(cvt.in(mb,from,from_end,from_next,to,to_end,to_next)==cvt_type::error); TEST(from_next == from+1); TEST(to_next == to + 1); TEST(*to == '1'); } err_utf++; { std::mbstate_t mb=std::mbstate_t(); char const *from=err_utf; char const *from_end = from + strlen(from); char const *from_next = from; TEST(cvt.in(mb,from,from_end,from_next,to,to_end,to_next)==cvt_type::error); TEST(from_next == from); TEST(to_next == to); } } std::cout << "- UTF-16/32" << std::endl; { char buf[32]; char *to=buf; char *to_end = buf+32; char *to_next = to; wchar_t err_buf[3] = { '1' , 0xDC9E }; // second surrogate not works both for UTF-16 and 32 wchar_t const *err_utf = err_buf; { std::mbstate_t mb=std::mbstate_t(); wchar_t const *from=err_utf; wchar_t const *from_end = from + wcslen(from); wchar_t const *from_next = from; TEST(cvt.out(mb,from,from_end,from_next,to,to_end,to_next)==cvt_type::error); TEST(from_next == from+1); TEST(to_next == to + 1); TEST(*to == '1'); } err_utf++; { std::mbstate_t mb=std::mbstate_t(); wchar_t const *from=err_utf; wchar_t const *from_end = from + wcslen(from); wchar_t const *from_next = from; to_next = to; TEST(cvt.out(mb,from,from_end,from_next,to,to_end,to_next)==cvt_type::error); TEST(from_next == from); TEST(to_next == to); } } } int main() { try { test_codecvt_conv(); test_codecvt_err(); } catch(std::exception const &e) { std::cerr << "Failed : " << e.what() << std::endl; return 1; } std::cout << "Ok" << std::endl; return 0; } /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/test_convert.cpp000064400000000000000000000071241332360634000235430ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include #include #include "test.hpp" #include int main() { try { std::string hello = "\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d"; std::wstring whello = L"\u05e9\u05dc\u05d5\u05dd"; std::cout << "- boost::nowide::widen" << std::endl; { char const *b=hello.c_str(); char const *e=b+8; wchar_t buf[6] = { 0,0,0,0,0,1 }; TEST(boost::nowide::widen(buf,5,b,e)==buf); TEST(buf == whello); TEST(buf[5] == 1); TEST(boost::nowide::widen(buf,4,b,e)==0); TEST(boost::nowide::widen(buf,5,b,e-1)==0); TEST(boost::nowide::widen(buf,5,b,e-2)==buf); TEST(boost::nowide::widen(buf,5,b,b)==buf && buf[0]==0); TEST(boost::nowide::widen(buf,5,b,b+2)==buf && buf[1]==0 && buf[0]==whello[0]); b="\xFF\xFF"; e=b+2; TEST(boost::nowide::widen(buf,5,b,e)==0); b="\xd7\xa9\xFF"; e=b+3; TEST(boost::nowide::widen(buf,5,b,e)==0); TEST(boost::nowide::widen(buf,5,b,b+1)==0); } std::cout << "- boost::nowide::narrow" << std::endl; { wchar_t const *b=whello.c_str(); wchar_t const *e=b+4; char buf[10] = {0}; buf[9]=1; TEST(boost::nowide::narrow(buf,9,b,e)==buf); TEST(buf == hello); TEST(buf[9] == 1); TEST(boost::nowide::narrow(buf,8,b,e)==0); TEST(boost::nowide::narrow(buf,7,b,e-1)==buf); TEST(buf==hello.substr(0,6)); } { char buf[3]; wchar_t wbuf[3]; TEST(boost::nowide::narrow(buf,3,L"xy")==std::string("xy")); TEST(boost::nowide::widen(wbuf,3,"xy")==std::wstring(L"xy")); } std::cout << "- boost::nowide::stackstring" << std::endl; { { boost::nowide::basic_stackstring sw; TEST(sw.convert(hello.c_str())); TEST(sw.c_str() == whello); TEST(sw.convert(hello.c_str(),hello.c_str()+hello.size())); TEST(sw.c_str() == whello); } { boost::nowide::basic_stackstring sw; TEST(sw.convert(hello.c_str())); TEST(sw.c_str() == whello); TEST(sw.convert(hello.c_str(),hello.c_str()+hello.size())); TEST(sw.c_str() == whello); } { boost::nowide::basic_stackstring sw; TEST(sw.convert(whello.c_str())); TEST(sw.c_str() == hello); TEST(sw.convert(whello.c_str(),whello.c_str()+whello.size())); TEST(sw.c_str() == hello); } { boost::nowide::basic_stackstring sw; TEST(sw.convert(whello.c_str())); TEST(sw.c_str() == hello); TEST(sw.convert(whello.c_str(),whello.c_str()+whello.size())); TEST(sw.c_str() == hello); } } } catch(std::exception const &e) { std::cerr << "Failed :" << e.what() << std::endl; return 1; } std::cout << "Passed" << std::endl; return 0; } /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/test_env.cpp000064400000000000000000000026431332360634000226540ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include #include #include "test.hpp" #if defined(BOOST_NOWIDE_TEST_INCLUDE_WINDOWS) && defined(BOOST_WINDOWS) #include #endif #ifdef BOOST_MSVC # pragma warning(disable : 4996) #endif int main() { try { std::string example = "\xd7\xa9-\xd0\xbc-\xce\xbd"; char penv[256] = {0}; strncpy(penv,("BOOST_TEST2=" + example + "x").c_str(),sizeof(penv)-1); TEST(boost::nowide::setenv("BOOST_TEST1",example.c_str(),1)==0); TEST(boost::nowide::getenv("BOOST_TEST1")); TEST(boost::nowide::getenv("BOOST_TEST1")==example); TEST(boost::nowide::setenv("BOOST_TEST1","xx",0)==0); TEST(boost::nowide::getenv("BOOST_TEST1")==example); TEST(boost::nowide::putenv(penv)==0); TEST(boost::nowide::getenv("BOOST_TEST2")); TEST(boost::nowide::getenv("BOOST_TEST_INVALID")==0); TEST(boost::nowide::getenv("BOOST_TEST2")==example + "x"); std::cout << "Ok" << std::endl; return 0; } catch(std::exception const &e) { std::cerr << "Failed " << e.what() << std::endl; return 1; } } /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/test_fs.cpp000064400000000000000000000031241332360634000224670ustar00rootroot00000000000000// // Copyright (c) 2015 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include #include #include #include #include #include #include "test.hpp" static char const *utf8_name = "\xf0\x9d\x92\x9e-\xD0\xBF\xD1\x80\xD0\xB8\xD0\xB2\xD0\xB5\xD1\x82-\xE3\x82\x84\xE3\x81\x82.txt"; static wchar_t const *wide_name = L"\U0001D49E-\u043F\u0440\u0438\u0432\u0435\u0442-\u3084\u3042.txt"; int main() { try { boost::nowide::nowide_filesystem(); TEST(boost::nowide::widen(utf8_name) == wide_name); TEST(boost::nowide::narrow(wide_name) == utf8_name); boost::nowide::ofstream f(utf8_name); TEST(f); f << "Test" << std::endl; f.close(); TEST(boost::filesystem::is_regular_file(wide_name)==true); TEST(boost::filesystem::is_regular_file(utf8_name)==true); boost::nowide::remove(utf8_name); TEST(boost::filesystem::is_regular_file(utf8_name)==false); TEST(boost::filesystem::is_regular_file(wide_name)==false); } catch(std::exception const &e) { std::cerr << "Failed : " << e.what() << std::endl; return 1; } std::cout << "Ok" << std::endl; return 0; } /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/test_fstream.cpp000064400000000000000000000122421332360634000235210ustar00rootroot00000000000000#include #include #include #include #include "test.hpp" #ifdef BOOST_MSVC # pragma warning(disable : 4996) #endif int main() { char const *example = "\xd7\xa9-\xd0\xbc-\xce\xbd" ".txt"; #ifdef BOOST_WINDOWS wchar_t const *wexample = L"\u05e9-\u043c-\u03bd.txt"; #endif try { namespace nw=boost::nowide; std::cout << "Testing fstream" << std::endl; { nw::ofstream fo; fo.open(example); TEST(fo); fo<<"test"<> tmp; TEST(tmp=="test"); fi.close(); } { nw::ifstream fi(example); TEST(fi); std::string tmp; fi >> tmp; TEST(tmp=="test"); fi.close(); } #if defined(BOOST_WINDOWS) || defined(BOOST_NOWIDE_FSTREAM_TESTS) // C++11 interfaces aren't enabled at all platforms so need to skip // for std::*fstream { std::string name = example; nw::ifstream fi(name); TEST(fi); std::string tmp; fi >> tmp; TEST(tmp=="test"); fi.close(); } { nw::ifstream fi; fi.open(std::string(example)); TEST(fi); std::string tmp; fi >> tmp; TEST(tmp=="test"); fi.close(); 
} #endif { nw::ifstream fi(example,std::ios::binary); TEST(fi); std::string tmp; fi >> tmp; TEST(tmp=="test"); fi.close(); } { nw::ifstream fi; nw::remove(example); fi.open(example); TEST(!fi); } { nw::fstream f(example,nw::fstream::in | nw::fstream::out | nw::fstream::trunc | nw::fstream::binary); TEST(f); f << "test2" ; std::string tmp; f.seekg(0); f>> tmp; TEST(tmp=="test2"); f.close(); } { nw::ifstream fi(example,nw::fstream::ate | nw::fstream::binary); TEST(fi); TEST(fi.tellg()==std::streampos(5)); fi.seekg(-2,std::ios_base::cur); std::string tmp; fi >> tmp; TEST(tmp == "t2"); fi.close(); } nw::remove(example); } for(int i=-1;i<16;i++) { std::cout << "Complex io with buffer = " << i << std::endl; char buf[16]; nw::fstream f; if(i==0) f.rdbuf()->pubsetbuf(0,0); else if (i > 0) f.rdbuf()->pubsetbuf(buf,i); f.open(example,nw::fstream::in | nw::fstream::out | nw::fstream::trunc | nw::fstream::binary); f.put('a'); f.put('b'); f.put('c'); f.put('d'); f.put('e'); f.put('f'); f.put('g'); f.seekg(0); TEST(f.get()=='a'); f.seekg(1,std::ios::cur); TEST(f.get()=='c'); f.seekg(-1,std::ios::cur); TEST(f.get()=='c'); TEST(f.seekg(1)); f.put('B'); TEST(f.get()=='c'); TEST(f.seekg(1)); TEST(f.get() == 'B'); f.seekg(2); f.put('C'); TEST(f.get()=='d'); f.seekg(0); TEST(f.get()=='a'); TEST(f.get()=='B'); TEST(f.get()=='C'); TEST(f.get()=='d'); TEST(f.get()=='e'); TEST(f.putback('e')); TEST(f.putback('d')); TEST(f.get()=='d'); TEST(f.get()=='e'); TEST(f.get()=='f'); TEST(f.get()=='g'); TEST(f.get()==EOF); f.clear(); f.seekg(1); TEST(f.get()=='B'); TEST(f.putback('B')); TEST(f.putback('a')); TEST(!f.putback('x')); f.close(); TEST(boost::nowide::remove(example)==0); } } catch(std::exception const &e) { std::cerr << e.what() << std::endl; return 1; } std::cout << "Ok" << std::endl; return 0; } leatherman-1.4.2+dfsg/vendor/nowide/test/test_iostream.cpp000064400000000000000000000031071332360634000237030ustar00rootroot00000000000000#include #include "test.hpp" int main(int argc,char **argv) { char const *example = "Basic letters: \xd7\xa9-\xd0\xbc-\xce\xbd\n" "East Asian Letters: \xe5\x92\x8c\xe5\xb9\xb3\n" "Non-BMP letters: \xf0\x9d\x84\x9e\n"; try { int maxval = 15000; for(int i=0;i=0;i--) { int c = i % 96 + ' '; TEST(boost::nowide::cin.get() == c); } std::string v1,v2; boost::nowide::cout << "Normal I/O:" << std::endl; boost::nowide::cout << example << std::endl; boost::nowide::cerr << example << std::endl; boost::nowide::cout << "Flushing each character:" << std::endl; for(char const *s=example;*s;s++) { boost::nowide::cout << *s << std::flush; TEST(boost::nowide::cout); } TEST(boost::nowide::cout); TEST(boost::nowide::cerr); if(argc==2 && argv[1]==std::string("-i")) { boost::nowide::cin >> v1 >> v2; TEST(boost::nowide::cin); boost::nowide::cout << "First: "<< v1 << std::endl; boost::nowide::cout << "Second: "<< v2 << std::endl; TEST(boost::nowide::cout); } } catch(std::exception const &e) { std::cerr << "Fail: " << e.what() << std::endl; return 1; } std::cout <<"Ok" << std::endl; return 0; } leatherman-1.4.2+dfsg/vendor/nowide/test/test_stdio.cpp000064400000000000000000000031651332360634000232060ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include #include #include #include "test.hpp" #ifdef BOOST_MSVC # pragma warning(disable : 4996) #endif int main() { try { std::string example = "\xd7\xa9-\xd0\xbc-\xce\xbd.txt"; std::wstring wexample = L"\u05e9-\u043c-\u03bd.txt"; #ifdef BOOST_WINDOWS FILE *f=_wfopen(wexample.c_str(),L"w"); #else FILE *f=std::fopen(example.c_str(),"w"); #endif TEST(f); std::fprintf(f,"test\n"); std::fclose(f); f=0; TEST((f=boost::nowide::fopen(example.c_str(),"r"))!=0); char buf[16]; TEST(std::fgets(buf,16,f)!=0); TEST(strcmp(buf,"test\n")==0); TEST((f=boost::nowide::freopen(example.c_str(),"r+",f))!=0); std::fclose(f); f=0; TEST(boost::nowide::rename(example.c_str(),(example+".1").c_str())==0); TEST(boost::nowide::remove(example.c_str())<0); TEST((f=boost::nowide::fopen((example+".1").c_str(),"r"))!=0); std::fclose(f); f=0; TEST(boost::nowide::remove(example.c_str())<0); TEST(boost::nowide::remove((example+".1").c_str())==0); } catch(std::exception const &e) { std::cerr << "Failed " << e.what() << std::endl; return 1; } std::cout << "Ok" << std::endl; return 0; } /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/vendor/nowide/test/test_system.cpp000064400000000000000000000063651332360634000234150ustar00rootroot00000000000000// // Copyright (c) 2012 Artyom Beilis (Tonkikh) // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include #include #include #include #include "test.hpp" #include int main(int argc,char **argv,char **env) { try { std::string example = "\xd7\xa9-\xd0\xbc-\xce\xbd"; std::wstring wexample = L"\u05e9-\u043c-\u03bd"; boost::nowide::args a(argc,argv,env); if(argc==2 && argv[1][0]!='-') { TEST(argv[1]==example); TEST(argv[2] == 0); TEST(boost::nowide::getenv("BOOST_NOWIDE_TEST")); TEST(boost::nowide::getenv("BOOST_NOWIDE_TEST_NONE") == 0); TEST(boost::nowide::getenv("BOOST_NOWIDE_TEST")==example); std::string sample = "BOOST_NOWIDE_TEST=" + example; bool found = false; for(char **e=env;*e!=0;e++) { char *eptr = *e; //printf("%s\n",eptr); char *key_end = strchr(eptr,'='); TEST(key_end); std::string key = std::string(eptr,key_end); std::string value = key_end + 1; TEST(boost::nowide::getenv(key.c_str())); TEST(boost::nowide::getenv(key.c_str()) == value); if(*e == sample) found = true; } TEST(found); std::cout << "Subprocess ok" << std::endl; } else if(argc==2 && argv[1][0]=='-') { switch(argv[1][1]) { case 'w': { #ifdef BOOST_WINDOWS std::wstring env = L"BOOST_NOWIDE_TEST=" + wexample; _wputenv(env.c_str()); std::wstring wcommand = boost::nowide::widen(argv[0]); wcommand += L" "; wcommand += wexample; TEST(_wsystem(wcommand.c_str()) == 0); std::cout << "Wide Parent ok" << std::endl; #else std::cout << "Wide API is irrelevant" << std::endl; #endif } return 0; case 'n': TEST(boost::nowide::setenv("BOOST_NOWIDE_TEST",example.c_str(),1) == 0); TEST(boost::nowide::setenv("BOOST_NOWIDE_TEST_NONE",example.c_str(),1) == 0); TEST(boost::nowide::unsetenv("BOOST_NOWIDE_TEST_NONE") == 0); break; default: std::cout << "Invalid parameters expected '-n/-w'" << std::endl; return 1; } std::string command = "\""; command += argv[0]; command += "\" "; command += example; TEST(boost::nowide::system(command.c_str()) == 0); std::cout << "Parent ok" << std::endl; } else { std::cerr << "Invalid parameters" << std::endl; return 1; } return 0; } catch(std::exception const &e) { 
std::cerr << "Failed " << e.what() << std::endl; return 1; } } /// // vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 leatherman-1.4.2+dfsg/windows/000075500000000000000000000000001332360634000162455ustar00rootroot00000000000000leatherman-1.4.2+dfsg/windows/CMakeLists.txt000064400000000000000000000012711332360634000210060ustar00rootroot00000000000000find_package(Boost 1.54 REQUIRED COMPONENTS filesystem) add_leatherman_deps(Wbemuuid.lib userenv.lib "${Boost_LIBRARIES}") add_leatherman_includes("${Boost_INCLUDE_DIRS}") leatherman_dependency(nowide) leatherman_dependency(locale) leatherman_dependency(logging) leatherman_dependency(util) if (BUILDING_LEATHERMAN) leatherman_logging_namespace("leatherman.windows") leatherman_logging_line_numbers() endif() set(WINDOWS_UTIL_SOURCES "src/file_util.cc" "src/registry.cc" "src/process.cc" "src/user.cc" "src/wmi.cc" "src/system_error.cc" ) add_leatherman_library(${WINDOWS_UTIL_SOURCES}) add_leatherman_headers(inc/leatherman) add_leatherman_test(tests/file_utils_test.cc) leatherman-1.4.2+dfsg/windows/inc/000075500000000000000000000000001332360634000170165ustar00rootroot00000000000000leatherman-1.4.2+dfsg/windows/inc/leatherman/000075500000000000000000000000001332360634000211365ustar00rootroot00000000000000leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/000075500000000000000000000000001332360634000226305ustar00rootroot00000000000000leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/file_util.hpp000064400000000000000000000012501332360634000253130ustar00rootroot00000000000000/** * @file * Declares utility functions for working with files in Windows */ #pragma once #include #include namespace leatherman { namespace windows { namespace file_util { struct unknown_folder_exception : public std::runtime_error { explicit unknown_folder_exception(const std::string& msg) : std::runtime_error(msg) {} }; /** * Finds the ProgramData directory in a Windows-friendly way. * @return The ProgramData directory, using the Windows function * Throws unknown_folder_exception if SHGetKnownFolderPath fails. */ std::string get_programdata_dir(); }}} // namespace leatherman::windows::file_util leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/process.hpp000064400000000000000000000012371332360634000250220ustar00rootroot00000000000000/** * @file * Declares utility functions for querying process properties */ #pragma once namespace leatherman { namespace windows { namespace process { /** * Returns whether or not the OS has the ability to set elevated token information. * @return True on Windows Vista or later, otherwise false. */ bool supports_elevated_security(); /** * Returns whether or not the owner of the current process is running with elevated security privileges. * Only supported on Windows Vista or later. * @return True if elevated, otherwise false. */ bool has_elevated_security(); }}} // namespace leatherman::windows::process leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/registry.hpp000064400000000000000000000037221332360634000252150ustar00rootroot00000000000000/** * @file * Declares utility functions for interacting with the Windows registry. */ #pragma once #include #include #include namespace leatherman { namespace windows { /** * Exception thrown when registry lookupfails. */ struct registry_exception : std::runtime_error { /** * Constructs a registry_exception. * @param message The exception message. 
*/ explicit registry_exception(std::string const& message); }; namespace registry { /** * HKEY Classes, derived from * http://msdn.microsoft.com/en-us/library/windows/desktop/ms724868(v=vs.85).aspxs */ enum class HKEY { CLASSES_ROOT, CURRENT_CONFIG, CURRENT_USER, LOCAL_MACHINE, PERFORMANCE_DATA, PERFORMANCE_NLSTEXT, PERFORMANCE_TEXT, USERS }; /** * Retrieve a string value from the registry. * @param hkey The registry key handle. * @param subkey The name of the registry key. * @param value The name of the registry value. * @return A string value corresponding to a REG_SZ or REG_EXPAND_SZ type. * Returns an empty string if the value doesn't exist or isn't a string type. */ std::string get_registry_string(HKEY hkey, std::string const& subkey, std::string const& value); /** * Retrieve a vector of string values from the registry. * @param hkey The registry key handle. * @param subkey The name of the registry key. * @param value The name of the registry value. * @return An array of string values corresponding to the REG_MULTI_SZ type. * Returns an empty vector if the value doesn't exist or isn't a composite string type. */ std::vector get_registry_strings(HKEY hkey, std::string const& subkey, std::string const& value); } }} // namespace leatherman::windows leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/system_error.hpp000064400000000000000000000012521332360634000260760ustar00rootroot00000000000000/** * @file * Declares utility functions for getting Windows error messages. */ #pragma once #include namespace leatherman { namespace windows { /** * This is a wrapper for printing error messages on Windows. * @param err The Windows error code. * @return A formatted string " ()". */ std::string system_error(unsigned long err); /** * This is a wrapper for printing error messages on Windows. * It calls system_error with GetLastError as the argument. * @return A formatted string " ()". */ std::string system_error(); }} // namespace leatherman::windows leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/user.hpp000064400000000000000000000016301332360634000243170ustar00rootroot00000000000000/** * @file * Declares utility functions for querying user properties */ #pragma once #include namespace leatherman { namespace windows { namespace user { /** * Determines whether the current process has Administrator privileges and can be expected to succeed at * tasks restricted to Administrators. * @return True if the current process has Administrator privileges, otherwise false. */ bool is_admin(); /** * Query token membership to determine whether the current user is a member of the Administrators group. * @return True if user is an Administrator, otherwise false. */ bool check_token_membership(); /** * Finds the user's home directory in a Ruby-compatible way. * @return The home directory, trying %HOME% > %HOMEDRIVE%%HOMEPATH% > %USERPROFILE% */ std::string home_dir(); }}} // namespace leatherman::windows::user leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/windows.hpp000064400000000000000000000004251332360634000250340ustar00rootroot00000000000000/** * @file * Utility header for including Windows API headers. */ #pragma once // Windows has several header files with fixed dependency ordering. Include interdependent headers here, // and anywhere we need include this header. 
#include #include leatherman-1.4.2+dfsg/windows/inc/leatherman/windows/wmi.hpp000064400000000000000000000151451332360634000241430ustar00rootroot00000000000000/** * @file * Declares utility functions for interacting with Windows Management Instrumentation. */ #pragma once #include #include #include #include #include #include // Forward declarations class IWbemLocator; class IWbemServices; namespace leatherman { namespace windows { /** * Exception thrown when wmi initialization fails. */ struct wmi_exception : std::runtime_error { /** * Constructs a wmi_exception. * @param message The exception message. */ explicit wmi_exception(std::string const& message); }; /** * A class for initiating a WMI connection over COM and querying it. */ struct wmi { /** * Identifier for the WMI class Win32_ComputerSystem */ constexpr static char const* computersystem = "Win32_ComputerSystem"; /** * Identifier for the WMI class Win32_ComputerSystemProduct */ constexpr static char const* computersystemproduct = "Win32_ComputerSystemProduct"; /** * Identifier for the WMI class Win32_OperatingSystem */ constexpr static char const* operatingsystem = "Win32_OperatingSystem"; /** * Identifier for the WMI class Win32_BIOS */ constexpr static char const* bios = "Win32_Bios"; /** * Identifier for the WMI class Win32_Processor */ constexpr static char const* processor = "Win32_Processor"; /** * Identifier for the WMI property Architecture */ constexpr static char const* architecture = "Architecture"; /** * Identifier for the WMI property Name */ constexpr static char const* name = "Name"; /** * Identifier for the WMI property Manufacturer */ constexpr static char const* manufacturer = "Manufacturer"; /** * Identifier for the WMI property Model */ constexpr static char const* model = "Model"; /** * Identifier for the WMI property SerialNumber */ constexpr static char const* serialnumber = "SerialNumber"; /** * Identifier for the WMI property NumberOfLogicalProcessors */ constexpr static char const* numberoflogicalprocessors = "NumberOfLogicalProcessors"; /** * Identifier for the WMI property LastBootUpTime */ constexpr static char const* lastbootuptime = "LastBootUpTime"; /** * Identifier for the WMI property LocalDateTime */ constexpr static char const* localdatetime = "LocalDateTime"; /** * Identifier for the WMI property ProductType */ constexpr static char const* producttype = "ProductType"; /** * Identifier for the WMI property OtherTypeDescription */ constexpr static char const* othertypedescription = "OtherTypeDescription"; /** * Multi-map with case-insensitive lookup. */ using imap = std::multimap; /** * Vector of case-insensitive multi-maps. */ using imaps = std::vector; /** * Range of values for an array type. */ using kv_range = boost::iterator_range; /** * Initializes a COM connection for WMI queries. Throws a wmi_exception on failure. */ wmi(); /** * This is a utility for querying WMI classes. Windows queries are case-insensitive, * so the returned keys aren't guaranteed to have the same case as the arguments. * Returns a vector of case-insensitive maps so the argument keys can safely be used for lookup. * Some groups can return multiple objects; in that case the returned vector will * have a multi-map for each object. If a property returns an array, it will have * multiple entries in the multimap. 
* @param group The class alias to query * @param keys A list of keys to query from the specified class * @param extra Extra arguments to the WMI query, such as filters * @return A vector of case-insensitive maps of the keys argument and their corresponding values */ imaps query(std::string const& group, std::vector const& keys, std::string const& extra = "") const; /** * A utility for retrieving a single entry from an imap. It should only be used if * it's known that the requested property is not an array. * To retrieve an array, use imap's equal_range. * @param kvmap A case-insensitive multimap of keys and their values. * @param key The key to lookup. * @return Return the value matching the specified key. */ static std::string const& get(imap const& kvmap, std::string const& key); /** * A utility for retrieving an array entry from an imap. If only one value exists * it returns a range of one element. * @param kvmap A case-insensitive multimap of keys and their values. * @param key The key to lookup. * @return An iterator range of key-value pairs matching the specified key. */ static kv_range get_range(imap const& kvmap, std::string const& key); /** * A utility for retrieving a single entry from an imaps. It should only be used if * it's known that the requested group will only return a single object. * @param kvmaps A vector of case-insensitive multimap of keys and their values. * @param key The key to lookup. * @return Return the value matching the specified key. */ static std::string const& get(imaps const& kvmaps, std::string const& key); /** * A utility for retrieving an array entry from an imaps. It should only be used if * it's known that the requested group will only return a single object. * @param kvmaps A vector of case-insensitive multimap of keys and their values. * @param key The key to lookup. * @return An iterator range of key-value pairs matching the specified key. */ static kv_range get_range(imaps const& kvmaps, std::string const& key); private: util::scoped_resource _coInit; util::scoped_resource _pLoc; util::scoped_resource _pSvc; }; }} // namespace leatherman::windows leatherman-1.4.2+dfsg/windows/src/000075500000000000000000000000001332360634000170345ustar00rootroot00000000000000leatherman-1.4.2+dfsg/windows/src/file_util.cc000064400000000000000000000015431332360634000213220ustar00rootroot00000000000000#include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; using namespace boost::filesystem; namespace leatherman { namespace windows { namespace file_util { string get_programdata_dir() { PWSTR pdir; if (SUCCEEDED(SHGetKnownFolderPath(FOLDERID_ProgramData, 0, nullptr, &pdir))) { auto p = path(pdir); return p.string(); } throw unknown_folder_exception(_("error finding FOLDERID_ProgramData: {1}", leatherman::windows::system_error())); } }}} // namespace leatherman::windows::file_util leatherman-1.4.2+dfsg/windows/src/process.cc000064400000000000000000000032511332360634000210220ustar00rootroot00000000000000#include #include #include #include using namespace std; namespace leatherman { namespace windows { namespace process { bool supports_elevated_security() { // In the future this can use IsWindowsVistaOrGreater, but as of the initial work versionhelpers.h is only in // the master branch of MinGW-w64. 
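            // The check below is the manual equivalent of IsWindowsVistaOrGreater(): fill an
            // OSVERSIONINFOEXW with the Vista version number (_WIN32_WINNT_VISTA == 0x0600, so
            // major version 6, minor version 0) and ask VerifyVersionInfoW whether the running
            // OS compares GREATER_EQUAL on major version, minor version, and service pack.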
OSVERSIONINFOEXW vi = {sizeof(vi), HIBYTE(_WIN32_WINNT_VISTA), LOBYTE(_WIN32_WINNT_VISTA), 0, 0, {0}, 0}; return VerifyVersionInfoW(&vi, VER_MAJORVERSION|VER_MINORVERSION|VER_SERVICEPACKMAJOR, VerSetConditionMask(VerSetConditionMask(VerSetConditionMask(0, VER_MAJORVERSION, VER_GREATER_EQUAL), VER_MINORVERSION, VER_GREATER_EQUAL), VER_SERVICEPACKMAJOR, VER_GREATER_EQUAL)); } bool has_elevated_security() { HANDLE temp_token = INVALID_HANDLE_VALUE; if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &temp_token)) { LOG_DEBUG("OpenProcessToken call failed: {1}", system_error()); return false; } util::scoped_resource token(temp_token, CloseHandle); TOKEN_ELEVATION token_elevation; DWORD token_elevation_length; if (!GetTokenInformation(token, TokenElevation, &token_elevation, sizeof(TOKEN_ELEVATION), &token_elevation_length)) { LOG_DEBUG("GetTokenInformation call failed: {1}", system_error()); return false; } return token_elevation.TokenIsElevated; } }}} // namespace leatherman::windows::process leatherman-1.4.2+dfsg/windows/src/registry.cc000064400000000000000000000077471332360634000212320ustar00rootroot00000000000000#include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; namespace leatherman { namespace windows { registry_exception::registry_exception(string const& message) : runtime_error(message) { } static HKEY get_hkey(registry::HKEY hkey) { switch (hkey) { case registry::HKEY::CLASSES_ROOT: return HKEY_CLASSES_ROOT; case registry::HKEY::CURRENT_CONFIG: return HKEY_CURRENT_CONFIG; case registry::HKEY::CURRENT_USER: return HKEY_CURRENT_USER; case registry::HKEY::LOCAL_MACHINE: return HKEY_LOCAL_MACHINE; case registry::HKEY::PERFORMANCE_DATA: return HKEY_PERFORMANCE_DATA; case registry::HKEY::PERFORMANCE_NLSTEXT: return HKEY_PERFORMANCE_NLSTEXT; case registry::HKEY::PERFORMANCE_TEXT: return HKEY_PERFORMANCE_TEXT; case registry::HKEY::USERS: return HKEY_USERS; default: throw registry_exception(_("invalid HKEY specified")); } } // Returns the registry value as a wstring buffer. It's up to the caller to interpret it. // This only really works for RRF_RT_REG_EXPAND_SZ, RRF_RT_REG_MULTI_SZ, and RRF_RT_REG_SZ. static wstring get_regvalue(registry::HKEY hkey, string const& lpSubKey, string const& lpValue, DWORD flags) { auto hk = get_hkey(hkey); auto lpSubKeyW = boost::nowide::widen(lpSubKey); auto lpValueW = boost::nowide::widen(lpValue); DWORD size = 0u; auto err = RegGetValueW(hk, lpSubKeyW.c_str(), lpValueW.c_str(), flags, nullptr, nullptr, &size); if (err != ERROR_SUCCESS) { throw registry_exception(_("error reading registry key {1} {2}: {3}", lpSubKey, lpValue, windows::system_error(err))); } // Size is the number of bytes needed. wstring buffer((size*sizeof(char))/sizeof(wchar_t), '\0'); err = RegGetValueW(hk, lpSubKeyW.c_str(), lpValueW.c_str(), flags, nullptr, &buffer[0], &size); if (err != ERROR_SUCCESS) { throw registry_exception(_("error reading registry key {1} {2}: {3}", lpSubKey, lpValue, windows::system_error(err))); } // Size now represents bytes copied (which can be less than we allocated). Resize, and also remove the // extraneous null-terminator from RegGetValueW (wstring handles termination internally). auto numwchars = (size*sizeof(char))/sizeof(wchar_t); buffer.resize(numwchars > 0u ? 
numwchars - 1u : 0u); return buffer; } string registry::get_registry_string(registry::HKEY hkey, string const& subkey, string const& value) { // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms724868(v=vs.85).aspx // "RRF_RT_REG_SZV automatically converts REG_EXPAND_SZ to REG_SZ unless RRF_NOEXPAND is specified." // This seems like the desired behavior most of the time. return boost::nowide::narrow(get_regvalue(hkey, subkey, value, RRF_RT_REG_SZ)); } vector registry::get_registry_strings(registry::HKEY hkey, string const& subkey, string const& value) { auto buffer = get_regvalue(hkey, subkey, value, RRF_RT_REG_MULTI_SZ); vector strings; wstring accum; for (auto c : buffer) { if (c == L'\0') { string val = boost::trim_copy(boost::nowide::narrow(accum)); if (!val.empty()) { strings.emplace_back(move(val)); } accum.clear(); } else { accum += c; } } return strings; } }} // namespace leatherman::windows leatherman-1.4.2+dfsg/windows/src/system_error.cc000064400000000000000000000021171332360634000221010ustar00rootroot00000000000000#include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; using namespace std; namespace leatherman { namespace windows { string system_error(DWORD err) { LPWSTR buffer = nullptr; if (FormatMessageW( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, err, 0, reinterpret_cast(&buffer), 0, nullptr) == 0 || !buffer) { return _("unknown error ({1})", err); } // boost format could throw, so ensure the buffer is freed. util::scoped_resource guard(buffer, [](LPWSTR ptr) { if (ptr) LocalFree(ptr); }); return _("{1} ({2})", boost::nowide::narrow(buffer), err); } string system_error() { return system_error(GetLastError()); } }} // namespace leatherman::windows leatherman-1.4.2+dfsg/windows/src/user.cc000064400000000000000000000046541332360634000203320ustar00rootroot00000000000000#include #include #include #include #include #include #include #include using namespace std; namespace leatherman { namespace windows { namespace user { bool is_admin() { if (process::supports_elevated_security()) { return process::has_elevated_security(); } return check_token_membership(); } bool check_token_membership() { DWORD sid_size = SECURITY_MAX_SID_SIZE; unsigned char sid_buffer[SECURITY_MAX_SID_SIZE]; auto sid = static_cast(&sid_buffer); if (!CreateWellKnownSid(WinBuiltinAdministratorsSid, nullptr, sid, &sid_size)) { LOG_DEBUG("Failed to create administrators SID: {1}", system_error()); return false; } if (!IsValidSid(sid)) { LOG_DEBUG("Invalid SID"); return false; } BOOL is_member; if (!CheckTokenMembership(nullptr, sid, &is_member)) { LOG_DEBUG("Failed to check membership: {1}", system_error()); return false; } return is_member; } string home_dir() { HANDLE temp_token = INVALID_HANDLE_VALUE; if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &temp_token)) { LOG_DEBUG("OpenProcessToken call failed: {1}", system_error()); return {}; } util::scoped_resource token(temp_token, CloseHandle); DWORD pathLen = 0u; if (GetUserProfileDirectoryW(token, nullptr, &pathLen)) { LOG_DEBUG("GetUserProfileDirectoryW call returned unexpectedly"); return {}; } else if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) { LOG_DEBUG("GetUserProfileDirectoryW call failed: {1}", system_error()); return {}; } wstring buffer(pathLen, '\0'); if (!GetUserProfileDirectoryW(token, &buffer[0], &pathLen)) { LOG_DEBUG("GetUserProfileDirectoryW call failed: {1}", 
system_error()); return {}; } // Strip the trailing null character. buffer.resize(pathLen > 0u ? pathLen - 1u : 0u); return boost::nowide::narrow(buffer); } }}} // namespace leatherman::windows::user leatherman-1.4.2+dfsg/windows/src/wmi.cc000064400000000000000000000173631332360634000201510ustar00rootroot00000000000000#include #include #include #include #include #include #include #include // Mark string for translation (alias for leatherman::locale::format) using leatherman::locale::_; #define _WIN32_DCOM #include #include using namespace std; namespace leatherman { namespace windows { wmi_exception::wmi_exception(string const& message) : runtime_error(message) { } static string format_hresult(std::string s, HRESULT hres) { #ifdef LEATHERMAN_I18N // LOCALE: format a pointer as hex for printing an error message. return _("{1} (0x{2,num=hex})", s, hres); #else return _("%1% (0x%2$#x)", s, hres); #endif } // GUID taken from a Windows installation and unaccepted change to MinGW-w64. The MinGW-w64 library // doesn't define it, but obscures the Windows Platform SDK version of wbemuuid.lib. constexpr static CLSID MyCLSID_WbemLocator = {0x4590f811, 0x1d3a, 0x11d0, 0x89, 0x1f, 0x00, 0xaa, 0x00, 0x4b, 0x2e, 0x24}; wmi::wmi() { LOG_DEBUG("initializing WMI"); auto hres = CoInitializeEx(0, COINIT_APARTMENTTHREADED); if (FAILED(hres)) { if (hres == RPC_E_CHANGED_MODE) { LOG_DEBUG("using prior COM concurrency model"); } else { // Retry with multi-threaded, in case we're on Nano Server hres = CoInitializeEx(0, COINIT_MULTITHREADED); if (FAILED(hres)) { throw wmi_exception(format_hresult(_("failed to initialize COM library"), hres)); } else { LOG_DEBUG("COM single-threaded apartment not supported, using multi-threaded"); } } } if (SUCCEEDED(hres)) { _coInit = util::scoped_resource(true, [](bool b) { CoUninitialize(); }); } IWbemLocator *pLoc; hres = CoCreateInstance(MyCLSID_WbemLocator, 0, CLSCTX_INPROC_SERVER, IID_IWbemLocator, reinterpret_cast(&pLoc)); if (FAILED(hres)) { throw wmi_exception(format_hresult(_("failed to create IWbemLocator object"), hres)); } _pLoc = util::scoped_resource(pLoc, [](IWbemLocator *loc) { if (loc) loc->Release(); }); IWbemServices *pSvc; hres = (*_pLoc).ConnectServer(_bstr_t(L"ROOT\\CIMV2"), nullptr, nullptr, nullptr, 0, nullptr, nullptr, &pSvc); if (FAILED(hres)) { throw wmi_exception(format_hresult(_("could not connect to WMI server"), hres)); } _pSvc = util::scoped_resource(pSvc, [](IWbemServices *svc) { if (svc) svc->Release(); }); hres = CoSetProxyBlanket(_pSvc, RPC_C_AUTHN_WINNT, RPC_C_AUTHZ_NONE, NULL, RPC_C_AUTHN_LEVEL_CALL, RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_NONE); if (FAILED(hres)) { throw wmi_exception(format_hresult(_("could not set proxy blanket"), hres)); } } static void wmi_add_result(wmi::imap &vals, string const& group, string const& s, VARIANT *vtProp) { if (V_VT(vtProp) == (VT_ARRAY | VT_BSTR)) { // It's an array of elements; serialize the array as elements with the same key in the imap. // To keep this simple, ignore multi-dimensional arrays. 
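        // A one-dimensional SAFEARRAY of BSTRs is handled by locking the raw data with
        // SafeArrayAccessData, copying each of the rgsabound[0].cElements strings into the
        // multimap under the same key (narrowed and trimmed), and then releasing the lock
        // with SafeArrayUnaccessData.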
SAFEARRAY *arr = V_ARRAY(vtProp); if (arr->cDims != 1) { LOG_DEBUG("ignoring {1}-dimensional array in query {2}.{3}", arr->cDims, group, s); return; } BSTR *pbstr; if (FAILED(SafeArrayAccessData(arr, reinterpret_cast(&pbstr)))) { return; } for (auto i = 0u; i < arr->rgsabound[0].cElements; ++i) { vals.emplace(s, boost::trim_copy(boost::nowide::narrow(pbstr[i]))); } SafeArrayUnaccessData(arr); } else if (FAILED(VariantChangeType(vtProp, vtProp, 0, VT_BSTR)) || V_VT(vtProp) != VT_BSTR) { // Uninitialized (null) values can just be ignored. Any others get reported. if (V_VT(vtProp) != VT_NULL) { LOG_DEBUG("WMI query {1}.{2} result could not be converted from type {3} to a string", group, s, V_VT(vtProp)); } } else { vals.emplace(s, boost::trim_copy(boost::nowide::narrow(V_BSTR(vtProp)))); } } wmi::imaps wmi::query(string const& group, vector const& keys, string const& extended) const { IEnumWbemClassObject *_pEnum = NULL; string qry = "SELECT " + boost::join(keys, ",") + " FROM " + group; if (!extended.empty()) { qry += " " + extended; } auto hres = (*_pSvc).ExecQuery(_bstr_t(L"WQL"), _bstr_t(boost::nowide::widen(qry).c_str()), WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY, NULL, &_pEnum); if (FAILED(hres)) { LOG_DEBUG("query {1} failed", qry); return {}; } util::scoped_resource pEnum(_pEnum, [](IEnumWbemClassObject *rsc) { if (rsc) rsc->Release(); }); imaps array_of_vals; IWbemClassObject *pclsObjs[256]; ULONG uReturn = 0; while (pEnum) { auto hr = (*pEnum).Next(WBEM_INFINITE, 256, pclsObjs, &uReturn); if (FAILED(hr) || 0 == uReturn) { break; } for (auto pclsObj : boost::make_iterator_range(pclsObjs, pclsObjs+uReturn)) { imap vals; for (auto &s : keys) { VARIANT vtProp; CIMTYPE vtType; hr = pclsObj->Get(_bstr_t(boost::nowide::widen(s).c_str()), 0, &vtProp, &vtType, 0); if (FAILED(hr)) { LOG_DEBUG("query {1}.{2} could not be found", group, s); break; } wmi_add_result(vals, group, s, &vtProp); VariantClear(&vtProp); } pclsObj->Release(); array_of_vals.emplace_back(move(vals)); } } return array_of_vals; } string const& wmi::get(wmi::imap const& kvmap, string const& key) { static const string empty = {}; auto valIt = kvmap.find(key); if (valIt == kvmap.end()) { return empty; } else { if (kvmap.count(key) > 1) { LOG_DEBUG("only single value requested from array for key {1}", key); } return valIt->second; } } wmi::kv_range wmi::get_range(wmi::imap const& kvmap, string const& key) { return kv_range(kvmap.equal_range(key)); } string const& wmi::get(wmi::imaps const& kvmaps, string const& key) { if (kvmaps.size() > 0) { if (kvmaps.size() > 1) { LOG_DEBUG("only single entry requested from array of entries for key {1}", key); } return get(kvmaps[0], key); } else { throw wmi_exception(_("unable to get from empty array of objects")); } } wmi::kv_range wmi::get_range(wmi::imaps const& kvmaps, string const& key) { if (kvmaps.size() > 0) { if (kvmaps.size() > 1) { LOG_DEBUG("only single entry requested from array of entries for key {1}", key); } return get_range(kvmaps[0], key); } else { throw wmi_exception(_("unable to get_range from empty array of objects")); } } }} // namespace leatherman::windows leatherman-1.4.2+dfsg/windows/tests/000075500000000000000000000000001332360634000174075ustar00rootroot00000000000000leatherman-1.4.2+dfsg/windows/tests/file_utils_test.cc000064400000000000000000000006301332360634000231130ustar00rootroot00000000000000#include #include namespace leatherman { namespace windows { namespace file_util { TEST_CASE("windows::file_util::get_programdata_dir", "[windows]") { 
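    // Note: the section below compares against the literal "C:\\ProgramData", so it assumes the
    // default location; it will only pass when %SystemDrive% is C: and ProgramData has not been
    // redirected.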
    SECTION("should return the expected value of C:\\ProgramData") {
        REQUIRE(get_programdata_dir() == "C:\\ProgramData");
    }
}

}}} // namespace leatherman::windows::file_util
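To round out the section, here is a minimal usage sketch of the WMI interface declared in wmi.hpp and implemented in wmi.cc above. The class and property names are the constants defined on the wmi struct; the surrounding program (its include path, output, and error handling) is illustrative only and not part of the library.

    #include <leatherman/windows/wmi.hpp>
    #include <iostream>

    int main() {
        using namespace leatherman::windows;
        try {
            // Constructing a wmi object initializes COM and connects to ROOT\CIMV2;
            // it throws wmi_exception if any step fails.
            wmi connection;

            // Query Win32_OperatingSystem for two properties. The result is a vector of
            // case-insensitive multimaps, one per returned WMI object.
            auto results = connection.query(wmi::operatingsystem, { wmi::name, wmi::lastbootuptime });

            // get() pulls a single value out of the first result object; it returns an
            // empty string when the property is absent and throws when there are no results.
            std::cout << "name: " << wmi::get(results, wmi::name) << std::endl;
            std::cout << "last boot: " << wmi::get(results, wmi::lastbootuptime) << std::endl;
            return 0;
        } catch (wmi_exception const& e) {
            std::cerr << "WMI query failed: " << e.what() << std::endl;
            return 1;
        }
    }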